Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/processor_idle.c | 4
-rw-r--r-- drivers/atm/eni.c | 2
-rw-r--r-- drivers/base/dd.c | 8
-rw-r--r-- drivers/block/xen-blkback/common.h | 1
-rw-r--r-- drivers/block/xen-blkback/xenbus.c | 38
-rw-r--r-- drivers/block/xen-blkfront.c | 2
-rw-r--r-- drivers/bluetooth/btusb.c | 7
-rw-r--r-- drivers/bus/moxtet.c | 4
-rw-r--r-- drivers/bus/mvebu-mbus.c | 2
-rw-r--r-- drivers/char/agp/Kconfig | 2
-rw-r--r-- drivers/char/hw_random/ba431-rng.c | 16
-rw-r--r-- drivers/char/hw_random/bcm2835-rng.c | 27
-rw-r--r-- drivers/char/hw_random/cctrng.c | 20
-rw-r--r-- drivers/char/hw_random/core.c | 2
-rw-r--r-- drivers/char/hw_random/intel-rng.c | 2
-rw-r--r-- drivers/char/hw_random/omap-rng.c | 14
-rw-r--r-- drivers/char/hw_random/pic32-rng.c | 3
-rw-r--r-- drivers/char/hw_random/xiphera-trng.c | 4
-rw-r--r-- drivers/char/random.c | 21
-rw-r--r-- drivers/char/tpm/eventlog/acpi.c | 33
-rw-r--r-- drivers/char/tpm/eventlog/common.c | 3
-rw-r--r-- drivers/char/tpm/eventlog/efi.c | 29
-rw-r--r-- drivers/char/tpm/tpm_tis_i2c_cr50.c | 1
-rw-r--r-- drivers/clk/clk-fixed-factor.c | 9
-rw-r--r-- drivers/clk/clk.c | 47
-rw-r--r-- drivers/clk/qcom/camcc-sc7180.c | 50
-rw-r--r-- drivers/clk/socfpga/clk-gate.c | 2
-rw-r--r-- drivers/clocksource/arm_arch_timer.c | 23
-rw-r--r-- drivers/clocksource/clksrc-dbx500-prcmu.c | 8
-rw-r--r-- drivers/clocksource/dw_apb_timer_of.c | 28
-rw-r--r-- drivers/clocksource/hyperv_timer.c | 251
-rw-r--r-- drivers/clocksource/ingenic-ost.c | 9
-rw-r--r-- drivers/clocksource/ingenic-timer.c | 2
-rw-r--r-- drivers/clocksource/sh_cmt.c | 5
-rw-r--r-- drivers/clocksource/timer-atmel-tcb.c | 4
-rw-r--r-- drivers/clocksource/timer-fsl-ftm.c | 2
-rw-r--r-- drivers/clocksource/timer-microchip-pit64b.c | 2
-rw-r--r-- drivers/clocksource/timer-npcm7xx.c | 1
-rw-r--r-- drivers/clocksource/timer-of.c | 4
-rw-r--r-- drivers/clocksource/timer-pistachio.c | 4
-rw-r--r-- drivers/clocksource/timer-ti-dm-systimer.c | 157
-rw-r--r-- drivers/clocksource/timer-vf-pit.c | 2
-rw-r--r-- drivers/crypto/allwinner/Kconfig | 14
-rw-r--r-- drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c | 23
-rw-r--r-- drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 9
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c | 3
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c | 1
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 11
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 12
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c | 4
-rw-r--r-- drivers/crypto/amcc/crypto4xx_alg.c | 12
-rw-r--r-- drivers/crypto/amcc/crypto4xx_core.c | 18
-rw-r--r-- drivers/crypto/amcc/crypto4xx_core.h | 4
-rw-r--r-- drivers/crypto/amcc/crypto4xx_reg_def.h | 8
-rw-r--r-- drivers/crypto/amcc/crypto4xx_sa.h | 18
-rw-r--r-- drivers/crypto/amcc/crypto4xx_trng.h | 2
-rw-r--r-- drivers/crypto/amlogic/amlogic-gxl-cipher.c | 6
-rw-r--r-- drivers/crypto/amlogic/amlogic-gxl-core.c | 3
-rw-r--r-- drivers/crypto/atmel-ecc.c | 30
-rw-r--r-- drivers/crypto/atmel-i2c.c | 2
-rw-r--r-- drivers/crypto/atmel-sha.c | 4
-rw-r--r-- drivers/crypto/atmel-tdes.c | 1
-rw-r--r-- drivers/crypto/bcm/cipher.c | 7
-rw-r--r-- drivers/crypto/bcm/spu.c | 16
-rw-r--r-- drivers/crypto/bcm/spu2.c | 43
-rw-r--r-- drivers/crypto/bcm/util.c | 4
-rw-r--r-- drivers/crypto/caam/caamalg_qi2.c | 3
-rw-r--r-- drivers/crypto/caam/caampkc.c | 3
-rw-r--r-- drivers/crypto/cavium/cpt/cptpf_main.c | 1
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_isr.c | 4
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | 9
-rw-r--r-- drivers/crypto/cavium/zip/common.h | 1
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-main.c | 3
-rw-r--r-- drivers/crypto/ccp/ccp-dev.c | 12
-rw-r--r-- drivers/crypto/ccp/ccp-ops.c | 1
-rw-r--r-- drivers/crypto/ccp/sev-dev.c | 6
-rw-r--r-- drivers/crypto/ccp/sp-dev.c | 12
-rw-r--r-- drivers/crypto/ccp/sp-dev.h | 15
-rw-r--r-- drivers/crypto/ccp/sp-pci.c | 1
-rw-r--r-- drivers/crypto/ccp/tee-dev.c | 57
-rw-r--r-- drivers/crypto/ccp/tee-dev.h | 20
-rw-r--r-- drivers/crypto/ccree/cc_driver.c | 4
-rw-r--r-- drivers/crypto/chelsio/chcr_algo.c | 32
-rw-r--r-- drivers/crypto/chelsio/chcr_core.c | 5
-rw-r--r-- drivers/crypto/chelsio/chcr_core.h | 1
-rw-r--r-- drivers/crypto/geode-aes.c | 4
-rw-r--r-- drivers/crypto/hisilicon/Kconfig | 2
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre.h | 18
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 921
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_main.c | 158
-rw-r--r-- drivers/crypto/hisilicon/qm.c | 396
-rw-r--r-- drivers/crypto/hisilicon/qm.h | 29
-rw-r--r-- drivers/crypto/hisilicon/sec/sec_algs.c | 2
-rw-r--r-- drivers/crypto/hisilicon/sec/sec_drv.c | 13
-rw-r--r-- drivers/crypto/hisilicon/sec/sec_drv.h | 2
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec.h | 10
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.c | 137
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.h | 6
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_main.c | 267
-rw-r--r-- drivers/crypto/hisilicon/sgl.c | 37
-rw-r--r-- drivers/crypto/hisilicon/trng/trng.c | 13
-rw-r--r-- drivers/crypto/hisilicon/zip/zip.h | 50
-rw-r--r-- drivers/crypto/hisilicon/zip/zip_crypto.c | 698
-rw-r--r-- drivers/crypto/hisilicon/zip/zip_main.c | 99
-rw-r--r-- drivers/crypto/img-hash.c | 3
-rw-r--r-- drivers/crypto/inside-secure/safexcel.c | 2
-rw-r--r-- drivers/crypto/ixp4xx_crypto.c | 7
-rw-r--r-- drivers/crypto/keembay/keembay-ocs-aes-core.c | 8
-rw-r--r-- drivers/crypto/keembay/keembay-ocs-hcu-core.c | 8
-rw-r--r-- drivers/crypto/keembay/ocs-hcu.c | 8
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cpt_common.h | 10
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c | 14
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptlf.c | 8
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptlf.h | 1
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf.h | 1
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c | 33
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 144
-rw-r--r-- drivers/crypto/nx/nx-aes-cbc.c | 2
-rw-r--r-- drivers/crypto/nx/nx-aes-ccm.c | 2
-rw-r--r-- drivers/crypto/nx/nx-aes-ctr.c | 2
-rw-r--r-- drivers/crypto/nx/nx-aes-ecb.c | 2
-rw-r--r-- drivers/crypto/nx/nx-aes-gcm.c | 2
-rw-r--r-- drivers/crypto/nx/nx-aes-xcbc.c | 2
-rw-r--r-- drivers/crypto/nx/nx-common-powernv.c | 4
-rw-r--r-- drivers/crypto/nx/nx-sha256.c | 2
-rw-r--r-- drivers/crypto/nx/nx-sha512.c | 2
-rw-r--r-- drivers/crypto/nx/nx.c | 5
-rw-r--r-- drivers/crypto/nx/nx_debugfs.c | 2
-rw-r--r-- drivers/crypto/omap-aes.c | 7
-rw-r--r-- drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c | 1
-rw-r--r-- drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 1
-rw-r--r-- drivers/crypto/qat/qat_c3xxxvf/adf_drv.c | 4
-rw-r--r-- drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c | 1
-rw-r--r-- drivers/crypto/qat/qat_c62xvf/adf_drv.c | 4
-rw-r--r-- drivers/crypto/qat/qat_common/adf_accel_devices.h | 1
-rw-r--r-- drivers/crypto/qat/qat_common/adf_gen2_hw_data.c | 25
-rw-r--r-- drivers/crypto/qat/qat_common/adf_gen2_hw_data.h | 13
-rw-r--r-- drivers/crypto/qat/qat_common/adf_gen4_hw_data.c | 40
-rw-r--r-- drivers/crypto/qat/qat_common/adf_gen4_hw_data.h | 14
-rw-r--r-- drivers/crypto/qat/qat_common/adf_init.c | 4
-rw-r--r-- drivers/crypto/qat/qat_common/adf_isr.c | 29
-rw-r--r-- drivers/crypto/qat/qat_common/adf_pf2vf_msg.c | 2
-rw-r--r-- drivers/crypto/qat/qat_common/adf_transport.c | 1
-rw-r--r-- drivers/crypto/qat/qat_common/adf_vf2pf_msg.c | 4
-rw-r--r-- drivers/crypto/qat/qat_common/adf_vf_isr.c | 17
-rw-r--r-- drivers/crypto/qat/qat_common/qat_algs.c | 32
-rw-r--r-- drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | 4
-rw-r--r-- drivers/crypto/qce/cipher.h | 1
-rw-r--r-- drivers/crypto/qce/common.c | 25
-rw-r--r-- drivers/crypto/qce/common.h | 3
-rw-r--r-- drivers/crypto/qce/sha.c | 143
-rw-r--r-- drivers/crypto/qce/skcipher.c | 69
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto_ahash.c | 2
-rw-r--r-- drivers/crypto/s5p-sss.c | 17
-rw-r--r-- drivers/crypto/sa2ul.c | 143
-rw-r--r-- drivers/crypto/sa2ul.h | 4
-rw-r--r-- drivers/crypto/stm32/stm32-cryp.c | 4
-rw-r--r-- drivers/crypto/stm32/stm32-hash.c | 8
-rw-r--r-- drivers/crypto/ux500/cryp/cryp.c | 5
-rw-r--r-- drivers/crypto/ux500/cryp/cryp.h | 2
-rw-r--r-- drivers/crypto/ux500/cryp/cryp_core.c | 10
-rw-r--r-- drivers/crypto/ux500/cryp/cryp_irq.c | 2
-rw-r--r-- drivers/crypto/ux500/cryp/cryp_irq.h | 4
-rw-r--r-- drivers/crypto/ux500/cryp/cryp_irqp.h | 4
-rw-r--r-- drivers/crypto/ux500/cryp/cryp_p.h | 15
-rw-r--r-- drivers/crypto/ux500/hash/hash_core.c | 18
-rw-r--r-- drivers/crypto/vmx/aes.c | 2
-rw-r--r-- drivers/crypto/vmx/aes_cbc.c | 2
-rw-r--r-- drivers/crypto/vmx/aes_ctr.c | 2
-rw-r--r-- drivers/crypto/vmx/aes_xts.c | 2
-rw-r--r-- drivers/crypto/vmx/ghash.c | 2
-rw-r--r-- drivers/crypto/vmx/vmx.c | 2
-rw-r--r-- drivers/cxl/mem.c | 152
-rw-r--r-- drivers/dax/bus.c | 6
-rw-r--r-- drivers/dma/dmaengine.c | 1
-rw-r--r-- drivers/dma/dw/Kconfig | 2
-rw-r--r-- drivers/dma/idxd/device.c | 65
-rw-r--r-- drivers/dma/idxd/idxd.h | 3
-rw-r--r-- drivers/dma/idxd/init.c | 11
-rw-r--r-- drivers/dma/idxd/irq.c | 4
-rw-r--r-- drivers/dma/idxd/sysfs.c | 19
-rw-r--r-- drivers/dma/plx_dma.c | 18
-rw-r--r-- drivers/dma/tegra20-apb-dma.c | 4
-rw-r--r-- drivers/dma/xilinx/xilinx_dpdma.c | 31
-rw-r--r-- drivers/firewire/ohci.c | 4
-rw-r--r-- drivers/firmware/efi/libstub/Makefile | 3
-rw-r--r-- drivers/firmware/turris-mox-rwtm.c | 4
-rw-r--r-- drivers/gpio/gpio-moxtet.c | 4
-rw-r--r-- drivers/gpio/gpio-omap.c | 9
-rw-r--r-- drivers/gpio/gpiolib-sysfs.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h | 1
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_acpi.c | 22
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_link_training.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gvt/cmd_parser.c | 19
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.h | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 4
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 4
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 18
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 4
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c | 1
-rw-r--r-- drivers/gpu/drm/panel/panel-dsi-cm.c | 12
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 4
-rw-r--r-- drivers/gpu/drm/vc4/vc4_crtc.c | 17
-rw-r--r-- drivers/gpu/drm/vc4/vc4_plane.c | 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 17
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 5
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | 14
-rw-r--r-- drivers/gpu/drm/xen/xen_drm_front.c | 6
-rw-r--r-- drivers/gpu/drm/xen/xen_drm_front_conn.h | 1
-rw-r--r-- drivers/hid/amd-sfh-hid/amd_sfh_pcie.c | 40
-rw-r--r-- drivers/hid/amd-sfh-hid/amd_sfh_pcie.h | 1
-rw-r--r-- drivers/hid/hid-alps.c | 1
-rw-r--r-- drivers/hid/hid-asus.c | 3
-rw-r--r-- drivers/hid/hid-cp2112.c | 22
-rw-r--r-- drivers/hid/hid-google-hammer.c | 2
-rw-r--r-- drivers/hid/hid-ids.h | 2
-rw-r--r-- drivers/hid/wacom_wac.c | 8
-rw-r--r-- drivers/hv/Kconfig | 1
-rw-r--r-- drivers/hv/channel.c | 103
-rw-r--r-- drivers/hv/channel_mgmt.c | 86
-rw-r--r-- drivers/hv/connection.c | 7
-rw-r--r-- drivers/hv/hv.c | 152
-rw-r--r-- drivers/hv/hv_balloon.c | 89
-rw-r--r-- drivers/hv/hv_trace.h | 15
-rw-r--r-- drivers/hv/ring_buffer.c | 10
-rw-r--r-- drivers/hv/vmbus_drv.c | 93
-rw-r--r-- drivers/i2c/busses/i2c-designware-master.c | 1
-rw-r--r-- drivers/i2c/busses/i2c-exynos5.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-hix5hd2.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-jz4780.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-mv64xxx.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-stm32f4.c | 2
-rw-r--r-- drivers/i2c/i2c-core-base.c | 7
-rw-r--r-- drivers/infiniband/core/addr.c | 4
-rw-r--r-- drivers/infiniband/hw/cxgb4/cm.c | 3
-rw-r--r-- drivers/infiniband/hw/hfi1/affinity.c | 21
-rw-r--r-- drivers/infiniband/hw/hfi1/hfi.h | 1
-rw-r--r-- drivers/infiniband/hw/hfi1/init.c | 10
-rw-r--r-- drivers/infiniband/hw/hfi1/netdev_rx.c | 3
-rw-r--r-- drivers/infiniband/hw/qedr/verbs.c | 3
-rw-r--r-- drivers/infiniband/ulp/rtrs/rtrs-clt.c | 2
-rw-r--r-- drivers/input/joystick/n64joy.c | 4
-rw-r--r-- drivers/input/keyboard/nspire-keypad.c | 56
-rw-r--r-- drivers/input/serio/i8042-x86ia64io.h | 1
-rw-r--r-- drivers/input/touchscreen/elants_i2c.c | 5
-rw-r--r-- drivers/input/touchscreen/s6sy761.c | 4
-rw-r--r-- drivers/irqchip/Kconfig | 18
-rw-r--r-- drivers/irqchip/Makefile | 2
-rw-r--r-- drivers/irqchip/irq-aspeed-vic.c | 4
-rw-r--r-- drivers/irqchip/irq-bcm7120-l2.c | 2
-rw-r--r-- drivers/irqchip/irq-csky-apb-intc.c | 2
-rw-r--r-- drivers/irqchip/irq-gic-v2m.c | 2
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 10
-rw-r--r-- drivers/irqchip/irq-gic-v3-mbi.c | 2
-rw-r--r-- drivers/irqchip/irq-gic-v3.c | 10
-rw-r--r-- drivers/irqchip/irq-gic-v4.c | 27
-rw-r--r-- drivers/irqchip/irq-hip04.c | 4
-rw-r--r-- drivers/irqchip/irq-idt3243x.c | 124
-rw-r--r-- drivers/irqchip/irq-jcore-aic.c | 4
-rw-r--r-- drivers/irqchip/irq-loongson-pch-pic.c | 2
-rw-r--r-- drivers/irqchip/irq-mbigen.c | 4
-rw-r--r-- drivers/irqchip/irq-meson-gpio.c | 2
-rw-r--r-- drivers/irqchip/irq-mst-intc.c | 98
-rw-r--r-- drivers/irqchip/irq-mtk-cirq.c | 2
-rw-r--r-- drivers/irqchip/irq-mxs.c | 4
-rw-r--r-- drivers/irqchip/irq-sifive-plic.c | 4
-rw-r--r-- drivers/irqchip/irq-stm32-exti.c | 7
-rw-r--r-- drivers/irqchip/irq-sun4i.c | 2
-rw-r--r-- drivers/irqchip/irq-tb10x.c | 1
-rw-r--r-- drivers/irqchip/irq-ti-sci-inta.c | 2
-rw-r--r-- drivers/irqchip/irq-vic.c | 4
-rw-r--r-- drivers/irqchip/irq-wpcm450-aic.c | 161
-rw-r--r-- drivers/irqchip/irq-xilinx-intc.c | 2
-rw-r--r-- drivers/leds/leds-turris-omnia.c | 4
-rw-r--r-- drivers/mailbox/armada-37xx-rwtm-mailbox.c | 4
-rw-r--r-- drivers/md/dm-verity-fec.c | 11
-rw-r--r-- drivers/md/dm-verity-fec.h | 1
-rw-r--r-- drivers/mfd/intel_pmt.c | 112
-rw-r--r-- drivers/misc/lkdtm/bugs.c | 17
-rw-r--r-- drivers/misc/lkdtm/core.c | 1
-rw-r--r-- drivers/misc/lkdtm/lkdtm.h | 1
-rw-r--r-- drivers/mmc/host/meson-gx-mmc.c | 5
-rw-r--r-- drivers/mtd/nand/raw/mtk_nand.c | 4
-rw-r--r-- drivers/net/can/spi/mcp251x.c | 24
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb_core.c | 6
-rw-r--r-- drivers/net/dsa/lantiq_gswip.c | 195
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 30
-rw-r--r-- drivers/net/ethernet/amd/pcnet32.c | 5
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe.h | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bcm4908_enet.c | 1
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 9
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 23
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 3
-rw-r--r-- drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 102
-rw-r--r-- drivers/net/ethernet/davicom/dm9000.c | 6
-rw-r--r-- drivers/net/ethernet/dlink/sundance.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 9
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 8
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 25
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 3
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 55
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 36
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 12
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 9
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_xsk.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice.h | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_controlq.h | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dcb.c | 40
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dcb_nl.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 53
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_switch.c | 15
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_type.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 14
-rw-r--r-- drivers/net/ethernet/jme.c | 10
-rw-r--r-- drivers/net/ethernet/jme.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/dev.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 23
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 23
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c | 29
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 40
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c | 49
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 68
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c | 7
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_main.c | 8
-rw-r--r-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/cmsg.c | 1
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/main.h | 8
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/metadata.c | 16
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/offload.c | 48
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 9
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 84
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet.h | 12
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 12
-rw-r--r-- drivers/net/geneve.c | 30
-rw-r--r-- drivers/net/ieee802154/atusb.c | 1
-rw-r--r-- drivers/net/phy/bcm-phy-lib.c | 13
-rw-r--r-- drivers/net/phy/marvell.c | 32
-rw-r--r-- drivers/net/tun.c | 48
-rw-r--r-- drivers/net/usb/hso.c | 33
-rw-r--r-- drivers/net/virtio_net.c | 10
-rw-r--r-- drivers/net/vrf.c | 10
-rw-r--r-- drivers/net/vxlan.c | 18
-rw-r--r-- drivers/net/wan/hdlc_fr.c | 5
-rw-r--r-- drivers/net/wireless/ath/ath9k/beacon.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 7
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rfi.c | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 17
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c | 31
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 27
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c | 35
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 7
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7921/regs.h | 4
-rw-r--r-- drivers/net/wireless/virt_wifi.c | 5
-rw-r--r-- drivers/net/xen-netback/xenbus.c | 12
-rw-r--r-- drivers/nvdimm/bus.c | 14
-rw-r--r-- drivers/nvdimm/pmem.c | 37
-rw-r--r-- drivers/nvdimm/region_devs.c | 16
-rw-r--r-- drivers/of/fdt.c | 36
-rw-r--r-- drivers/of/of_private.h | 2
-rw-r--r-- drivers/of/overlay.c | 24
-rw-r--r-- drivers/of/property.c | 11
-rw-r--r-- drivers/of/unittest.c | 22
-rw-r--r-- drivers/pci/controller/pci-hyperv.c | 4
-rw-r--r-- drivers/perf/arm-cci.c | 12
-rw-r--r-- drivers/perf/arm-ccn.c | 31
-rw-r--r-- drivers/perf/arm-cmn.c | 22
-rw-r--r-- drivers/perf/arm_dmc620_pmu.c | 2
-rw-r--r-- drivers/perf/arm_dsu_pmu.c | 5
-rw-r--r-- drivers/perf/arm_pmu_platform.c | 54
-rw-r--r-- drivers/perf/arm_smmuv3_pmu.c | 36
-rw-r--r-- drivers/perf/arm_spe_pmu.c | 3
-rw-r--r-- drivers/perf/fsl_imx8_ddr_perf.c | 7
-rw-r--r-- drivers/perf/hisilicon/Makefile | 3
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c | 348
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_hha_pmu.c | 301
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c | 355
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_pa_pmu.c | 500
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_pmu.c | 79
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_pmu.h | 20
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c | 530
-rw-r--r-- drivers/perf/qcom_l2_pmu.c | 2
-rw-r--r-- drivers/perf/qcom_l3_pmu.c | 4
-rw-r--r-- drivers/perf/thunderx2_pmu.c | 4
-rw-r--r-- drivers/perf/xgene_pmu.c | 4
-rw-r--r-- drivers/pinctrl/core.c | 14
-rw-r--r-- drivers/pinctrl/intel/pinctrl-lewisburg.c | 6
-rw-r--r-- drivers/platform/mellanox/mlxbf-bootctl.c | 2
-rw-r--r-- drivers/platform/mellanox/mlxreg-hotplug.c | 4
-rw-r--r-- drivers/platform/surface/Kconfig | 69
-rw-r--r-- drivers/platform/surface/Makefile | 3
-rw-r--r-- drivers/platform/surface/aggregator/controller.c | 16
-rw-r--r-- drivers/platform/surface/surface_aggregator_registry.c | 626
-rw-r--r-- drivers/platform/surface/surface_dtx.c | 1289
-rw-r--r-- drivers/platform/surface/surface_platform_profile.c | 190
-rw-r--r-- drivers/platform/surface/surfacepro3_button.c | 2
-rw-r--r-- drivers/platform/x86/Kconfig | 26
-rw-r--r-- drivers/platform/x86/Makefile | 4
-rw-r--r-- drivers/platform/x86/adv_swbutton.c | 121
-rw-r--r-- drivers/platform/x86/asus-laptop.c | 2
-rw-r--r-- drivers/platform/x86/asus-wmi.c | 5
-rw-r--r-- drivers/platform/x86/classmate-laptop.c | 2
-rw-r--r-- drivers/platform/x86/dell/alienware-wmi.c | 4
-rw-r--r-- drivers/platform/x86/dell/dell-smbios-base.c | 2
-rw-r--r-- drivers/platform/x86/dell/dell-smbios-wmi.c | 3
-rw-r--r-- drivers/platform/x86/dell/dell-wmi-descriptor.c | 5
-rw-r--r-- drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c | 3
-rw-r--r-- drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c | 3
-rw-r--r-- drivers/platform/x86/dell/dell-wmi-sysman/sysman.c | 34
-rw-r--r-- drivers/platform/x86/dell/dell-wmi.c | 3
-rw-r--r-- drivers/platform/x86/gigabyte-wmi.c | 203
-rw-r--r-- drivers/platform/x86/hp-wmi.c | 101
-rw-r--r-- drivers/platform/x86/intel-hid.c | 16
-rw-r--r-- drivers/platform/x86/intel-vbtn.c | 3
-rw-r--r-- drivers/platform/x86/intel-wmi-sbl-fw-update.c | 3
-rw-r--r-- drivers/platform/x86/intel-wmi-thunderbolt.c | 3
-rw-r--r-- drivers/platform/x86/intel_chtdc_ti_pwrbtn.c | 2
-rw-r--r-- drivers/platform/x86/intel_pmc_core.c | 493
-rw-r--r-- drivers/platform/x86/intel_pmc_core.h | 53
-rw-r--r-- drivers/platform/x86/intel_pmt_class.c | 46
-rw-r--r-- drivers/platform/x86/intel_pmt_class.h | 1
-rw-r--r-- drivers/platform/x86/intel_pmt_telemetry.c | 20
-rw-r--r-- drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c | 33
-rw-r--r-- drivers/platform/x86/lg-laptop.c | 2
-rw-r--r-- drivers/platform/x86/panasonic-laptop.c | 2
-rw-r--r-- drivers/platform/x86/pmc_atom.c | 28
-rw-r--r-- drivers/platform/x86/thinkpad_acpi.c | 256
-rw-r--r-- drivers/platform/x86/touchscreen_dmi.c | 37
-rw-r--r-- drivers/platform/x86/wmi-bmof.c | 3
-rw-r--r-- drivers/platform/x86/wmi.c | 6
-rw-r--r-- drivers/platform/x86/xo15-ebook.c | 6
-rw-r--r-- drivers/ras/cec.c | 15
-rw-r--r-- drivers/regulator/bd9571mwv-regulator.c | 8
-rw-r--r-- drivers/remoteproc/pru_rproc.c | 20
-rw-r--r-- drivers/remoteproc/qcom_pil_info.c | 2
-rw-r--r-- drivers/scsi/hpsa_cmd.h | 78
-rw-r--r-- drivers/scsi/libiscsi.c | 26
-rw-r--r-- drivers/scsi/libsas/sas_ata.c | 9
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.c | 8
-rw-r--r-- drivers/scsi/scsi_transport_iscsi.c | 18
-rw-r--r-- drivers/scsi/scsi_transport_srp.c | 2
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 31
-rw-r--r-- drivers/sh/intc/core.c | 49
-rw-r--r-- drivers/soc/fsl/qbman/qman.c | 2
-rw-r--r-- drivers/soc/qcom/qcom-geni-se.c | 3
-rw-r--r-- drivers/target/iscsi/iscsi_target.c | 3
-rw-r--r-- drivers/thunderbolt/retimer.c | 4
-rw-r--r-- drivers/usb/cdns3/cdnsp-gadget.c | 4
-rw-r--r-- drivers/usb/usbip/stub_dev.c | 11
-rw-r--r-- drivers/usb/usbip/usbip_common.h | 3
-rw-r--r-- drivers/usb/usbip/usbip_event.c | 2
-rw-r--r-- drivers/usb/usbip/vhci_hcd.c | 1
-rw-r--r-- drivers/usb/usbip/vhci_sysfs.c | 30
-rw-r--r-- drivers/usb/usbip/vudc_dev.c | 1
-rw-r--r-- drivers/usb/usbip/vudc_sysfs.c | 5
-rw-r--r-- drivers/vdpa/mlx5/core/mlx5_vdpa.h | 4
-rw-r--r-- drivers/vdpa/mlx5/core/mr.c | 13
-rw-r--r-- drivers/vdpa/mlx5/core/resources.c | 3
-rw-r--r-- drivers/vdpa/mlx5/net/mlx5_vnet.c | 40
-rw-r--r-- drivers/vfio/pci/vfio_pci.c | 4
-rw-r--r-- drivers/vhost/vdpa.c | 6
-rw-r--r-- drivers/video/fbdev/hyperv_fb.c | 2
-rw-r--r-- drivers/watchdog/armada_37xx_wdt.c | 4
-rw-r--r-- drivers/xen/Kconfig | 31
-rw-r--r-- drivers/xen/Makefile | 3
-rw-r--r-- drivers/xen/events/events_base.c | 12
-rw-r--r-- drivers/xen/pcpu.c | 35
-rw-r--r-- drivers/xen/time.c | 3
-rw-r--r-- drivers/xen/xen-acpi-cpuhotplug.c | 446
-rw-r--r-- drivers/xen/xen-acpi-memhotplug.c | 475
-rw-r--r-- drivers/xen/xen-pciback/pci_stub.c | 6
-rw-r--r-- drivers/xen/xen-pciback/vpci.c | 7
-rw-r--r-- drivers/xen/xen-stub.c | 90
517 files changed, 11921 insertions, 4718 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 768a6b4d2368..4e2d76b8b697 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -544,9 +544,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
return -ENODEV;
#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
- /* If NMI wants to wake up CPU0, start CPU0. */
- if (wakeup_cpu0())
- start_cpu0();
+ cond_wakeup_cpu0();
#endif
}
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index b574cce98dc3..422753d52244 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -2054,7 +2054,7 @@ static int eni_send(struct atm_vcc *vcc,struct sk_buff *skb)
}
submitted++;
ATM_SKB(skb)->vcc = vcc;
- tasklet_disable(&ENI_DEV(vcc->dev)->task);
+ tasklet_disable_in_atomic(&ENI_DEV(vcc->dev)->task);
res = do_tx(skb);
tasklet_enable(&ENI_DEV(vcc->dev)->task);
if (res == enq_ok) return 0;
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index e2cf3b29123e..37a5e5f8b221 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -292,14 +292,16 @@ int driver_deferred_probe_check_state(struct device *dev)
static void deferred_probe_timeout_work_func(struct work_struct *work)
{
- struct device_private *private, *p;
+ struct device_private *p;
driver_deferred_probe_timeout = 0;
driver_deferred_probe_trigger();
flush_work(&deferred_probe_work);
- list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
- dev_info(private->device, "deferred probe pending\n");
+ mutex_lock(&deferred_probe_mutex);
+ list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
+ dev_info(p->device, "deferred probe pending\n");
+ mutex_unlock(&deferred_probe_mutex);
wake_up_all(&probe_timeout_waitqueue);
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index b0c71d3a81a0..bda5c815e441 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -313,6 +313,7 @@ struct xen_blkif {
struct work_struct free_work;
unsigned int nr_ring_pages;
+ bool multi_ref;
/* All rings for this device. */
struct xen_blkif_ring *rings;
unsigned int nr_rings;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index c2aaf690352c..125b22205d38 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -998,14 +998,17 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
for (i = 0; i < nr_grefs; i++) {
char ring_ref_name[RINGREF_NAME_LEN];
- snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
+ if (blkif->multi_ref)
+ snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
+ else {
+ WARN_ON(i != 0);
+ snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref");
+ }
+
err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
"%u", &ring_ref[i]);
if (err != 1) {
- if (nr_grefs == 1)
- break;
-
err = -EINVAL;
xenbus_dev_fatal(dev, err, "reading %s/%s",
dir, ring_ref_name);
@@ -1013,18 +1016,6 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
}
}
- if (err != 1) {
- WARN_ON(nr_grefs != 1);
-
- err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
- &ring_ref[0]);
- if (err != 1) {
- err = -EINVAL;
- xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
- return err;
- }
- }
-
err = -ENOMEM;
for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -1129,10 +1120,15 @@ static int connect_ring(struct backend_info *be)
blkif->nr_rings, blkif->blk_protocol, protocol,
blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");
- ring_page_order = xenbus_read_unsigned(dev->otherend,
- "ring-page-order", 0);
-
- if (ring_page_order > xen_blkif_max_ring_order) {
+ err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
+ &ring_page_order);
+ if (err != 1) {
+ blkif->nr_ring_pages = 1;
+ blkif->multi_ref = false;
+ } else if (ring_page_order <= xen_blkif_max_ring_order) {
+ blkif->nr_ring_pages = 1 << ring_page_order;
+ blkif->multi_ref = true;
+ } else {
err = -EINVAL;
xenbus_dev_fatal(dev, err,
"requested ring page order %d exceed max:%d",
@@ -1141,8 +1137,6 @@ static int connect_ring(struct backend_info *be)
return err;
}
- blkif->nr_ring_pages = 1 << ring_page_order;
-
if (blkif->nr_rings == 1)
return read_per_ring_refs(&blkif->rings[0], dev->otherend);
else {
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index e1c6798889f4..06c4efd97780 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2397,7 +2397,7 @@ static void blkfront_connect(struct blkfront_info *info)
}
/*
- * physcial-sector-size is a newer field, so old backends may not
+ * physical-sector-size is a newer field, so old backends may not
* provide this. Assume physical sector size to be the same as
* sector_size in that case.
*/
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 52683fd22e05..5cbfbd948f67 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -4849,8 +4849,8 @@ static int btusb_probe(struct usb_interface *intf,
data->diag = NULL;
}
- if (!enable_autosuspend)
- usb_disable_autosuspend(data->udev);
+ if (enable_autosuspend)
+ usb_enable_autosuspend(data->udev);
err = hci_register_dev(hdev);
if (err < 0)
@@ -4910,9 +4910,6 @@ static void btusb_disconnect(struct usb_interface *intf)
gpiod_put(data->reset_gpio);
hci_free_dev(hdev);
-
- if (!enable_autosuspend)
- usb_enable_autosuspend(data->udev);
}
#ifdef CONFIG_PM
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index b20fdcbd035b..fd87a59837fa 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -2,7 +2,7 @@
/*
* Turris Mox module configuration bus driver
*
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
*/
#include <dt-bindings/bus/moxtet.h>
@@ -879,6 +879,6 @@ static void __exit moxtet_exit(void)
}
module_exit(moxtet_exit);
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
MODULE_DESCRIPTION("CZ.NIC's Turris Mox module configuration bus");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index dd9e7343a5e3..ea0424922de7 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -618,7 +618,7 @@ mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
* This part of the memory is above 4 GB, so we don't
* care for the MBus bridge hole.
*/
- if (reg_start >= 0x100000000ULL)
+ if ((u64)reg_start >= 0x100000000ULL)
continue;
/*
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index a086dd34f932..4f501e4842ab 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -125,7 +125,7 @@ config AGP_HP_ZX1
config AGP_PARISC
tristate "HP Quicksilver AGP support"
- depends on AGP && PARISC && 64BIT
+ depends on AGP && PARISC && 64BIT && IOMMU_SBA
help
This option gives you AGP GART support for the HP Quicksilver
AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000
diff --git a/drivers/char/hw_random/ba431-rng.c b/drivers/char/hw_random/ba431-rng.c
index 410b50b05e21..5b7ca0416490 100644
--- a/drivers/char/hw_random/ba431-rng.c
+++ b/drivers/char/hw_random/ba431-rng.c
@@ -170,7 +170,6 @@ static int ba431_trng_init(struct hwrng *rng)
static int ba431_trng_probe(struct platform_device *pdev)
{
struct ba431_trng *ba431;
- struct resource *res;
int ret;
ba431 = devm_kzalloc(&pdev->dev, sizeof(*ba431), GFP_KERNEL);
@@ -179,8 +178,7 @@ static int ba431_trng_probe(struct platform_device *pdev)
ba431->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ba431->base = devm_ioremap_resource(&pdev->dev, res);
+ ba431->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ba431->base))
return PTR_ERR(ba431->base);
@@ -193,7 +191,7 @@ static int ba431_trng_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ba431);
- ret = hwrng_register(&ba431->rng);
+ ret = devm_hwrng_register(&pdev->dev, &ba431->rng);
if (ret) {
dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret);
return ret;
@@ -204,15 +202,6 @@ static int ba431_trng_probe(struct platform_device *pdev)
return 0;
}
-static int ba431_trng_remove(struct platform_device *pdev)
-{
- struct ba431_trng *ba431 = platform_get_drvdata(pdev);
-
- hwrng_unregister(&ba431->rng);
-
- return 0;
-}
-
static const struct of_device_id ba431_trng_dt_ids[] = {
{ .compatible = "silex-insight,ba431-rng", .data = NULL },
{ /* sentinel */ }
@@ -225,7 +214,6 @@ static struct platform_driver ba431_trng_driver = {
.of_match_table = ba431_trng_dt_ids,
},
.probe = ba431_trng_probe,
- .remove = ba431_trng_remove,
};
module_platform_driver(ba431_trng_driver);
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 1a7c43b43c6b..e7dd457e9b22 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/clk.h>
+#include <linux/reset.h>
#define RNG_CTRL 0x0
#define RNG_STATUS 0x4
@@ -32,6 +33,7 @@ struct bcm2835_rng_priv {
void __iomem *base;
bool mask_interrupts;
struct clk *clk;
+ struct reset_control *reset;
};
static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng)
@@ -88,11 +90,13 @@ static int bcm2835_rng_init(struct hwrng *rng)
int ret = 0;
u32 val;
- if (!IS_ERR(priv->clk)) {
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
- }
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_reset(priv->reset);
+ if (ret)
+ return ret;
if (priv->mask_interrupts) {
/* mask the interrupt */
@@ -115,8 +119,7 @@ static void bcm2835_rng_cleanup(struct hwrng *rng)
/* disable rng hardware */
rng_writel(priv, 0, RNG_CTRL);
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
}
struct bcm2835_rng_of_data {
@@ -155,9 +158,13 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
/* Clock is optional on most platforms */
- priv->clk = devm_clk_get(dev, NULL);
- if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ priv->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->reset = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(priv->reset))
+ return PTR_ERR(priv->reset);
priv->rng.name = pdev->name;
priv->rng.init = bcm2835_rng_init;
diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c
index 7a293f2147a0..302ffa354c2f 100644
--- a/drivers/char/hw_random/cctrng.c
+++ b/drivers/char/hw_random/cctrng.c
@@ -486,7 +486,6 @@ static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata)
static int cctrng_probe(struct platform_device *pdev)
{
- struct resource *req_mem_cc_regs = NULL;
struct cctrng_drvdata *drvdata;
struct device *dev = &pdev->dev;
int rc = 0;
@@ -510,27 +509,16 @@ static int cctrng_probe(struct platform_device *pdev)
drvdata->circ.buf = (char *)drvdata->data_buf;
- /* Get device resources */
- /* First CC registers space */
- req_mem_cc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- /* Map registers space */
- drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
+ drvdata->cc_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(drvdata->cc_base)) {
dev_err(dev, "Failed to ioremap registers");
return PTR_ERR(drvdata->cc_base);
}
- dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
- req_mem_cc_regs);
- dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
- &req_mem_cc_regs->start, drvdata->cc_base);
-
/* Then IRQ */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "Failed getting IRQ resource\n");
+ if (irq < 0)
return irq;
- }
/* parse sampling rate from device tree */
rc = cc_trng_parse_sampling_ratio(drvdata);
@@ -585,7 +573,7 @@ static int cctrng_probe(struct platform_device *pdev)
atomic_set(&drvdata->pending_hw, 1);
/* registration of the hwrng device */
- rc = hwrng_register(&drvdata->rng);
+ rc = devm_hwrng_register(dev, &drvdata->rng);
if (rc) {
dev_err(dev, "Could not register hwrng device.\n");
goto post_pm_err;
@@ -618,8 +606,6 @@ static int cctrng_remove(struct platform_device *pdev)
dev_dbg(dev, "Releasing cctrng resources...\n");
- hwrng_unregister(&drvdata->rng);
-
cc_trng_pm_fini(drvdata);
cc_trng_clk_fini(drvdata);
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 8c1c47dd9f46..adb3c2bd7783 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -396,7 +396,7 @@ static ssize_t hwrng_attr_selected_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
+ return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}
static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index eb7db27f9f19..d740b8814bf3 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -25,13 +25,13 @@
*/
#include <linux/hw_random.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stop_machine.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <asm/io.h>
#define PFX KBUILD_MODNAME ": "
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 5cc5fc504968..cede9f159102 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -30,8 +30,7 @@
#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#define RNG_REG_STATUS_RDY (1 << 0)
@@ -378,16 +377,13 @@ MODULE_DEVICE_TABLE(of, omap_rng_of_match);
static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
struct platform_device *pdev)
{
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
int irq, err;
- match = of_match_device(of_match_ptr(omap_rng_of_match), dev);
- if (!match) {
- dev_err(dev, "no compatible OF match\n");
- return -EINVAL;
- }
- priv->pdata = match->data;
+ priv->pdata = of_device_get_match_data(dev);
+ if (!priv->pdata)
+ return -ENODEV;
+
if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") ||
of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) {
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
index e8210c1715cf..99c8bd0859a1 100644
--- a/drivers/char/hw_random/pic32-rng.c
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -96,7 +96,7 @@ static int pic32_rng_probe(struct platform_device *pdev)
priv->rng.name = pdev->name;
priv->rng.read = pic32_rng_read;
- ret = hwrng_register(&priv->rng);
+ ret = devm_hwrng_register(&pdev->dev, &priv->rng);
if (ret)
goto err_register;
@@ -113,7 +113,6 @@ static int pic32_rng_remove(struct platform_device *pdev)
{
struct pic32_rng *rng = platform_get_drvdata(pdev);
- hwrng_unregister(&rng->rng);
writel(0, rng->base + RNGCON);
clk_disable_unprepare(rng->clk);
return 0;
diff --git a/drivers/char/hw_random/xiphera-trng.c b/drivers/char/hw_random/xiphera-trng.c
index 7bdab8c8a6a8..2a9fea72b2e0 100644
--- a/drivers/char/hw_random/xiphera-trng.c
+++ b/drivers/char/hw_random/xiphera-trng.c
@@ -63,14 +63,12 @@ static int xiphera_trng_probe(struct platform_device *pdev)
int ret;
struct xiphera_trng *trng;
struct device *dev = &pdev->dev;
- struct resource *res;
trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL);
if (!trng)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- trng->mem = devm_ioremap_resource(dev, res);
+ trng->mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(trng->mem))
return PTR_ERR(trng->mem);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0fe9e200e4c8..605969ed0f96 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -500,7 +500,6 @@ struct entropy_store {
unsigned short add_ptr;
unsigned short input_rotate;
int entropy_count;
- unsigned int initialized:1;
unsigned int last_data_init:1;
__u8 last_data[EXTRACT_SIZE];
};
@@ -660,7 +659,7 @@ static void process_random_ready_list(void)
*/
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
- int entropy_count, orig, has_initialized = 0;
+ int entropy_count, orig;
const int pool_size = r->poolinfo->poolfracbits;
int nfrac = nbits << ENTROPY_SHIFT;
@@ -717,23 +716,14 @@ retry:
if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
goto retry;
- if (has_initialized) {
- r->initialized = 1;
- kill_fasync(&fasync, SIGIO, POLL_IN);
- }
-
trace_credit_entropy_bits(r->name, nbits,
entropy_count >> ENTROPY_SHIFT, _RET_IP_);
if (r == &input_pool) {
int entropy_bits = entropy_count >> ENTROPY_SHIFT;
- if (crng_init < 2) {
- if (entropy_bits < 128)
- return;
+ if (crng_init < 2 && entropy_bits >= 128)
crng_reseed(&primary_crng, r);
- entropy_bits = ENTROPY_BITS(r);
- }
}
}
@@ -819,7 +809,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
{
- memcpy(&crng->state[0], "expand 32-byte k", 16);
+ chacha_init_consts(crng->state);
_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
crng_init_try_arch(crng);
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
@@ -827,7 +817,7 @@ static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
static void __init crng_initialize_primary(struct crng_state *crng)
{
- memcpy(&crng->state[0], "expand 32-byte k", 16);
+ chacha_init_consts(crng->state);
_extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
if (crng_init_try_arch_early(crng) && trust_cpu) {
invalidate_batched_entropy();
@@ -1372,8 +1362,7 @@ retry:
}
/*
- * This function does the actual extraction for extract_entropy and
- * extract_entropy_user.
+ * This function does the actual extraction for extract_entropy.
*
* Note: we assume that .poolwords is a multiple of 16 words.
*/
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
index 3633ed70f48f..1b18ce5ebab1 100644
--- a/drivers/char/tpm/eventlog/acpi.c
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -41,6 +41,27 @@ struct acpi_tcpa {
};
};
+/* Check that the given log is indeed a TPM2 log. */
+static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
+{
+ struct tcg_efi_specid_event_head *efispecid;
+ struct tcg_pcr_event *event_header;
+ int n;
+
+ if (len < sizeof(*event_header))
+ return false;
+ len -= sizeof(*event_header);
+ event_header = bios_event_log;
+
+ if (len < sizeof(*efispecid))
+ return false;
+ efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
+
+ n = memcmp(efispecid->signature, TCG_SPECID_SIG,
+ sizeof(TCG_SPECID_SIG));
+ return n == 0;
+}
+
/* read binary bios log */
int tpm_read_log_acpi(struct tpm_chip *chip)
{
@@ -52,6 +73,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
struct acpi_table_tpm2 *tbl;
struct acpi_tpm2_phy *tpm2_phy;
int format;
+ int ret;
log = &chip->log;
@@ -112,6 +134,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
log->bios_event_log_end = log->bios_event_log + len;
+ ret = -EIO;
virt = acpi_os_map_iomem(start, len);
if (!virt)
goto err;
@@ -119,11 +142,19 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
memcpy_fromio(log->bios_event_log, virt, len);
acpi_os_unmap_iomem(virt, len);
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2 &&
+ !tpm_is_tpm2_log(log->bios_event_log, len)) {
+ /* try EFI log next */
+ ret = -ENODEV;
+ goto err;
+ }
+
return format;
err:
kfree(log->bios_event_log);
log->bios_event_log = NULL;
- return -EIO;
+ return ret;
}
diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
index 7460f230bae4..8512ec76d526 100644
--- a/drivers/char/tpm/eventlog/common.c
+++ b/drivers/char/tpm/eventlog/common.c
@@ -107,6 +107,9 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
int log_version;
int rc = 0;
+ if (chip->flags & TPM_CHIP_FLAG_VIRTUAL)
+ return;
+
rc = tpm_read_log(chip);
if (rc < 0)
return;
diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
index 35229e5143ca..e6cb9d525e30 100644
--- a/drivers/char/tpm/eventlog/efi.c
+++ b/drivers/char/tpm/eventlog/efi.c
@@ -17,6 +17,7 @@ int tpm_read_log_efi(struct tpm_chip *chip)
{
struct efi_tcg2_final_events_table *final_tbl = NULL;
+ int final_events_log_size = efi_tpm_final_log_size;
struct linux_efi_tpm_eventlog *log_tbl;
struct tpm_bios_log *log;
u32 log_size;
@@ -66,12 +67,12 @@ int tpm_read_log_efi(struct tpm_chip *chip)
ret = tpm_log_version;
if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
- efi_tpm_final_log_size == 0 ||
+ final_events_log_size == 0 ||
tpm_log_version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
goto out;
final_tbl = memremap(efi.tpm_final_log,
- sizeof(*final_tbl) + efi_tpm_final_log_size,
+ sizeof(*final_tbl) + final_events_log_size,
MEMREMAP_WB);
if (!final_tbl) {
pr_err("Could not map UEFI TPM final log\n");
@@ -80,10 +81,18 @@ int tpm_read_log_efi(struct tpm_chip *chip)
goto out;
}
- efi_tpm_final_log_size -= log_tbl->final_events_preboot_size;
+ /*
+ * The 'final events log' size excludes the 'final events preboot log'
+ * at its beginning.
+ */
+ final_events_log_size -= log_tbl->final_events_preboot_size;
+ /*
+ * Allocate memory for the 'combined log' where we will append the
+ * 'final events log' to.
+ */
tmp = krealloc(log->bios_event_log,
- log_size + efi_tpm_final_log_size,
+ log_size + final_events_log_size,
GFP_KERNEL);
if (!tmp) {
kfree(log->bios_event_log);
@@ -94,15 +103,19 @@ int tpm_read_log_efi(struct tpm_chip *chip)
log->bios_event_log = tmp;
/*
- * Copy any of the final events log that didn't also end up in the
- * main log. Events can be logged in both if events are generated
+ * Append any of the 'final events log' that didn't also end up in the
+ * 'main log'. Events can be logged in both if events are generated
* between GetEventLog() and ExitBootServices().
*/
memcpy((void *)log->bios_event_log + log_size,
final_tbl->events + log_tbl->final_events_preboot_size,
- efi_tpm_final_log_size);
+ final_events_log_size);
+ /*
+ * The size of the 'combined log' is the size of the 'main log' plus
+ * the size of the 'final events log'.
+ */
log->bios_event_log_end = log->bios_event_log +
- log_size + efi_tpm_final_log_size;
+ log_size + final_events_log_size;
out:
memunmap(final_tbl);
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
index ec9a65e7887d..f19c227d20f4 100644
--- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -483,6 +483,7 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len)
expected = be32_to_cpup((__be32 *)(buf + 2));
if (expected > buf_len) {
dev_err(&chip->dev, "Buffer too small to receive i2c data\n");
+ rc = -E2BIG;
goto out_err;
}
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 4f7bf3929d6d..4e4b6d367612 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -66,7 +66,14 @@ EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);
static void devm_clk_hw_register_fixed_factor_release(struct device *dev, void *res)
{
- clk_hw_unregister_fixed_factor(&((struct clk_fixed_factor *)res)->hw);
+ struct clk_fixed_factor *fix = res;
+
+ /*
+ * We can not use clk_hw_unregister_fixed_factor, since it will kfree()
+ * the hw, resulting in double free. Just unregister the hw and let
+ * devres code kfree() it.
+ */
+ clk_hw_unregister(&fix->hw);
}
static struct clk_hw *
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5052541a0986..39cfc6c6a8d2 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -4357,20 +4357,19 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
/* search the list of notifiers for this clk */
list_for_each_entry(cn, &clk_notifier_list, node)
if (cn->clk == clk)
- break;
+ goto found;
/* if clk wasn't in the notifier list, allocate new clk_notifier */
- if (cn->clk != clk) {
- cn = kzalloc(sizeof(*cn), GFP_KERNEL);
- if (!cn)
- goto out;
+ cn = kzalloc(sizeof(*cn), GFP_KERNEL);
+ if (!cn)
+ goto out;
- cn->clk = clk;
- srcu_init_notifier_head(&cn->notifier_head);
+ cn->clk = clk;
+ srcu_init_notifier_head(&cn->notifier_head);
- list_add(&cn->node, &clk_notifier_list);
- }
+ list_add(&cn->node, &clk_notifier_list);
+found:
ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
clk->core->notifier_count++;
@@ -4395,32 +4394,28 @@ EXPORT_SYMBOL_GPL(clk_notifier_register);
*/
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
- struct clk_notifier *cn = NULL;
- int ret = -EINVAL;
+ struct clk_notifier *cn;
+ int ret = -ENOENT;
if (!clk || !nb)
return -EINVAL;
clk_prepare_lock();
- list_for_each_entry(cn, &clk_notifier_list, node)
- if (cn->clk == clk)
- break;
-
- if (cn->clk == clk) {
- ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
+ list_for_each_entry(cn, &clk_notifier_list, node) {
+ if (cn->clk == clk) {
+ ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
- clk->core->notifier_count--;
+ clk->core->notifier_count--;
- /* XXX the notifier code should handle this better */
- if (!cn->notifier_head.head) {
- srcu_cleanup_notifier_head(&cn->notifier_head);
- list_del(&cn->node);
- kfree(cn);
+ /* XXX the notifier code should handle this better */
+ if (!cn->notifier_head.head) {
+ srcu_cleanup_notifier_head(&cn->notifier_head);
+ list_del(&cn->node);
+ kfree(cn);
+ }
+ break;
}
-
- } else {
- ret = -ENOENT;
}
clk_prepare_unlock();
diff --git a/drivers/clk/qcom/camcc-sc7180.c b/drivers/clk/qcom/camcc-sc7180.c
index dbac5651ab85..9bcf2f8ed4de 100644
--- a/drivers/clk/qcom/camcc-sc7180.c
+++ b/drivers/clk/qcom/camcc-sc7180.c
@@ -304,7 +304,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = {
.name = "cam_cc_bps_clk_src",
.parent_data = cam_cc_parent_data_2,
.num_parents = 5,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -325,7 +325,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = {
.name = "cam_cc_cci_0_clk_src",
.parent_data = cam_cc_parent_data_5,
.num_parents = 3,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -339,7 +339,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = {
.name = "cam_cc_cci_1_clk_src",
.parent_data = cam_cc_parent_data_5,
.num_parents = 3,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -360,7 +360,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
.name = "cam_cc_cphy_rx_clk_src",
.parent_data = cam_cc_parent_data_3,
.num_parents = 6,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -379,7 +379,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
.name = "cam_cc_csi0phytimer_clk_src",
.parent_data = cam_cc_parent_data_0,
.num_parents = 4,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -393,7 +393,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
.name = "cam_cc_csi1phytimer_clk_src",
.parent_data = cam_cc_parent_data_0,
.num_parents = 4,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -407,7 +407,7 @@ static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
.name = "cam_cc_csi2phytimer_clk_src",
.parent_data = cam_cc_parent_data_0,
.num_parents = 4,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -421,7 +421,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
.name = "cam_cc_csi3phytimer_clk_src",
.parent_data = cam_cc_parent_data_0,
.num_parents = 4,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -443,7 +443,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
.name = "cam_cc_fast_ahb_clk_src",
.parent_data = cam_cc_parent_data_0,
.num_parents = 4,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -466,7 +466,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
.name = "cam_cc_icp_clk_src",
.parent_data = cam_cc_parent_data_2,
.num_parents = 5,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -488,7 +488,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = {
.name = "cam_cc_ife_0_clk_src",
.parent_data = cam_cc_parent_data_4,
.num_parents = 4,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -510,7 +510,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
.name = "cam_cc_ife_0_csid_clk_src",
.parent_data = cam_cc_parent_data_3,
.num_parents = 6,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -524,7 +524,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = {
.name = "cam_cc_ife_1_clk_src",
.parent_data = cam_cc_parent_data_4,
.num_parents = 4,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -538,7 +538,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
.name = "cam_cc_ife_1_csid_clk_src",
.parent_data = cam_cc_parent_data_3,
.num_parents = 6,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -553,7 +553,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
.parent_data = cam_cc_parent_data_4,
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -567,7 +567,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
.name = "cam_cc_ife_lite_csid_clk_src",
.parent_data = cam_cc_parent_data_3,
.num_parents = 6,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -590,7 +590,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
.name = "cam_cc_ipe_0_clk_src",
.parent_data = cam_cc_parent_data_2,
.num_parents = 5,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -613,7 +613,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
.name = "cam_cc_jpeg_clk_src",
.parent_data = cam_cc_parent_data_2,
.num_parents = 5,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -635,7 +635,7 @@ static struct clk_rcg2 cam_cc_lrme_clk_src = {
.name = "cam_cc_lrme_clk_src",
.parent_data = cam_cc_parent_data_6,
.num_parents = 5,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -656,7 +656,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = {
.name = "cam_cc_mclk0_clk_src",
.parent_data = cam_cc_parent_data_1,
.num_parents = 3,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -670,7 +670,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = {
.name = "cam_cc_mclk1_clk_src",
.parent_data = cam_cc_parent_data_1,
.num_parents = 3,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -684,7 +684,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = {
.name = "cam_cc_mclk2_clk_src",
.parent_data = cam_cc_parent_data_1,
.num_parents = 3,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -698,7 +698,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = {
.name = "cam_cc_mclk3_clk_src",
.parent_data = cam_cc_parent_data_1,
.num_parents = 3,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -712,7 +712,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = {
.name = "cam_cc_mclk4_clk_src",
.parent_data = cam_cc_parent_data_1,
.num_parents = 3,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -732,7 +732,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
index 43ecd507bf83..cf94a12459ea 100644
--- a/drivers/clk/socfpga/clk-gate.c
+++ b/drivers/clk/socfpga/clk-gate.c
@@ -99,7 +99,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
val &= GENMASK(socfpgaclk->width - 1, 0);
/* Check for GPIO_DB_CLK by its offset */
- if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
+ if ((uintptr_t) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
div = val + 1;
else
div = (1 << val);
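The one-character cast change matters on 64-bit builds: a pointer does not fit in an int, so (int)div_reg truncates the address (and the compiler warns about the size mismatch), while uintptr_t is defined to round-trip a pointer, keeping the offset-bit test valid. A standalone sketch of the corrected idiom:

    /* uintptr_t round-trips a pointer, so testing address bits is safe */
    static bool reg_has_offset_bit(void __iomem *reg, unsigned long bit)
    {
        return ((uintptr_t)reg & bit) != 0;
    }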
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index d0177824c518..1b885964fb34 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -51,7 +51,7 @@
static unsigned arch_timers_present __initdata;
-static void __iomem *arch_counter_base;
+static void __iomem *arch_counter_base __ro_after_init;
struct arch_timer {
void __iomem *base;
@@ -60,15 +60,15 @@ struct arch_timer {
#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
-static u32 arch_timer_rate;
-static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];
+static u32 arch_timer_rate __ro_after_init;
+static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI] __ro_after_init;
static struct clock_event_device __percpu *arch_timer_evt;
-static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
-static bool arch_timer_c3stop;
-static bool arch_timer_mem_use_virtual;
-static bool arch_counter_suspend_stop;
+static enum arch_timer_ppi_nr arch_timer_uses_ppi __ro_after_init = ARCH_TIMER_VIRT_PPI;
+static bool arch_timer_c3stop __ro_after_init;
+static bool arch_timer_mem_use_virtual __ro_after_init;
+static bool arch_counter_suspend_stop __ro_after_init;
#ifdef CONFIG_GENERIC_GETTIMEOFDAY
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
#else
@@ -76,7 +77,7 @@ static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_NONE;
#endif /* CONFIG_GENERIC_GETTIMEOFDAY */
static cpumask_t evtstrm_available = CPU_MASK_NONE;
-static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
+static bool evtstrm_enable __ro_after_init = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
static int __init early_evtstrm_cfg(char *buf)
{
@@ -176,7 +177,7 @@ static notrace u64 arch_counter_get_cntvct(void)
* to exist on arm64. arm doesn't use this before DT is probed so even
* if we don't have the cp15 accessors we won't have a problem.
*/
-u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
+u64 (*arch_timer_read_counter)(void) __ro_after_init = arch_counter_get_cntvct;
EXPORT_SYMBOL_GPL(arch_timer_read_counter);
static u64 arch_counter_read(struct clocksource *cs)
@@ -925,7 +926,7 @@ static int validate_timer_rate(void)
* rate was probed first, and don't verify that others match. If the first node
* probed has a clock-frequency property, this overrides the HW register.
*/
-static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
+static void __init arch_timer_of_configure_rate(u32 rate, struct device_node *np)
{
/* Who has more than one independent system counter? */
if (arch_timer_rate)
@@ -939,7 +940,7 @@ static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
pr_warn("frequency not available\n");
}
-static void arch_timer_banner(unsigned type)
+static void __init arch_timer_banner(unsigned type)
{
pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
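The annotations above are a hardening pass, not a behavior change: __ro_after_init moves a variable into a section that is remapped read-only once boot completes, which fits variables written only during early init, and __init lets the setup functions be discarded after boot. A hedged sketch of the pattern, with illustrative names:

    static u32 example_rate __ro_after_init;

    /* runs at early boot, before the section is made read-only */
    static int __init example_rate_cfg(char *buf)
    {
        return kstrtou32(buf, 10, &example_rate);
    }
    early_param("example_rate", example_rate_cfg);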
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index 996900d017c6..2fc93e46cea3 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -18,7 +18,7 @@
#define RATE_32K 32768
-#define TIMER_MODE_CONTINOUS 0x1
+#define TIMER_MODE_CONTINUOUS 0x1
#define TIMER_DOWNCOUNT_VAL 0xffffffff
#define PRCMU_TIMER_REF 0
@@ -55,13 +55,13 @@ static int __init clksrc_dbx500_prcmu_init(struct device_node *node)
/*
* The A9 sub system expects the timer to be configured as
- * a continous looping timer.
+ * a continuous looping timer.
* The PRCMU should configure it but if it for some reason
* don't we do it here.
*/
if (readl(clksrc_dbx500_timer_base + PRCMU_TIMER_MODE) !=
- TIMER_MODE_CONTINOUS) {
- writel(TIMER_MODE_CONTINOUS,
+ TIMER_MODE_CONTINUOUS) {
+ writel(TIMER_MODE_CONTINUOUS,
clksrc_dbx500_timer_base + PRCMU_TIMER_MODE);
writel(TIMER_DOWNCOUNT_VAL,
clksrc_dbx500_timer_base + PRCMU_TIMER_REF);
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 42e7e43b8fcd..3819ef5b7098 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -38,7 +38,7 @@ static int __init timer_get_base_and_rate(struct device_node *np,
}
/*
- * Not all implementations use a periphal clock, so don't panic
+ * Not all implementations use a peripheral clock, so don't panic
* if it's not present
*/
pclk = of_clk_get_by_name(np, "pclk");
@@ -52,18 +52,34 @@ static int __init timer_get_base_and_rate(struct device_node *np,
return 0;
timer_clk = of_clk_get_by_name(np, "timer");
- if (IS_ERR(timer_clk))
- return PTR_ERR(timer_clk);
+ if (IS_ERR(timer_clk)) {
+ ret = PTR_ERR(timer_clk);
+ goto out_pclk_disable;
+ }
ret = clk_prepare_enable(timer_clk);
if (ret)
- return ret;
+ goto out_timer_clk_put;
*rate = clk_get_rate(timer_clk);
- if (!(*rate))
- return -EINVAL;
+ if (!(*rate)) {
+ ret = -EINVAL;
+ goto out_timer_clk_disable;
+ }
return 0;
+
+out_timer_clk_disable:
+ clk_disable_unprepare(timer_clk);
+out_timer_clk_put:
+ clk_put(timer_clk);
+out_pclk_disable:
+ if (!IS_ERR(pclk)) {
+ clk_disable_unprepare(pclk);
+ clk_put(pclk);
+ }
+ iounmap(*base);
+ return ret;
}
static int __init add_clockevent(struct device_node *event_timer)
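The dw_apb change replaces bare early returns with an unwind ladder, so the pclk enable, the timer clock reference and the ioremap are all released in reverse order when a later step fails. The shape of the idiom, reduced to hypothetical acquire/release helpers:

    static int example_init(void)
    {
        int ret;

        ret = acquire_a();      /* hypothetical helpers throughout */
        if (ret)
            return ret;
        ret = acquire_b();
        if (ret)
            goto out_release_a;
        ret = acquire_c();
        if (ret)
            goto out_release_b;
        return 0;

    out_release_b:
        release_b();
    out_release_a:
        release_a();
        return ret;
    }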
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 269a691bd2c4..977fd05ac35f 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -18,6 +18,9 @@
#include <linux/sched_clock.h>
#include <linux/mm.h>
#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/acpi.h>
#include <clocksource/hyperv_timer.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
@@ -43,14 +46,13 @@ static u64 hv_sched_clock_offset __ro_after_init;
*/
static bool direct_mode_enabled;
-static int stimer0_irq;
-static int stimer0_vector;
+static int stimer0_irq = -1;
static int stimer0_message_sint;
+static DEFINE_PER_CPU(long, stimer0_evt);
/*
- * ISR for when stimer0 is operating in Direct Mode. Direct Mode
- * does not use VMbus or any VMbus messages, so process here and not
- * in the VMbus driver code.
+ * Common code for stimer0 interrupts coming via Direct Mode or
+ * as a VMbus message.
*/
void hv_stimer0_isr(void)
{
@@ -61,6 +63,16 @@ void hv_stimer0_isr(void)
}
EXPORT_SYMBOL_GPL(hv_stimer0_isr);
+/*
+ * stimer0 interrupt handler for architectures that support
+ * per-cpu interrupts, which also implies Direct Mode.
+ */
+static irqreturn_t hv_stimer0_percpu_isr(int irq, void *dev_id)
+{
+ hv_stimer0_isr();
+ return IRQ_HANDLED;
+}
+
static int hv_ce_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
@@ -68,16 +80,16 @@ static int hv_ce_set_next_event(unsigned long delta,
current_tick = hv_read_reference_counter();
current_tick += delta;
- hv_init_timer(0, current_tick);
+ hv_set_register(HV_REGISTER_STIMER0_COUNT, current_tick);
return 0;
}
static int hv_ce_shutdown(struct clock_event_device *evt)
{
- hv_init_timer(0, 0);
- hv_init_timer_config(0, 0);
- if (direct_mode_enabled)
- hv_disable_stimer0_percpu_irq(stimer0_irq);
+ hv_set_register(HV_REGISTER_STIMER0_COUNT, 0);
+ hv_set_register(HV_REGISTER_STIMER0_CONFIG, 0);
+ if (direct_mode_enabled && stimer0_irq >= 0)
+ disable_percpu_irq(stimer0_irq);
return 0;
}
@@ -95,8 +107,9 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
* on the specified hardware vector/IRQ.
*/
timer_cfg.direct_mode = 1;
- timer_cfg.apic_vector = stimer0_vector;
- hv_enable_stimer0_percpu_irq(stimer0_irq);
+ timer_cfg.apic_vector = HYPERV_STIMER0_VECTOR;
+ if (stimer0_irq >= 0)
+ enable_percpu_irq(stimer0_irq, IRQ_TYPE_NONE);
} else {
/*
* When it expires, the timer will generate a VMbus message,
@@ -105,7 +118,7 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
timer_cfg.direct_mode = 0;
timer_cfg.sintx = stimer0_message_sint;
}
- hv_init_timer_config(0, timer_cfg.as_uint64);
+ hv_set_register(HV_REGISTER_STIMER0_CONFIG, timer_cfg.as_uint64);
return 0;
}
@@ -169,10 +182,58 @@ int hv_stimer_cleanup(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(hv_stimer_cleanup);
+/*
+ * These placeholders are overridden by arch specific code on
+ * architectures that need special setup of the stimer0 IRQ because
+ * they don't support per-cpu IRQs (such as x86/x64).
+ */
+void __weak hv_setup_stimer0_handler(void (*handler)(void))
+{
+};
+
+void __weak hv_remove_stimer0_handler(void)
+{
+};
+
+/* Called only on architectures with per-cpu IRQs (i.e., not x86/x64) */
+static int hv_setup_stimer0_irq(void)
+{
+ int ret;
+
+ ret = acpi_register_gsi(NULL, HYPERV_STIMER0_VECTOR,
+ ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH);
+ if (ret < 0) {
+ pr_err("Can't register Hyper-V stimer0 GSI. Error %d", ret);
+ return ret;
+ }
+ stimer0_irq = ret;
+
+ ret = request_percpu_irq(stimer0_irq, hv_stimer0_percpu_isr,
+ "Hyper-V stimer0", &stimer0_evt);
+ if (ret) {
+ pr_err("Can't request Hyper-V stimer0 IRQ %d. Error %d",
+ stimer0_irq, ret);
+ acpi_unregister_gsi(stimer0_irq);
+ stimer0_irq = -1;
+ }
+ return ret;
+}
+
+static void hv_remove_stimer0_irq(void)
+{
+ if (stimer0_irq == -1) {
+ hv_remove_stimer0_handler();
+ } else {
+ free_percpu_irq(stimer0_irq, &stimer0_evt);
+ acpi_unregister_gsi(stimer0_irq);
+ stimer0_irq = -1;
+ }
+}
+
/* hv_stimer_alloc - Global initialization of the clockevent and stimer0 */
-int hv_stimer_alloc(void)
+int hv_stimer_alloc(bool have_percpu_irqs)
{
- int ret = 0;
+ int ret;
/*
* Synthetic timers are always available except on old versions of
@@ -188,29 +249,37 @@ int hv_stimer_alloc(void)
direct_mode_enabled = ms_hyperv.misc_features &
HV_STIMER_DIRECT_MODE_AVAILABLE;
- if (direct_mode_enabled) {
- ret = hv_setup_stimer0_irq(&stimer0_irq, &stimer0_vector,
- hv_stimer0_isr);
+
+ /*
+ * If Direct Mode isn't enabled, the remainder of the initialization
+ * is done later by hv_stimer_legacy_init()
+ */
+ if (!direct_mode_enabled)
+ return 0;
+
+ if (have_percpu_irqs) {
+ ret = hv_setup_stimer0_irq();
if (ret)
- goto free_percpu;
+ goto free_clock_event;
+ } else {
+ hv_setup_stimer0_handler(hv_stimer0_isr);
+ }
- /*
- * Since we are in Direct Mode, stimer initialization
- * can be done now with a CPUHP value in the same range
- * as other clockevent devices.
- */
- ret = cpuhp_setup_state(CPUHP_AP_HYPERV_TIMER_STARTING,
- "clockevents/hyperv/stimer:starting",
- hv_stimer_init, hv_stimer_cleanup);
- if (ret < 0)
- goto free_stimer0_irq;
+ /*
+ * Since we are in Direct Mode, stimer initialization
+ * can be done now with a CPUHP value in the same range
+ * as other clockevent devices.
+ */
+ ret = cpuhp_setup_state(CPUHP_AP_HYPERV_TIMER_STARTING,
+ "clockevents/hyperv/stimer:starting",
+ hv_stimer_init, hv_stimer_cleanup);
+ if (ret < 0) {
+ hv_remove_stimer0_irq();
+ goto free_clock_event;
}
return ret;
-free_stimer0_irq:
- hv_remove_stimer0_irq(stimer0_irq);
- stimer0_irq = 0;
-free_percpu:
+free_clock_event:
free_percpu(hv_clock_event);
hv_clock_event = NULL;
return ret;
@@ -254,23 +323,6 @@ void hv_stimer_legacy_cleanup(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(hv_stimer_legacy_cleanup);
-
-/* hv_stimer_free - Free global resources allocated by hv_stimer_alloc() */
-void hv_stimer_free(void)
-{
- if (!hv_clock_event)
- return;
-
- if (direct_mode_enabled) {
- cpuhp_remove_state(CPUHP_AP_HYPERV_TIMER_STARTING);
- hv_remove_stimer0_irq(stimer0_irq);
- stimer0_irq = 0;
- }
- free_percpu(hv_clock_event);
- hv_clock_event = NULL;
-}
-EXPORT_SYMBOL_GPL(hv_stimer_free);
-
/*
* Do a global cleanup of clockevents for the cases of kexec and
* vmbus exit
@@ -287,12 +339,16 @@ void hv_stimer_global_cleanup(void)
hv_stimer_legacy_cleanup(cpu);
}
- /*
- * If Direct Mode is enabled, the cpuhp teardown callback
- * (hv_stimer_cleanup) will be run on all CPUs to stop the
- * stimers.
- */
- hv_stimer_free();
+ if (!hv_clock_event)
+ return;
+
+ if (direct_mode_enabled) {
+ cpuhp_remove_state(CPUHP_AP_HYPERV_TIMER_STARTING);
+ hv_remove_stimer0_irq();
+ stimer0_irq = -1;
+ }
+ free_percpu(hv_clock_event);
+ hv_clock_event = NULL;
}
EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
@@ -302,14 +359,6 @@ EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
* the other that uses the TSC reference page feature as defined in the
* TLFS. The MSR version is for compatibility with old versions of
* Hyper-V and 32-bit x86. The TSC reference page version is preferred.
- *
- * The Hyper-V clocksource ratings of 250 are chosen to be below the
- * TSC clocksource rating of 300. In configurations where Hyper-V offers
- * an InvariantTSC, the TSC is not marked "unstable", so the TSC clocksource
- * is available and preferred. With the higher rating, it will be the
- * default. On older hardware and Hyper-V versions, the TSC is marked
- * "unstable", so no TSC clocksource is created and the selected Hyper-V
- * clocksource will be the default.
*/
u64 (*hv_read_reference_counter)(void);
@@ -331,7 +380,7 @@ static u64 notrace read_hv_clock_tsc(void)
u64 current_tick = hv_read_tsc_page(hv_get_tsc_page());
if (current_tick == U64_MAX)
- hv_get_time_ref_count(current_tick);
+ current_tick = hv_get_register(HV_REGISTER_TIME_REF_COUNT);
return current_tick;
}
@@ -352,9 +401,9 @@ static void suspend_hv_clock_tsc(struct clocksource *arg)
u64 tsc_msr;
/* Disable the TSC page */
- hv_get_reference_tsc(tsc_msr);
+ tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
tsc_msr &= ~BIT_ULL(0);
- hv_set_reference_tsc(tsc_msr);
+ hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
}
@@ -364,39 +413,44 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
u64 tsc_msr;
/* Re-enable the TSC page */
- hv_get_reference_tsc(tsc_msr);
+ tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
tsc_msr &= GENMASK_ULL(11, 0);
tsc_msr |= BIT_ULL(0) | (u64)phys_addr;
- hv_set_reference_tsc(tsc_msr);
+ hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
}
+#ifdef VDSO_CLOCKMODE_HVCLOCK
static int hv_cs_enable(struct clocksource *cs)
{
- hv_enable_vdso_clocksource();
+ vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
return 0;
}
+#endif
static struct clocksource hyperv_cs_tsc = {
.name = "hyperv_clocksource_tsc_page",
- .rating = 250,
+ .rating = 500,
.read = read_hv_clock_tsc_cs,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.suspend= suspend_hv_clock_tsc,
.resume = resume_hv_clock_tsc,
+#ifdef VDSO_CLOCKMODE_HVCLOCK
.enable = hv_cs_enable,
+ .vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK,
+#else
+ .vdso_clock_mode = VDSO_CLOCKMODE_NONE,
+#endif
};
static u64 notrace read_hv_clock_msr(void)
{
- u64 current_tick;
/*
* Read the partition counter to get the current tick count. This count
* is set to 0 when the partition is created and is incremented in
* 100 nanosecond units.
*/
- hv_get_time_ref_count(current_tick);
- return current_tick;
+ return hv_get_register(HV_REGISTER_TIME_REF_COUNT);
}
static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
@@ -412,12 +466,36 @@ static u64 notrace read_hv_sched_clock_msr(void)
static struct clocksource hyperv_cs_msr = {
.name = "hyperv_clocksource_msr",
- .rating = 250,
+ .rating = 500,
.read = read_hv_clock_msr_cs,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+/*
+ * Reference to pv_ops must be inline so objtool
+ * detection of noinstr violations can work correctly.
+ */
+#ifdef CONFIG_GENERIC_SCHED_CLOCK
+static __always_inline void hv_setup_sched_clock(void *sched_clock)
+{
+ /*
+ * We're on an architecture with generic sched clock (not x86/x64).
+ * The Hyper-V sched clock read function returns nanoseconds, not
+ * the normal 100ns units of the Hyper-V synthetic clock.
+ */
+ sched_clock_register(sched_clock, 64, NSEC_PER_SEC);
+}
+#elif defined CONFIG_PARAVIRT
+static __always_inline void hv_setup_sched_clock(void *sched_clock)
+{
+ /* We're on x86/x64 *and* using PV ops */
+ paravirt_set_sched_clock(sched_clock);
+}
+#else /* !CONFIG_GENERIC_SCHED_CLOCK && !CONFIG_PARAVIRT */
+static __always_inline void hv_setup_sched_clock(void *sched_clock) {}
+#endif /* CONFIG_GENERIC_SCHED_CLOCK */
+
static bool __init hv_init_tsc_clocksource(void)
{
u64 tsc_msr;
@@ -429,6 +507,22 @@ static bool __init hv_init_tsc_clocksource(void)
if (hv_root_partition)
return false;
+ /*
+ * If Hyper-V offers TSC_INVARIANT, then the virtualized TSC correctly
+ * handles frequency and offset changes due to live migration,
+ * pause/resume, and other VM management operations. So lower the
+ * Hyper-V Reference TSC rating, causing the generic TSC to be used.
+ * TSC_INVARIANT is not offered on ARM64, so the Hyper-V Reference
+ * TSC will be preferred over the virtualized ARM64 arch counter.
+ * While the Hyper-V MSR clocksource won't be used since the
+ * Reference TSC clocksource is present, change its rating as
+ * well for consistency.
+ */
+ if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
+ hyperv_cs_tsc.rating = 250;
+ hyperv_cs_msr.rating = 250;
+ }
+
hv_read_reference_counter = read_hv_clock_tsc;
phys_addr = virt_to_phys(hv_get_tsc_page());
@@ -439,12 +533,11 @@ static bool __init hv_init_tsc_clocksource(void)
* (which already has at least the low 12 bits set to zero since
* it is page aligned). Also set the "enable" bit, which is bit 0.
*/
- hv_get_reference_tsc(tsc_msr);
+ tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
tsc_msr &= GENMASK_ULL(11, 0);
tsc_msr = tsc_msr | 0x1 | (u64)phys_addr;
- hv_set_reference_tsc(tsc_msr);
+ hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
- hv_set_clocksource_vdso(hyperv_cs_tsc);
clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
hv_sched_clock_offset = hv_read_reference_counter();
@@ -457,7 +550,7 @@ void __init hv_init_clocksource(void)
{
/*
* Try to set up the TSC page clocksource. If it succeeds, we're
- * done. Otherwise, set up the MSR clocksoruce. At least one of
+ * done. Otherwise, set up the MSR clocksource. At least one of
* these will always be available except on very old versions of
* Hyper-V on x86. In that case we won't have a Hyper-V
* clocksource, but Linux will still run with a clocksource based
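Two patterns in the hyperv_timer rework are worth calling out. The hv_get_register()/hv_set_register() accessors replace per-register macros so the same code serves x86 MSRs and ARM64 system registers, and the __weak stubs let an architecture without per-cpu IRQs (x86/x64) override the stimer0 hooks while other architectures keep the empty defaults. A hedged sketch of the weak-override linkage (the two definitions live in different files; names are illustrative):

    /* generic file: empty default, used where per-cpu IRQs exist */
    void __weak hv_setup_example_handler(void (*handler)(void))
    {
    }

    /* x86-only file: strong definition, preferred by the linker */
    void hv_setup_example_handler(void (*handler)(void))
    {
        example_arch_callback = handler;    /* hypothetical arch hook */
    }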
diff --git a/drivers/clocksource/ingenic-ost.c b/drivers/clocksource/ingenic-ost.c
index 029efc2731b4..06d25754e606 100644
--- a/drivers/clocksource/ingenic-ost.c
+++ b/drivers/clocksource/ingenic-ost.c
@@ -88,9 +88,9 @@ static int __init ingenic_ost_probe(struct platform_device *pdev)
return PTR_ERR(ost->regs);
map = device_node_to_regmap(dev->parent->of_node);
- if (!map) {
+ if (IS_ERR(map)) {
dev_err(dev, "regmap not found");
- return -EINVAL;
+ return PTR_ERR(map);
}
ost->clk = devm_clk_get(dev, "ost");
@@ -167,13 +167,14 @@ static const struct ingenic_ost_soc_info jz4725b_ost_soc_info = {
.is64bit = false,
};
-static const struct ingenic_ost_soc_info jz4770_ost_soc_info = {
+static const struct ingenic_ost_soc_info jz4760b_ost_soc_info = {
.is64bit = true,
};
static const struct of_device_id ingenic_ost_of_match[] = {
{ .compatible = "ingenic,jz4725b-ost", .data = &jz4725b_ost_soc_info, },
- { .compatible = "ingenic,jz4770-ost", .data = &jz4770_ost_soc_info, },
+ { .compatible = "ingenic,jz4760b-ost", .data = &jz4760b_ost_soc_info, },
+ { .compatible = "ingenic,jz4770-ost", .data = &jz4760b_ost_soc_info, },
{ }
};
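The regmap fix corrects a wrong error convention: device_node_to_regmap() returns an ERR_PTR-encoded errno and never NULL, so the old !map test could never fire. The contract, in a minimal helper:

    static int example_get_map(struct device_node *np, struct regmap **out)
    {
        struct regmap *map = device_node_to_regmap(np);

        if (IS_ERR(map))
            return PTR_ERR(map);    /* decode the negative errno */
        *out = map;
        return 0;
    }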
diff --git a/drivers/clocksource/ingenic-timer.c b/drivers/clocksource/ingenic-timer.c
index 905fd6b163a8..24ed0f1f089b 100644
--- a/drivers/clocksource/ingenic-timer.c
+++ b/drivers/clocksource/ingenic-timer.c
@@ -264,6 +264,7 @@ static const struct ingenic_soc_info jz4725b_soc_info = {
static const struct of_device_id ingenic_tcu_of_match[] = {
{ .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, },
{ .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, },
+ { .compatible = "ingenic,jz4760-tcu", .data = &jz4740_soc_info, },
{ .compatible = "ingenic,jz4770-tcu", .data = &jz4740_soc_info, },
{ .compatible = "ingenic,x1000-tcu", .data = &jz4740_soc_info, },
{ /* sentinel */ }
@@ -358,6 +359,7 @@ err_free_ingenic_tcu:
TIMER_OF_DECLARE(jz4740_tcu_intc, "ingenic,jz4740-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(jz4725b_tcu_intc, "ingenic,jz4725b-tcu", ingenic_tcu_init);
+TIMER_OF_DECLARE(jz4760_tcu_intc, "ingenic,jz4760-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(jz4770_tcu_intc, "ingenic,jz4770-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(x1000_tcu_intc, "ingenic,x1000-tcu", ingenic_tcu_init);
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index c98f8851fd68..d7ed99f0001f 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -339,8 +339,9 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch)
sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
SH_CMT16_CMCSR_CKS512);
} else {
- sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM |
- SH_CMT32_CMCSR_CMTOUT_IE |
+ u32 cmtout = ch->cmt->info->model <= SH_CMT_48BIT ?
+ SH_CMT32_CMCSR_CMTOUT_IE : 0;
+ sh_cmt_write_cmcsr(ch, cmtout | SH_CMT32_CMCSR_CMM |
SH_CMT32_CMCSR_CMR_IRQ |
SH_CMT32_CMCSR_CKS_RCLK8);
}
diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c
index 787dbebbb432..27af17c99590 100644
--- a/drivers/clocksource/timer-atmel-tcb.c
+++ b/drivers/clocksource/timer-atmel-tcb.c
@@ -455,9 +455,9 @@ static int __init tcb_clksrc_init(struct device_node *node)
tcaddr = tc.regs;
if (bits == 32) {
- /* use apropriate function to read 32 bit counter */
+ /* use appropriate function to read 32 bit counter */
clksrc.read = tc_get_cycles32;
- /* setup ony channel 0 */
+ /* setup only channel 0 */
tcb_setup_single_chan(&tc, best_divisor_idx);
tc_sched_clock = tc_sched_clock_read32;
tc_delay_timer.read_current_timer = tc_delay_timer_read32;
diff --git a/drivers/clocksource/timer-fsl-ftm.c b/drivers/clocksource/timer-fsl-ftm.c
index 12a2ed7cfaff..93f336ec875a 100644
--- a/drivers/clocksource/timer-fsl-ftm.c
+++ b/drivers/clocksource/timer-fsl-ftm.c
@@ -116,7 +116,7 @@ static int ftm_set_next_event(unsigned long delta,
* to the MOD register latches the value into a buffer. The MOD
* register is updated with the value of its write buffer with
* the following scenario:
- * a, the counter source clock is diabled.
+ * a, the counter source clock is disabled.
*/
ftm_counter_disable(priv->clkevt_base);
diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c
index ab623b25a47b..cfa4ec7ef396 100644
--- a/drivers/clocksource/timer-microchip-pit64b.c
+++ b/drivers/clocksource/timer-microchip-pit64b.c
@@ -237,7 +237,7 @@ static void __init mchp_pit64b_pres_compute(u32 *pres, u32 clk_rate,
break;
}
- /* Use the bigest prescaler if we didn't match one. */
+ /* Use the biggest prescaler if we didn't match one. */
if (*pres == MCHP_PIT64B_PRES_MAX)
*pres = MCHP_PIT64B_PRES_MAX - 1;
}
diff --git a/drivers/clocksource/timer-npcm7xx.c b/drivers/clocksource/timer-npcm7xx.c
index 9780ffd8010e..a00520cbb660 100644
--- a/drivers/clocksource/timer-npcm7xx.c
+++ b/drivers/clocksource/timer-npcm7xx.c
@@ -208,5 +208,6 @@ static int __init npcm7xx_timer_init(struct device_node *np)
return 0;
}
+TIMER_OF_DECLARE(wpcm450, "nuvoton,wpcm450-timer", npcm7xx_timer_init);
TIMER_OF_DECLARE(npcm7xx, "nuvoton,npcm750-timer", npcm7xx_timer_init);
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index 572da477c6d3..529cc6a51cdb 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -211,10 +211,10 @@ out_fail:
}
/**
- * timer_of_cleanup - release timer_of ressources
+ * timer_of_cleanup - release timer_of resources
* @to: timer_of structure
*
- * Release the ressources that has been used in timer_of_init().
+ * Release the resources that have been used in timer_of_init().
* This function should be called in init error cases
*/
void __init timer_of_cleanup(struct timer_of *to)
diff --git a/drivers/clocksource/timer-pistachio.c b/drivers/clocksource/timer-pistachio.c
index a2dd85d0c1d7..6f37181a8c63 100644
--- a/drivers/clocksource/timer-pistachio.c
+++ b/drivers/clocksource/timer-pistachio.c
@@ -71,7 +71,7 @@ static u64 notrace
pistachio_clocksource_read_cycles(struct clocksource *cs)
{
struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
- u32 counter, overflw;
+ u32 counter, overflow;
unsigned long flags;
/*
@@ -80,7 +80,7 @@ pistachio_clocksource_read_cycles(struct clocksource *cs)
*/
raw_spin_lock_irqsave(&pcs->lock, flags);
- overflw = gpt_readl(pcs->base, TIMER_CURRENT_OVERFLOW_VALUE, 0);
+ overflow = gpt_readl(pcs->base, TIMER_CURRENT_OVERFLOW_VALUE, 0);
counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0);
raw_spin_unlock_irqrestore(&pcs->lock, flags);
diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
index 33b3e8aa2cc5..b6f97960d8ee 100644
--- a/drivers/clocksource/timer-ti-dm-systimer.c
+++ b/drivers/clocksource/timer-ti-dm-systimer.c
@@ -2,6 +2,7 @@
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
+#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -449,13 +450,13 @@ static int dmtimer_set_next_event(unsigned long cycles,
struct dmtimer_systimer *t = &clkevt->t;
void __iomem *pend = t->base + t->pend;
- writel_relaxed(0xffffffff - cycles, t->base + t->counter);
while (readl_relaxed(pend) & WP_TCRR)
cpu_relax();
+ writel_relaxed(0xffffffff - cycles, t->base + t->counter);
- writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
while (readl_relaxed(pend) & WP_TCLR)
cpu_relax();
+ writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
return 0;
}
@@ -490,18 +491,18 @@ static int dmtimer_set_periodic(struct clock_event_device *evt)
dmtimer_clockevent_shutdown(evt);
/* Looks like we need to first set the load value separately */
- writel_relaxed(clkevt->period, t->base + t->load);
while (readl_relaxed(pend) & WP_TLDR)
cpu_relax();
+ writel_relaxed(clkevt->period, t->base + t->load);
- writel_relaxed(clkevt->period, t->base + t->counter);
while (readl_relaxed(pend) & WP_TCRR)
cpu_relax();
+ writel_relaxed(clkevt->period, t->base + t->counter);
- writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
- t->base + t->ctrl);
while (readl_relaxed(pend) & WP_TCLR)
cpu_relax();
+ writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
+ t->base + t->ctrl);
return 0;
}
@@ -530,17 +531,17 @@ static void omap_clockevent_unidle(struct clock_event_device *evt)
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
}
-static int __init dmtimer_clockevent_init(struct device_node *np)
+static int __init dmtimer_clkevt_init_common(struct dmtimer_clockevent *clkevt,
+ struct device_node *np,
+ unsigned int features,
+ const struct cpumask *cpumask,
+ const char *name,
+ int rating)
{
- struct dmtimer_clockevent *clkevt;
struct clock_event_device *dev;
struct dmtimer_systimer *t;
int error;
- clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
- if (!clkevt)
- return -ENOMEM;
-
t = &clkevt->t;
dev = &clkevt->dev;
@@ -548,24 +549,23 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
* We mostly use cpuidle_coupled with ARM local timers for runtime,
* so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here.
*/
- dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
- dev->rating = 300;
+ dev->features = features;
+ dev->rating = rating;
dev->set_next_event = dmtimer_set_next_event;
dev->set_state_shutdown = dmtimer_clockevent_shutdown;
dev->set_state_periodic = dmtimer_set_periodic;
dev->set_state_oneshot = dmtimer_clockevent_shutdown;
+ dev->set_state_oneshot_stopped = dmtimer_clockevent_shutdown;
dev->tick_resume = dmtimer_clockevent_shutdown;
- dev->cpumask = cpu_possible_mask;
+ dev->cpumask = cpumask;
dev->irq = irq_of_parse_and_map(np, 0);
- if (!dev->irq) {
- error = -ENXIO;
- goto err_out_free;
- }
+ if (!dev->irq)
+ return -ENXIO;
error = dmtimer_systimer_setup(np, &clkevt->t);
if (error)
- goto err_out_free;
+ return error;
clkevt->period = 0xffffffff - DIV_ROUND_CLOSEST(t->rate, HZ);
@@ -577,38 +577,132 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
writel_relaxed(OMAP_TIMER_CTRL_POSTED, t->base + t->ifctrl);
error = request_irq(dev->irq, dmtimer_clockevent_interrupt,
- IRQF_TIMER, "clockevent", clkevt);
+ IRQF_TIMER, name, clkevt);
if (error)
goto err_out_unmap;
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
- pr_info("TI gptimer clockevent: %s%lu Hz at %pOF\n",
- of_find_property(np, "ti,timer-alwon", NULL) ?
+ pr_info("TI gptimer %s: %s%lu Hz at %pOF\n",
+ name, of_find_property(np, "ti,timer-alwon", NULL) ?
"always-on " : "", t->rate, np->parent);
- clockevents_config_and_register(dev, t->rate,
- 3, /* Timer internal resynch latency */
+ return 0;
+
+err_out_unmap:
+ iounmap(t->base);
+
+ return error;
+}
+
+static int __init dmtimer_clockevent_init(struct device_node *np)
+{
+ struct dmtimer_clockevent *clkevt;
+ int error;
+
+ clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
+ if (!clkevt)
+ return -ENOMEM;
+
+ error = dmtimer_clkevt_init_common(clkevt, np,
+ CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ cpu_possible_mask, "clockevent",
+ 300);
+ if (error)
+ goto err_out_free;
+
+ clockevents_config_and_register(&clkevt->dev, clkevt->t.rate,
+ 3, /* Timer internal resync latency */
0xffffffff);
if (of_machine_is_compatible("ti,am33xx") ||
of_machine_is_compatible("ti,am43")) {
- dev->suspend = omap_clockevent_idle;
- dev->resume = omap_clockevent_unidle;
+ clkevt->dev.suspend = omap_clockevent_idle;
+ clkevt->dev.resume = omap_clockevent_unidle;
}
return 0;
-err_out_unmap:
- iounmap(t->base);
-
err_out_free:
kfree(clkevt);
return error;
}
+/* Dmtimer as percpu timer. See dra7 ARM architected timer wrap erratum i940 */
+static DEFINE_PER_CPU(struct dmtimer_clockevent, dmtimer_percpu_timer);
+
+static int __init dmtimer_percpu_timer_init(struct device_node *np, int cpu)
+{
+ struct dmtimer_clockevent *clkevt;
+ int error;
+
+ if (!cpu_possible(cpu))
+ return -EINVAL;
+
+ if (!of_property_read_bool(np->parent, "ti,no-reset-on-init") ||
+ !of_property_read_bool(np->parent, "ti,no-idle"))
+ pr_warn("Incomplete dtb for percpu dmtimer %pOF\n", np->parent);
+
+ clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
+
+ error = dmtimer_clkevt_init_common(clkevt, np, CLOCK_EVT_FEAT_ONESHOT,
+ cpumask_of(cpu), "percpu-dmtimer",
+ 500);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+/* See TRM for timer internal resynch latency */
+static int omap_dmtimer_starting_cpu(unsigned int cpu)
+{
+ struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
+ struct clock_event_device *dev = &clkevt->dev;
+ struct dmtimer_systimer *t = &clkevt->t;
+
+ clockevents_config_and_register(dev, t->rate, 3, ULONG_MAX);
+ irq_force_affinity(dev->irq, cpumask_of(cpu));
+
+ return 0;
+}
+
+static int __init dmtimer_percpu_timer_startup(void)
+{
+ struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, 0);
+ struct dmtimer_systimer *t = &clkevt->t;
+
+ if (t->sysc) {
+ cpuhp_setup_state(CPUHP_AP_TI_GP_TIMER_STARTING,
+ "clockevents/omap/gptimer:starting",
+ omap_dmtimer_starting_cpu, NULL);
+ }
+
+ return 0;
+}
+subsys_initcall(dmtimer_percpu_timer_startup);
+
+static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
+{
+ struct device_node *arm_timer;
+
+ arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
+ if (of_device_is_available(arm_timer)) {
+ pr_warn_once("ARM architected timer wrap issue i940 detected\n");
+ return 0;
+ }
+
+ if (pa == 0x48034000) /* dra7 dmtimer3 */
+ return dmtimer_percpu_timer_init(np, 0);
+ else if (pa == 0x48036000) /* dra7 dmtimer4 */
+ return dmtimer_percpu_timer_init(np, 1);
+
+ return 0;
+}
+
/* Clocksource */
static struct dmtimer_clocksource *
to_dmtimer_clocksource(struct clocksource *cs)
@@ -742,6 +836,9 @@ static int __init dmtimer_systimer_init(struct device_node *np)
if (clockevent == pa)
return dmtimer_clockevent_init(np);
+ if (of_machine_is_compatible("ti,dra7"))
+ return dmtimer_percpu_quirk_init(np, pa);
+
return 0;
}
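The reorderings in dmtimer_set_next_event() and dmtimer_set_periodic() all enforce the same rule: in posted mode the write-pending (TWPS) bit must be polled before writing a register, because a write issued while the previous posted write is still in flight is lost. Reduced to one hedged helper, where wp_bit stands in for WP_TCRR/WP_TCLR/WP_TLDR:

    static void dmtimer_posted_write(void __iomem *pend, void __iomem *reg,
                                     u32 val, u32 wp_bit)
    {
        while (readl_relaxed(pend) & wp_bit)    /* previous write pending? */
            cpu_relax();
        writel_relaxed(val, reg);               /* now safe to post */
    }

The rest of the file's additions wire two dmtimers up as per-CPU clockevents on dra7 to work around ARM architected timer erratum i940.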
diff --git a/drivers/clocksource/timer-vf-pit.c b/drivers/clocksource/timer-vf-pit.c
index 1a86a4e7e344..911c92146eca 100644
--- a/drivers/clocksource/timer-vf-pit.c
+++ b/drivers/clocksource/timer-vf-pit.c
@@ -136,7 +136,7 @@ static int __init pit_clockevent_init(unsigned long rate, int irq)
/*
* The value for the LDVAL register trigger is calculated as:
* LDVAL trigger = (period / clock period) - 1
- * The pit is a 32-bit down count timer, when the conter value
+ * The pit is a 32-bit down count timer, when the counter value
* reaches 0, it will generate an interrupt, thus the minimal
* LDVAL trigger value is 1. And then the min_delta is
* minimal LDVAL trigger value + 1, and the max_delta is full 32-bit.
diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
index 856fb2045656..b8e75210a0e3 100644
--- a/drivers/crypto/allwinner/Kconfig
+++ b/drivers/crypto/allwinner/Kconfig
@@ -71,10 +71,10 @@ config CRYPTO_DEV_SUN8I_CE_DEBUG
config CRYPTO_DEV_SUN8I_CE_HASH
bool "Enable support for hash on sun8i-ce"
depends on CRYPTO_DEV_SUN8I_CE
- select MD5
- select SHA1
- select SHA256
- select SHA512
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
help
Say y to enable support for hash algorithms.
@@ -132,8 +132,8 @@ config CRYPTO_DEV_SUN8I_SS_PRNG
config CRYPTO_DEV_SUN8I_SS_HASH
bool "Enable support for hash on sun8i-ss"
depends on CRYPTO_DEV_SUN8I_SS
- select MD5
- select SHA1
- select SHA256
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
help
Say y to enable support for hash algorithms.
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index c2e6f5ed1d79..dec79fa3ebaf 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -561,7 +561,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
sizeof(struct sun4i_cipher_req_ctx) +
crypto_skcipher_reqsize(op->fallback_tfm));
- err = pm_runtime_get_sync(op->ss->dev);
+ err = pm_runtime_resume_and_get(op->ss->dev);
if (err < 0)
goto error_pm;
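This and the following pm_runtime conversions all close the same reference leak: pm_runtime_get_sync() bumps the usage count even when resume fails, so a bare return leaks a reference, whereas pm_runtime_resume_and_get() drops the count itself on failure. Side by side, under an assumed struct device *dev:

    static int example_enable(struct device *dev)
    {
        int ret;

        /* old: caller must undo the usage count on failure */
        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
            pm_runtime_put_noidle(dev);
            return ret;
        }
        pm_runtime_put(dev);

        /* new: the helper drops the count itself on failure */
        return pm_runtime_resume_and_get(dev);
    }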
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 709905ec4680..44b8fc4b786d 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -288,8 +288,7 @@ static int sun4i_ss_pm_suspend(struct device *dev)
{
struct sun4i_ss_ctx *ss = dev_get_drvdata(dev);
- if (ss->reset)
- reset_control_assert(ss->reset);
+ reset_control_assert(ss->reset);
clk_disable_unprepare(ss->ssclk);
clk_disable_unprepare(ss->busclk);
@@ -314,12 +313,10 @@ static int sun4i_ss_pm_resume(struct device *dev)
goto err_enable;
}
- if (ss->reset) {
- err = reset_control_deassert(ss->reset);
- if (err) {
- dev_err(ss->dev, "Cannot deassert reset control\n");
- goto err_enable;
- }
+ err = reset_control_deassert(ss->reset);
+ if (err) {
+ dev_err(ss->dev, "Cannot deassert reset control\n");
+ goto err_enable;
}
return err;
@@ -401,12 +398,10 @@ static int sun4i_ss_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "clock ahb_ss acquired\n");
ss->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
- if (IS_ERR(ss->reset)) {
- if (PTR_ERR(ss->reset) == -EPROBE_DEFER)
- return PTR_ERR(ss->reset);
+ if (IS_ERR(ss->reset))
+ return PTR_ERR(ss->reset);
+ if (!ss->reset)
dev_info(&pdev->dev, "no reset control found\n");
- ss->reset = NULL;
- }
/*
* Check that clock have the correct rates given in the datasheet
@@ -459,7 +454,7 @@ static int sun4i_ss_probe(struct platform_device *pdev)
* this info could be useful
*/
- err = pm_runtime_get_sync(ss->dev);
+ err = pm_runtime_resume_and_get(ss->dev);
if (err < 0)
goto error_pm;
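The reset-control simplification leans on two guarantees: devm_reset_control_get_optional() returns NULL, not an error, when the device tree has no reset line, and the reset_control_*() calls treat NULL as a no-op. The explicit if (ss->reset) guards were therefore dead weight. The resulting shape, sketched:

    static int example_get_reset(struct device *dev)
    {
        struct reset_control *rstc;

        rstc = devm_reset_control_get_optional(dev, "ahb");
        if (IS_ERR(rstc))
            return PTR_ERR(rstc);   /* real errors, incl. -EPROBE_DEFER */

        return reset_control_deassert(rstc);    /* no-op when rstc == NULL */
    }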
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
index c1b4585e9bbc..d28292762b32 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
@@ -27,7 +27,7 @@ int sun4i_hash_crainit(struct crypto_tfm *tfm)
algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
op->ss = algt->ss;
- err = pm_runtime_get_sync(op->ss->dev);
+ err = pm_runtime_resume_and_get(op->ss->dev);
if (err < 0)
return err;
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
index 443160a114bb..491fcb7b81b4 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
@@ -29,7 +29,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
ss = algt->ss;
- err = pm_runtime_get_sync(ss->dev);
+ err = pm_runtime_resume_and_get(ss->dev);
if (err < 0)
return err;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 33707a2e55ff..54ae8d16e493 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -240,11 +240,14 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
theend_sgs:
if (areq->src == areq->dst) {
- dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_BIDIRECTIONAL);
} else {
if (nr_sgs > 0)
- dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
+ DMA_FROM_DEVICE);
}
theend_iv:
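This dma_unmap_sg() fix, repeated across the sun8i and amlogic drivers below, follows from the DMA API contract: dma_map_sg() may coalesce entries and return fewer segments than it was given, but dma_unmap_sg() must be called with the original entry count. Sketched in a minimal helper:

    static int example_map_unmap(struct device *dev, struct scatterlist *sgl)
    {
        int nents = sg_nents(sgl);
        int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

        if (!mapped)
            return -EINVAL;
        /* ... program 'mapped' hardware descriptors ... */
        dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);   /* original count */
        return 0;
    }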
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 158422ff5695..00194d1d9ae6 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -932,7 +932,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
if (err)
goto error_alg;
- err = pm_runtime_get_sync(ce->dev);
+ err = pm_runtime_resume_and_get(ce->dev);
if (err < 0)
goto error_alg;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 2f09a37306e2..88194718a806 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -405,7 +405,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
index cfde9ee4356b..cd1baee424a1 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
@@ -99,6 +99,7 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
dma_iv = dma_map_single(ce->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
if (dma_mapping_error(ce->dev, dma_iv)) {
dev_err(ce->dev, "Cannot DMA MAP IV\n");
+ err = -EFAULT;
goto err_iv;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index ed2a69f82e1c..9ef1c85c4aaa 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -232,10 +232,13 @@ sgd_next:
theend_sgs:
if (areq->src == areq->dst) {
- dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
+ DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
- dma_unmap_sg(ss->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ dma_unmap_sg(ss->dev, areq->dst, sg_nents(areq->dst),
+ DMA_FROM_DEVICE);
}
theend_iv:
@@ -351,7 +354,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
op->enginectx.op.prepare_request = NULL;
op->enginectx.op.unprepare_request = NULL;
- err = pm_runtime_get_sync(op->ss->dev);
+ err = pm_runtime_resume_and_get(op->ss->dev);
if (err < 0) {
dev_err(op->ss->dev, "pm error %d\n", err);
goto error_pm;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index e0ddc684798d..80e89066dbd1 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -753,7 +753,7 @@ static int sun8i_ss_probe(struct platform_device *pdev)
if (err)
goto error_alg;
- err = pm_runtime_get_sync(ss->dev);
+ err = pm_runtime_resume_and_get(ss->dev);
if (err < 0)
goto error_alg;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 11cbcbc83a7b..3c073eb3db03 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -348,8 +348,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
bf = (__le32 *)pad;
result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
- if (!result)
+ if (!result) {
+ kfree(pad);
return -ENOMEM;
+ }
for (i = 0; i < MAX_SG; i++) {
rctx->t_dst[i].addr = 0;
@@ -432,14 +434,14 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));
dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);
- dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
- kfree(pad);
-
memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
- kfree(result);
theend:
+ kfree(pad);
+ kfree(result);
crypto_finalize_hash_request(engine, breq, err);
return 0;
}
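The hash-path fix above repairs two leaks at once: pad leaked when the result allocation failed, and both buffers leaked on the error exits that jumped straight to theend. Centralizing the frees at the exit label works because kfree(NULL) is a no-op. The pattern, reduced:

    static int example_run(void)
    {
        u8 *result = NULL;
        int err = 0;
        u8 *pad;

        pad = kzalloc(64, GFP_KERNEL | GFP_DMA);
        if (!pad)
            return -ENOMEM;

        result = kzalloc(32, GFP_KERNEL | GFP_DMA);
        if (!result) {
            err = -ENOMEM;
            goto theend;
        }

        /* ... work that may set err and goto theend ... */

    theend:
        kfree(pad);
        kfree(result);  /* kfree(NULL) is safe */
        return err;
    }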
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
index 08a1473b2145..3191527928e4 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
@@ -103,7 +103,8 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
if (dma_mapping_error(ss->dev, dma_iv)) {
dev_err(ss->dev, "Cannot DMA MAP IV\n");
- return -EFAULT;
+ err = -EFAULT;
+ goto err_free;
}
dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE);
@@ -167,6 +168,7 @@ err_iv:
memcpy(ctx->seed, d + dlen, ctx->slen);
}
memzero_explicit(d, todo);
+err_free:
kfree(d);
return err;
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index a3fa849b139a..ded732242732 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -115,7 +115,7 @@ int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
}
-/**
+/*
* AES Functions
*/
static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
@@ -374,7 +374,7 @@ static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx,
return crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
}
-/**
+/*
* AES-CCM Functions
*/
@@ -489,7 +489,7 @@ int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
return crypto_aead_setauthsize(ctx->sw_cipher.aead, authsize);
}
-/**
+/*
* AES-GCM Functions
*/
@@ -617,7 +617,7 @@ int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
return crypto4xx_crypt_aes_gcm(req, true);
}
-/**
+/*
* HASH SHA1 Functions
*/
static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
@@ -711,7 +711,7 @@ int crypto4xx_hash_digest(struct ahash_request *req)
ctx->sa_len, 0, NULL);
}
-/**
+/*
* SHA1 Algorithm
*/
int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
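The /** to /* conversions through the crypto4xx files are kernel-doc hygiene: /** marks a comment for scripts/kernel-doc, which then warns when the block does not actually document a symbol, so plain banner comments get the ordinary opener. For contrast, a well-formed kernel-doc block for a hypothetical helper:

    /**
     * example_transform() - transform a buffer in place
     * @buf: buffer to transform
     * @len: number of bytes in @buf
     *
     * Return: 0 on success or a negative errno.
     */
    int example_transform(u8 *buf, size_t len);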
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 8d1b918a0533..8278d98074e9 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -44,7 +44,7 @@
#define PPC4XX_SEC_VERSION_STR "0.5"
-/**
+/*
* PPC4xx Crypto Engine Initialization Routine
*/
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
@@ -159,7 +159,7 @@ void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
ctx->sa_len = 0;
}
-/**
+/*
* alloc memory for the gather ring
* no need to alloc buf for the ring
* gdr_tail, gdr_head and gdr_count are initialized by this function
@@ -268,7 +268,7 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
return tail;
}
-/**
+/*
* alloc memory for the gather ring
* no need to alloc buf for the ring
* gdr_tail, gdr_head and gdr_count are initialized by this function
@@ -346,7 +346,7 @@ static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
return &dev->gdr[idx];
}
-/**
+/*
* alloc memory for the scatter ring
* need to alloc buf for the ring
* sdr_tail, sdr_head and sdr_count are initialized by this function
@@ -930,7 +930,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
return is_busy ? -EBUSY : -EINPROGRESS;
}
-/**
+/*
* Algorithm Registration Functions
*/
static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
@@ -1097,7 +1097,7 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
} while (head != tail);
}
-/**
+/*
* Top Half of isr.
*/
static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
@@ -1186,7 +1186,7 @@ static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed,
return 0;
}
-/**
+/*
* Supported Crypto Algorithms
*/
static struct crypto4xx_alg_common crypto4xx_alg[] = {
@@ -1369,7 +1369,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
} },
};
-/**
+/*
* Module Initialization Routine
*/
static int crypto4xx_probe(struct platform_device *ofdev)
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index a4e25b46cd0a..56c10668c0ab 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -188,7 +188,7 @@ int crypto4xx_hash_final(struct ahash_request *req);
int crypto4xx_hash_update(struct ahash_request *req);
int crypto4xx_hash_init(struct ahash_request *req);
-/**
+/*
* Note: Only use this function to copy items that is word aligned.
*/
static inline void crypto4xx_memcpy_swab32(u32 *dst, const void *buf,
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index c4c0a1a75941..1038061224da 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -104,7 +104,7 @@
#define CRYPTO4XX_PRNG_LFSR_L 0x00070030
#define CRYPTO4XX_PRNG_LFSR_H 0x00070034
-/**
+/*
* Initialize CRYPTO ENGINE registers, and memory bases.
*/
#define PPC4XX_PDR_POLL 0x3ff
@@ -123,7 +123,7 @@
#define PPC4XX_INT_TIMEOUT_CNT 0
#define PPC4XX_INT_TIMEOUT_CNT_REVB 0x3FF
#define PPC4XX_INT_CFG 1
-/**
+/*
* all follow define are ad hoc
*/
#define PPC4XX_RING_RETRY 100
@@ -131,7 +131,7 @@
#define PPC4XX_SDR_SIZE PPC4XX_NUM_SD
#define PPC4XX_GDR_SIZE PPC4XX_NUM_GD
-/**
+/*
* Generic Security Association (SA) with all possible fields. These will
* never likely used except for reference purpose. These structure format
* can be not changed as the hardware expects them to be layout as defined.
diff --git a/drivers/crypto/amcc/crypto4xx_sa.h b/drivers/crypto/amcc/crypto4xx_sa.h
index fe756abfc19f..e98e4e7abbad 100644
--- a/drivers/crypto/amcc/crypto4xx_sa.h
+++ b/drivers/crypto/amcc/crypto4xx_sa.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -14,7 +14,7 @@
#define AES_IV_SIZE 16
-/**
+/*
* Contents of Dynamic Security Association (SA) with all possible fields
*/
union dynamic_sa_contents {
@@ -122,7 +122,7 @@ union sa_command_0 {
#define SA_AES_KEY_LEN_256 4
#define SA_REV2 1
-/**
+/*
* The follow defines bits sa_command_1
* In Basic hash mode this bit define simple hash or hmac.
* In IPsec mode, this bit define muting control.
@@ -172,7 +172,7 @@ struct dynamic_sa_ctl {
union sa_command_1 sa_command_1;
} __attribute__((packed));
-/**
+/*
* State Record for Security Association (SA)
*/
struct sa_state_record {
@@ -184,7 +184,7 @@ struct sa_state_record {
};
} __attribute__((packed));
-/**
+/*
* Security Association (SA) for AES128
*
*/
@@ -213,7 +213,7 @@ struct dynamic_sa_aes192 {
#define SA_AES192_LEN (sizeof(struct dynamic_sa_aes192)/4)
#define SA_AES192_CONTENTS 0x3e000062
-/**
+/*
* Security Association (SA) for AES256
*/
struct dynamic_sa_aes256 {
@@ -228,7 +228,7 @@ struct dynamic_sa_aes256 {
#define SA_AES256_CONTENTS 0x3e000082
#define SA_AES_CONTENTS 0x3e000002
-/**
+/*
* Security Association (SA) for AES128 CCM
*/
struct dynamic_sa_aes128_ccm {
@@ -242,7 +242,7 @@ struct dynamic_sa_aes128_ccm {
#define SA_AES128_CCM_CONTENTS 0x3e000042
#define SA_AES_CCM_CONTENTS 0x3e000002
-/**
+/*
* Security Association (SA) for AES128_GCM
*/
struct dynamic_sa_aes128_gcm {
@@ -258,7 +258,7 @@ struct dynamic_sa_aes128_gcm {
#define SA_AES128_GCM_CONTENTS 0x3e000442
#define SA_AES_GCM_CONTENTS 0x3e000402
-/**
+/*
* Security Association (SA) for HASH160: HMAC-SHA1
*/
struct dynamic_sa_hash160 {
diff --git a/drivers/crypto/amcc/crypto4xx_trng.h b/drivers/crypto/amcc/crypto4xx_trng.h
index 3af732f25c1c..7356716274cb 100644
--- a/drivers/crypto/amcc/crypto4xx_trng.h
+++ b/drivers/crypto/amcc/crypto4xx_trng.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index 8b5e07316352..c6865cbd334b 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -236,10 +236,10 @@ static int meson_cipher(struct skcipher_request *areq)
dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE);
if (areq->src == areq->dst) {
- dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
- dma_unmap_sg(mc->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+ dma_unmap_sg(mc->dev, areq->dst, sg_nents(areq->dst), DMA_FROM_DEVICE);
}
if (areq->iv && ivsize > 0) {
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index 5bbeff433c8c..6e7ae896717c 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -217,9 +217,6 @@ static int meson_crypto_probe(struct platform_device *pdev)
struct meson_dev *mc;
int err, i;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 9bd8e5167be3..333fbefbbccb 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -26,7 +26,7 @@
static struct atmel_ecc_driver_data driver_data;
/**
- * atmel_ecdh_ctx - transformation context
+ * struct atmel_ecdh_ctx - transformation context
* @client : pointer to i2c client device
* @fallback : used for unsupported curves or when user wants to use its own
* private key.
@@ -34,7 +34,6 @@ static struct atmel_ecc_driver_data driver_data;
* of the user to not call set_secret() while
* generate_public_key() or compute_shared_secret() are in flight.
* @curve_id : elliptic curve id
- * @n_sz : size in bytes of the n prime
* @do_fallback: true when the device doesn't support the curve or when the user
* wants to use its own private key.
*/
@@ -43,7 +42,6 @@ struct atmel_ecdh_ctx {
struct crypto_kpp *fallback;
const u8 *public_key;
unsigned int curve_id;
- size_t n_sz;
bool do_fallback;
};
@@ -51,7 +49,6 @@ static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq,
int status)
{
struct kpp_request *req = areq;
- struct atmel_ecdh_ctx *ctx = work_data->ctx;
struct atmel_i2c_cmd *cmd = &work_data->cmd;
size_t copied, n_sz;
@@ -59,7 +56,7 @@ static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq,
goto free_work_data;
/* might want less than we've got */
- n_sz = min_t(size_t, ctx->n_sz, req->dst_len);
+ n_sz = min_t(size_t, ATMEL_ECC_NIST_P256_N_SIZE, req->dst_len);
/* copy the shared secret */
copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, n_sz),
@@ -73,14 +70,6 @@ free_work_data:
kpp_request_complete(req, status);
}
-static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id)
-{
- if (curve_id == ECC_CURVE_NIST_P256)
- return ATMEL_ECC_NIST_P256_N_SIZE;
-
- return 0;
-}
-
/*
* A random private key is generated and stored in the device. The device
* returns the pair public key.
@@ -104,8 +93,7 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
return -EINVAL;
}
- ctx->n_sz = atmel_ecdh_supported_curve(params.curve_id);
- if (!ctx->n_sz || params.key_size) {
+ if (params.key_size) {
/* fallback to ecdh software implementation */
ctx->do_fallback = true;
return crypto_kpp_set_secret(ctx->fallback, buf, len);
@@ -125,7 +113,6 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
goto free_cmd;
ctx->do_fallback = false;
- ctx->curve_id = params.curve_id;
atmel_i2c_init_genkey_cmd(cmd, DATA_SLOT_2);
@@ -263,6 +250,7 @@ static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
struct crypto_kpp *fallback;
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+ ctx->curve_id = ECC_CURVE_NIST_P256;
ctx->client = atmel_ecc_i2c_client_alloc();
if (IS_ERR(ctx->client)) {
pr_err("tfm - i2c_client binding failed\n");
@@ -306,7 +294,7 @@ static unsigned int atmel_ecdh_max_size(struct crypto_kpp *tfm)
return ATMEL_ECC_PUBKEY_SIZE;
}
-static struct kpp_alg atmel_ecdh = {
+static struct kpp_alg atmel_ecdh_nist_p256 = {
.set_secret = atmel_ecdh_set_secret,
.generate_public_key = atmel_ecdh_generate_public_key,
.compute_shared_secret = atmel_ecdh_compute_shared_secret,
@@ -315,7 +303,7 @@ static struct kpp_alg atmel_ecdh = {
.max_size = atmel_ecdh_max_size,
.base = {
.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
- .cra_name = "ecdh",
+ .cra_name = "ecdh-nist-p256",
.cra_driver_name = "atmel-ecdh",
.cra_priority = ATMEL_ECC_PRIORITY,
.cra_module = THIS_MODULE,
@@ -340,14 +328,14 @@ static int atmel_ecc_probe(struct i2c_client *client,
&driver_data.i2c_client_list);
spin_unlock(&driver_data.i2c_list_lock);
- ret = crypto_register_kpp(&atmel_ecdh);
+ ret = crypto_register_kpp(&atmel_ecdh_nist_p256);
if (ret) {
spin_lock(&driver_data.i2c_list_lock);
list_del(&i2c_priv->i2c_client_list_node);
spin_unlock(&driver_data.i2c_list_lock);
dev_err(&client->dev, "%s alg registration failed\n",
- atmel_ecdh.base.cra_driver_name);
+ atmel_ecdh_nist_p256.base.cra_driver_name);
} else {
dev_info(&client->dev, "atmel ecc algorithms registered in /proc/crypto\n");
}
@@ -365,7 +353,7 @@ static int atmel_ecc_remove(struct i2c_client *client)
return -EBUSY;
}
- crypto_unregister_kpp(&atmel_ecdh);
+ crypto_unregister_kpp(&atmel_ecdh_nist_p256);
spin_lock(&driver_data.i2c_list_lock);
list_del(&i2c_priv->i2c_client_list_node);
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
index e8e8281e027d..6fd3e969211d 100644
--- a/drivers/crypto/atmel-i2c.c
+++ b/drivers/crypto/atmel-i2c.c
@@ -339,7 +339,7 @@ int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
if (bus_clk_rate > 1000000L) {
- dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n",
+ dev_err(dev, "%u exceeds maximum supported clock frequency (1MHz)\n",
bus_clk_rate);
return -EINVAL;
}
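The format-string fix matches the variable's type: the bus clock rate is unsigned, and %d would render values above INT_MAX as negative. A two-line illustration:

    u32 rate = 3000000000U;     /* valid u32, exceeds INT_MAX */

    pr_info("%d\n", rate);      /* misleading: prints a negative value */
    pr_info("%u\n", rate);      /* correct: 3000000000 */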
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 352d80cb5ae9..1b13f601fd95 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -434,7 +434,7 @@ static int atmel_sha_init(struct ahash_request *req)
ctx->flags = 0;
- dev_dbg(dd->dev, "init: digest size: %d\n",
+ dev_dbg(dd->dev, "init: digest size: %u\n",
crypto_ahash_digestsize(tfm));
switch (crypto_ahash_digestsize(tfm)) {
@@ -1102,7 +1102,7 @@ static int atmel_sha_start(struct atmel_sha_dev *dd)
struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
int err;
- dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
+ dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %u\n",
ctx->op, req->nbytes);
err = atmel_sha_hw_init(dd);
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 4d63cb13a54f..6f01c51e3c37 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1217,7 +1217,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
if (IS_ERR(tdes_dd->io_base)) {
- dev_err(dev, "can't ioremap\n");
err = PTR_ERR(tdes_dd->io_base);
goto err_tasklet_kill;
}
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 851b149f7170..053315e260c2 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1019,6 +1019,7 @@ static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
* a SPU response message for an AEAD request. Includes buffers to catch SPU
* message headers and the response data.
* @mssg: mailbox message containing the receive sg
+ * @req: Crypto API request
* @rctx: crypto request context
* @rx_frag_num: number of scatterlist elements required to hold the
* SPU response message
@@ -2952,9 +2953,9 @@ static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
/**
* rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC.
- * cipher: AEAD structure
- * key: Key followed by 4 bytes of salt
- * keylen: Length of key plus salt, in bytes
+ * @cipher: AEAD structure
+ * @key: Key followed by 4 bytes of salt
+ * @keylen: Length of key plus salt, in bytes
*
* Extracts salt from key and stores it to be prepended to IV on each request.
* Digest is always 16 bytes
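As a hedged illustration of the salt handling this kernel-doc describes (a minimal sketch with hypothetical names, not the driver's code), splitting "key followed by 4 bytes of salt" looks like:

#include <string.h>

#define GCM_ESP_SALT_SIZE 4	/* per the comment: 4 trailing salt bytes */

struct esp_key_parts {
	unsigned char key[64];
	unsigned int keylen;
	unsigned char salt[GCM_ESP_SALT_SIZE];
};

static int esp_split_key_salt(struct esp_key_parts *p,
			      const unsigned char *key, unsigned int keylen)
{
	if (keylen <= GCM_ESP_SALT_SIZE ||
	    keylen - GCM_ESP_SALT_SIZE > sizeof(p->key))
		return -1;
	p->keylen = keylen - GCM_ESP_SALT_SIZE;
	memcpy(p->key, key, p->keylen);			/* cipher key proper */
	memcpy(p->salt, key + p->keylen, GCM_ESP_SALT_SIZE); /* salt for the IV */
	return 0;
}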
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
index 007abf92cc05..6283e8c6d51d 100644
--- a/drivers/crypto/bcm/spu.c
+++ b/drivers/crypto/bcm/spu.c
@@ -457,7 +457,7 @@ u16 spum_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
* @cipher_mode: Algo type
* @data_size: Length of plaintext (bytes)
*
- * @Return: Length of padding, in bytes
+ * Return: Length of padding, in bytes
*/
u32 spum_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
unsigned int data_size)
@@ -510,10 +510,10 @@ u32 spum_assoc_resp_len(enum spu_cipher_mode cipher_mode,
}
/**
- * spu_aead_ivlen() - Calculate the length of the AEAD IV to be included
+ * spum_aead_ivlen() - Calculate the length of the AEAD IV to be included
* in a SPU request after the AAD and before the payload.
* @cipher_mode: cipher mode
- * @iv_ctr_len: initialization vector length in bytes
+ * @iv_len: initialization vector length in bytes
*
* In Linux ~4.2 and later, the assoc_data sg includes the IV. So no need
* to include the IV as a separate field in the SPU request msg.
@@ -543,9 +543,9 @@ enum hash_type spum_hash_type(u32 src_sent)
/**
* spum_digest_size() - Determine the size of a hash digest to expect the SPU to
* return.
- * alg_digest_size: Number of bytes in the final digest for the given algo
- * alg: The hash algorithm
- * htype: Type of hash operation (init, update, full, etc)
+ * @alg_digest_size: Number of bytes in the final digest for the given algo
+ * @alg: The hash algorithm
+ * @htype: Type of hash operation (init, update, full, etc)
*
* When doing incremental hashing for an algorithm with a truncated hash
* (e.g., SHA224), the SPU returns the full digest so that it can be fed back as
@@ -580,7 +580,7 @@ u32 spum_digest_size(u32 alg_digest_size, enum hash_alg alg,
* @aead_parms: Parameters related to AEAD operation
* @data_size: Length of data to be encrypted or authenticated. If AEAD, does
* not include length of AAD.
-
+ *
* Return: the length of the SPU header in bytes. 0 if an error occurs.
*/
u32 spum_create_request(u8 *spu_hdr,
@@ -911,7 +911,7 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
* setkey() time in spu_cipher_req_init().
* @spu_hdr: Start of the request message header (MH field)
* @spu_req_hdr_len: Length in bytes of the SPU request header
- * @isInbound: 0 encrypt, 1 decrypt
+ * @is_inbound: 0 encrypt, 1 decrypt
* @cipher_parms: Parameters describing cipher operation to be performed
* @data_size: Length of the data in the BD field
*
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index 2db35b5ccaa2..07989bb8c220 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -543,7 +543,8 @@ void spu2_dump_msg_hdr(u8 *buf, unsigned int buf_len)
/**
* spu2_fmd_init() - At setkey time, initialize the fixed meta data for
* subsequent skcipher requests for this context.
- * @spu2_cipher_type: Cipher algorithm
+ * @fmd: Start of FMD field to be written
+ * @spu2_type: Cipher algorithm
* @spu2_mode: Cipher mode
* @cipher_key_len: Length of cipher key, in bytes
* @cipher_iv_len: Length of cipher initialization vector, in bytes
@@ -598,7 +599,7 @@ static int spu2_fmd_init(struct SPU2_FMD *fmd,
* SPU request packet.
* @fmd: Start of FMD field to be written
* @is_inbound: true if decrypting. false if encrypting.
- * @authFirst: true if alg authenticates before encrypting
+ * @auth_first: true if alg authenticates before encrypting
* @protocol: protocol selector
* @cipher_type: cipher algorithm
* @cipher_mode: cipher mode
@@ -640,6 +641,7 @@ static void spu2_fmd_ctrl0_write(struct SPU2_FMD *fmd,
* spu2_fmd_ctrl1_write() - Write ctrl1 field in fixed metadata (FMD) field of
* SPU request packet.
* @fmd: Start of FMD field to be written
+ * @is_inbound: true if decrypting. false if encrypting.
* @assoc_size: Length of additional associated data, in bytes
* @auth_key_len: Length of authentication key, in bytes
* @cipher_key_len: Length of cipher key, in bytes
@@ -793,7 +795,7 @@ u32 spu2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
}
/**
- * spu_payload_length() - Given a SPU2 message header, extract the payload
+ * spu2_payload_length() - Given a SPU2 message header, extract the payload
* length.
* @spu_hdr: Start of SPU message header (FMD)
*
@@ -812,10 +814,11 @@ u32 spu2_payload_length(u8 *spu_hdr)
}
/**
- * spu_response_hdr_len() - Determine the expected length of a SPU response
+ * spu2_response_hdr_len() - Determine the expected length of a SPU response
* header.
* @auth_key_len: Length of authentication key, in bytes
* @enc_key_len: Length of encryption key, in bytes
+ * @is_hash: Unused
*
* For SPU2, includes just FMD. OMD is never requested.
*
@@ -827,7 +830,7 @@ u16 spu2_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash)
}
/**
- * spu_hash_pad_len() - Calculate the length of hash padding required to extend
+ * spu2_hash_pad_len() - Calculate the length of hash padding required to extend
* data to a full block size.
* @hash_alg: hash algorithm
* @hash_mode: hash mode
@@ -845,8 +848,10 @@ u16 spu2_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
}
/**
- * spu2_gcm_ccm_padlen() - Determine the length of GCM/CCM padding for either
+ * spu2_gcm_ccm_pad_len() - Determine the length of GCM/CCM padding for either
* the AAD field or the data.
+ * @cipher_mode: Unused
+ * @data_size: Unused
*
* Return: 0. Unlike SPU-M, SPU2 hardware does any GCM/CCM padding required.
*/
@@ -857,7 +862,7 @@ u32 spu2_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
}
/**
- * spu_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch
+ * spu2_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch
* associated data in a SPU2 output packet.
* @cipher_mode: cipher mode
* @assoc_len: length of additional associated data, in bytes
@@ -878,11 +883,11 @@ u32 spu2_assoc_resp_len(enum spu_cipher_mode cipher_mode,
return resp_len;
}
-/*
- * spu_aead_ivlen() - Calculate the length of the AEAD IV to be included
+/**
+ * spu2_aead_ivlen() - Calculate the length of the AEAD IV to be included
* in a SPU request after the AAD and before the payload.
* @cipher_mode: cipher mode
- * @iv_ctr_len: initialization vector length in bytes
+ * @iv_len: initialization vector length in bytes
*
* For SPU2, AEAD IV is included in OMD and does not need to be repeated
* prior to the payload.
@@ -909,9 +914,9 @@ enum hash_type spu2_hash_type(u32 src_sent)
/**
* spu2_digest_size() - Determine the size of a hash digest to expect the SPU to
* return.
- * alg_digest_size: Number of bytes in the final digest for the given algo
- * alg: The hash algorithm
- * htype: Type of hash operation (init, update, full, etc)
+ * @alg_digest_size: Number of bytes in the final digest for the given algo
+ * @alg: The hash algorithm
+ * @htype: Type of hash operation (init, update, full, etc)
*
*/
u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg,
@@ -921,7 +926,7 @@ u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg,
}
/**
- * spu_create_request() - Build a SPU2 request message header, includint FMD and
+ * spu2_create_request() - Build a SPU2 request message header, including FMD and
* OMD.
* @spu_hdr: Start of buffer where SPU request header is to be written
* @req_opts: SPU request message options
@@ -1105,7 +1110,7 @@ u32 spu2_create_request(u8 *spu_hdr,
}
/**
- * spu_cipher_req_init() - Build an skcipher SPU2 request message header,
+ * spu2_cipher_req_init() - Build an skcipher SPU2 request message header,
* including FMD and OMD.
* @spu_hdr: Location of start of SPU request (FMD field)
* @cipher_parms: Parameters describing cipher request
@@ -1162,11 +1167,11 @@ u16 spu2_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
}
/**
- * spu_cipher_req_finish() - Finish building a SPU request message header for a
+ * spu2_cipher_req_finish() - Finish building a SPU request message header for a
* block cipher request.
* @spu_hdr: Start of the request message header (MH field)
* @spu_req_hdr_len: Length in bytes of the SPU request header
- * @isInbound: 0 encrypt, 1 decrypt
+ * @is_inbound: 0 encrypt, 1 decrypt
* @cipher_parms: Parameters describing cipher operation to be performed
* @data_size: Length of the data in the BD field
*
@@ -1222,7 +1227,7 @@ void spu2_cipher_req_finish(u8 *spu_hdr,
}
/**
- * spu_request_pad() - Create pad bytes at the end of the data.
+ * spu2_request_pad() - Create pad bytes at the end of the data.
* @pad_start: Start of buffer where pad bytes are to be written
* @gcm_padding: Length of GCM padding, in bytes
* @hash_pad_len: Number of bytes of padding extend data to full block
@@ -1311,7 +1316,7 @@ u8 spu2_rx_status_len(void)
}
/**
- * spu_status_process() - Process the status from a SPU response message.
+ * spu2_status_process() - Process the status from a SPU response message.
* @statp: start of STATUS word
*
* Return: 0 - if status is good and response should be processed
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index c4669a96eaec..d5d9cabea55a 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -119,8 +119,8 @@ int spu_sg_count(struct scatterlist *sg_list, unsigned int skip, int nbytes)
* @from_skip: number of bytes to skip in from_sg. Non-zero when previous
* request included part of the buffer in entry in from_sg.
* Assumes from_skip < from_sg->length.
- * @from_nents number of entries in from_sg
- * @length number of bytes to copy. may reach this limit before exhausting
+ * @from_nents: number of entries in from_sg
+ * @length: number of bytes to copy. may reach this limit before exhausting
* from_sg.
*
* Copies the entries themselves, not the data in the entries. Assumes to_sg has
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index a780e627838a..8b8ed77d8715 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -71,6 +71,9 @@ struct caam_skcipher_alg {
* @adata: authentication algorithm details
* @cdata: encryption algorithm details
* @authsize: authentication tag (a.k.a. ICV / MAC) size
+ * @xts_key_fallback: true if fallback tfm needs to be used due
+ * to unsupported xts key lengths
+ * @fallback: xts fallback tfm
*/
struct caam_ctx {
struct caam_flc flc[NUM_OP];
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index dd5f101e43f8..e313233ec6de 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -187,7 +187,8 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
}
/**
- * Count leading zeros, need it to strip, from a given scatterlist
+ * caam_rsa_count_leading_zeros - Count leading zeros, need it to strip,
+ * from a given scatterlist
*
* @sgl : scatterlist to count zeros from
* @nbytes: number of zeros, in bytes, to strip
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 711b1acdd4e0..06ee42e8a245 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -10,7 +10,6 @@
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/printk.h>
-#include <linux/version.h>
#include "cptpf.h"
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c
index 99b053094f5a..c288c4b51783 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_isr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c
@@ -10,7 +10,7 @@
#include "nitrox_isr.h"
#include "nitrox_mbx.h"
-/**
+/*
* One vector for each type of ring
* - NPS packet ring, AQMQ ring and ZQMQ ring
*/
@@ -216,7 +216,7 @@ static void nps_core_int_tasklet(unsigned long data)
}
}
-/**
+/*
* nps_core_int_isr - interrupt handler for NITROX errors and
* mailbox communication
*/
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 53ef06792133..df95ba26b414 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -58,14 +58,15 @@ static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
struct device *dev = DEV(ndev);
- dma_unmap_sg(dev, sr->in.sg, sr->in.sgmap_cnt, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, sr->in.sg, sg_nents(sr->in.sg),
+ DMA_BIDIRECTIONAL);
dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len,
DMA_TO_DEVICE);
kfree(sr->in.sgcomp);
sr->in.sg = NULL;
sr->in.sgmap_cnt = 0;
- dma_unmap_sg(dev, sr->out.sg, sr->out.sgmap_cnt,
+ dma_unmap_sg(dev, sr->out.sg, sg_nents(sr->out.sg),
DMA_BIDIRECTIONAL);
dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len,
DMA_TO_DEVICE);
@@ -178,7 +179,7 @@ static int dma_map_inbufs(struct nitrox_softreq *sr,
return 0;
incomp_err:
- dma_unmap_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
sr->in.sgmap_cnt = 0;
return ret;
}
@@ -203,7 +204,7 @@ static int dma_map_outbufs(struct nitrox_softreq *sr,
return 0;
outcomp_map_err:
- dma_unmap_sg(dev, req->dst, nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_BIDIRECTIONAL);
sr->out.sgmap_cnt = 0;
sr->out.sg = NULL;
return ret;
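The sg_nents() conversions above follow the DMA API rule that dma_unmap_sg() must be passed the same entry count that was given to dma_map_sg(), not the (possibly smaller) mapped count dma_map_sg() returned. A minimal kernel-style sketch of the correct pairing (hypothetical function, for illustration only):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int demo_map_use_unmap(struct device *dev, struct scatterlist *sg)
{
	int nents = sg_nents(sg);	/* entries in the scatterlist */
	int mapped;

	mapped = dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	if (!mapped)
		return -ENOMEM;

	/* ... hand the 'mapped' entries to the hardware ... */

	/* Unmap with the original nents, never with 'mapped'. */
	dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	return 0;
}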
diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h
index 58fb3ed6e644..54f6fb054119 100644
--- a/drivers/crypto/cavium/zip/common.h
+++ b/drivers/crypto/cavium/zip/common.h
@@ -56,7 +56,6 @@
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <linux/version.h>
/* Device specific zlib function definitions */
#include "zip_device.h"
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 88275b4867ea..5976530c00a8 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -59,7 +59,7 @@ struct ccp_crypto_queue {
#define CCP_CRYPTO_MAX_QLEN 100
static struct ccp_crypto_queue req_queue;
-static spinlock_t req_queue_lock;
+static DEFINE_SPINLOCK(req_queue_lock);
struct ccp_crypto_cmd {
struct list_head entry;
@@ -410,7 +410,6 @@ static int ccp_crypto_init(void)
return ret;
}
- spin_lock_init(&req_queue_lock);
INIT_LIST_HEAD(&req_queue.cmds);
req_queue.backlog = &req_queue.cmds;
req_queue.cmd_count = 0;
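The DEFINE_SPINLOCK() conversion here (and the matching geode-aes change below) replaces a runtime spin_lock_init() with compile-time initialization, so the lock is valid even on a path that takes it before the init function has run. A minimal sketch of the idiom:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* initialized at compile time */

static void demo_critical_section(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... touch the data demo_lock protects ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}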
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 0971ee60f840..6777582aa1ce 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -548,7 +548,7 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
return ccp->cmd_q_count == suspended;
}
-int ccp_dev_suspend(struct sp_device *sp)
+void ccp_dev_suspend(struct sp_device *sp)
{
struct ccp_device *ccp = sp->ccp_data;
unsigned long flags;
@@ -556,7 +556,7 @@ int ccp_dev_suspend(struct sp_device *sp)
/* If there's no device there's nothing to do */
if (!ccp)
- return 0;
+ return;
spin_lock_irqsave(&ccp->cmd_lock, flags);
@@ -572,11 +572,9 @@ int ccp_dev_suspend(struct sp_device *sp)
while (!ccp_queues_suspended(ccp))
wait_event_interruptible(ccp->suspend_queue,
ccp_queues_suspended(ccp));
-
- return 0;
}
-int ccp_dev_resume(struct sp_device *sp)
+void ccp_dev_resume(struct sp_device *sp)
{
struct ccp_device *ccp = sp->ccp_data;
unsigned long flags;
@@ -584,7 +582,7 @@ int ccp_dev_resume(struct sp_device *sp)
/* If there's no device there's nothing to do */
if (!ccp)
- return 0;
+ return;
spin_lock_irqsave(&ccp->cmd_lock, flags);
@@ -597,8 +595,6 @@ int ccp_dev_resume(struct sp_device *sp)
}
spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
- return 0;
}
int ccp_dev_init(struct sp_device *sp)
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index d6a8f4e4b14a..bb88198c874e 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -2418,7 +2418,6 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
dst.address += CCP_ECC_OUTPUT_SIZE;
ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
CCP_ECC_MODULUS_BYTES);
- dst.address += CCP_ECC_OUTPUT_SIZE;
/* Restore the workarea address */
dst.address = save;
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index cb9b4c4e371e..da3872c48308 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -21,6 +21,7 @@
#include <linux/ccp.h>
#include <linux/firmware.h>
#include <linux/gfp.h>
+#include <linux/cpufeature.h>
#include <asm/smp.h>
@@ -972,6 +973,11 @@ int sev_dev_init(struct psp_device *psp)
struct sev_device *sev;
int ret = -ENOMEM;
+ if (!boot_cpu_has(X86_FEATURE_SEV)) {
+ dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n");
+ return 0;
+ }
+
sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL);
if (!sev)
goto e_err;
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index 6284a15e5047..7eb3e4668286 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -213,12 +213,8 @@ void sp_destroy(struct sp_device *sp)
int sp_suspend(struct sp_device *sp)
{
- int ret;
-
if (sp->dev_vdata->ccp_vdata) {
- ret = ccp_dev_suspend(sp);
- if (ret)
- return ret;
+ ccp_dev_suspend(sp);
}
return 0;
@@ -226,12 +222,8 @@ int sp_suspend(struct sp_device *sp)
int sp_resume(struct sp_device *sp)
{
- int ret;
-
if (sp->dev_vdata->ccp_vdata) {
- ret = ccp_dev_resume(sp);
- if (ret)
- return ret;
+ ccp_dev_resume(sp);
}
return 0;
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 0218d0670eee..20377e67f65d 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -134,8 +134,8 @@ struct sp_device *sp_get_psp_master_device(void);
int ccp_dev_init(struct sp_device *sp);
void ccp_dev_destroy(struct sp_device *sp);
-int ccp_dev_suspend(struct sp_device *sp);
-int ccp_dev_resume(struct sp_device *sp);
+void ccp_dev_suspend(struct sp_device *sp);
+void ccp_dev_resume(struct sp_device *sp);
#else /* !CONFIG_CRYPTO_DEV_SP_CCP */
@@ -144,15 +144,8 @@ static inline int ccp_dev_init(struct sp_device *sp)
return 0;
}
static inline void ccp_dev_destroy(struct sp_device *sp) { }
-
-static inline int ccp_dev_suspend(struct sp_device *sp)
-{
- return 0;
-}
-static inline int ccp_dev_resume(struct sp_device *sp)
-{
- return 0;
-}
+static inline void ccp_dev_suspend(struct sp_device *sp) { }
+static inline void ccp_dev_resume(struct sp_device *sp) { }
#endif /* CONFIG_CRYPTO_DEV_SP_CCP */
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index f471dbaef1fb..f468594ef8af 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -356,6 +356,7 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
{ PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
+ { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c
index 5e697a90ea7f..5c9d47f3be37 100644
--- a/drivers/crypto/ccp/tee-dev.c
+++ b/drivers/crypto/ccp/tee-dev.c
@@ -5,7 +5,7 @@
* Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
* Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
*
- * Copyright 2019 Advanced Micro Devices, Inc.
+ * Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
*/
#include <linux/types.h>
@@ -36,6 +36,7 @@ static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size)
if (!start_addr)
return -ENOMEM;
+ memset(start_addr, 0x0, ring_size);
rb_mgr->ring_start = start_addr;
rb_mgr->ring_size = ring_size;
rb_mgr->ring_pa = __psp_pa(start_addr);
@@ -244,41 +245,54 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
void *buf, size_t len, struct tee_ring_cmd **resp)
{
struct tee_ring_cmd *cmd;
- u32 rptr, wptr;
int nloop = 1000, ret = 0;
+ u32 rptr;
*resp = NULL;
mutex_lock(&tee->rb_mgr.mutex);
- wptr = tee->rb_mgr.wptr;
-
- /* Check if ring buffer is full */
+ /* Loop until an empty entry is found in the ring buffer */
do {
+ /* Get pointer to ring buffer command entry */
+ cmd = (struct tee_ring_cmd *)
+ (tee->rb_mgr.ring_start + tee->rb_mgr.wptr);
+
rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg);
- if (!(wptr + sizeof(struct tee_ring_cmd) == rptr))
+ /* Check if ring buffer is full or command entry is waiting
+ * for response from TEE
+ */
+ if (!(tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
+ cmd->flag == CMD_WAITING_FOR_RESPONSE))
break;
- dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
- rptr, wptr);
+ dev_dbg(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
+ rptr, tee->rb_mgr.wptr);
- /* Wait if ring buffer is full */
+ /* Wait if ring buffer is full or TEE is processing data */
mutex_unlock(&tee->rb_mgr.mutex);
schedule_timeout_interruptible(msecs_to_jiffies(10));
mutex_lock(&tee->rb_mgr.mutex);
} while (--nloop);
- if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) {
- dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
- rptr, wptr);
+ if (!nloop &&
+ (tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
+ cmd->flag == CMD_WAITING_FOR_RESPONSE)) {
+ dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u response flag %u\n",
+ rptr, tee->rb_mgr.wptr, cmd->flag);
ret = -EBUSY;
goto unlock;
}
- /* Pointer to empty data entry in ring buffer */
- cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr);
+ /* Do not submit command if PSP got disabled while processing any
+ * command in another thread
+ */
+ if (psp_dead) {
+ ret = -EBUSY;
+ goto unlock;
+ }
/* Write command data into ring buffer */
cmd->cmd_id = cmd_id;
@@ -286,6 +300,9 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
memset(&cmd->buf[0], 0, sizeof(cmd->buf));
memcpy(&cmd->buf[0], buf, len);
+ /* Indicate driver is waiting for response */
+ cmd->flag = CMD_WAITING_FOR_RESPONSE;
+
/* Update local copy of write pointer */
tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd);
if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size)
@@ -309,14 +326,14 @@ static int tee_wait_cmd_completion(struct psp_tee_device *tee,
struct tee_ring_cmd *resp,
unsigned int timeout)
{
- /* ~5ms sleep per loop => nloop = timeout * 200 */
- int nloop = timeout * 200;
+ /* ~1ms sleep per loop => nloop = timeout * 1000 */
+ int nloop = timeout * 1000;
while (--nloop) {
if (resp->cmd_state == TEE_CMD_STATE_COMPLETED)
return 0;
- usleep_range(5000, 5100);
+ usleep_range(1000, 1100);
}
dev_err(tee->dev, "tee: command 0x%x timed out, disabling PSP\n",
@@ -353,12 +370,16 @@ int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
return ret;
ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
- if (ret)
+ if (ret) {
+ resp->flag = CMD_RESPONSE_TIMEDOUT;
return ret;
+ }
memcpy(buf, &resp->buf[0], len);
*status = resp->status;
+ resp->flag = CMD_RESPONSE_COPIED;
+
return 0;
}
EXPORT_SYMBOL(psp_tee_process_cmd);
diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h
index f09960112115..49d26158b71e 100644
--- a/drivers/crypto/ccp/tee-dev.h
+++ b/drivers/crypto/ccp/tee-dev.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2019 Advanced Micro Devices, Inc.
+ * Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
*
* Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
* Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
@@ -18,7 +18,7 @@
#include <linux/mutex.h>
#define TEE_DEFAULT_TIMEOUT 10
-#define MAX_BUFFER_SIZE 992
+#define MAX_BUFFER_SIZE 988
/**
* enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration
@@ -82,6 +82,20 @@ enum tee_cmd_state {
};
/**
+ * enum cmd_resp_state - TEE command's response status maintained by driver
+ * @CMD_RESPONSE_INVALID: initial state when no command is written to ring
+ * @CMD_WAITING_FOR_RESPONSE: driver waiting for response from TEE
+ * @CMD_RESPONSE_TIMEDOUT: failed to get response from TEE
+ * @CMD_RESPONSE_COPIED: driver has copied response from TEE
+ */
+enum cmd_resp_state {
+ CMD_RESPONSE_INVALID,
+ CMD_WAITING_FOR_RESPONSE,
+ CMD_RESPONSE_TIMEDOUT,
+ CMD_RESPONSE_COPIED,
+};
+
+/**
* struct tee_ring_cmd - Structure of the command buffer in TEE ring
* @cmd_id: refers to &enum tee_cmd_id. Command id for the ring buffer
* interface
@@ -91,6 +105,7 @@ enum tee_cmd_state {
* @pdata: private data (currently unused)
* @res1: reserved region
* @buf: TEE command specific buffer
+ * @flag: refers to &enum cmd_resp_state
*/
struct tee_ring_cmd {
u32 cmd_id;
@@ -100,6 +115,7 @@ struct tee_ring_cmd {
u64 pdata;
u32 res1[2];
u8 buf[MAX_BUFFER_SIZE];
+ u32 flag;
/* Total size: 1024 bytes */
} __packed;
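The MAX_BUFFER_SIZE change from 992 to 988 pays for the new 4-byte flag while keeping the packed command at exactly 1024 bytes. Assuming the fixed fields ahead of buf (cmd_id through res1[2], per the struct above) total 32 bytes, the arithmetic can be checked with a one-line sketch:

/* Hedged size check, assuming 32 bytes of fields precede buf[]: */
_Static_assert(32 + 988 + 4 == 1024, "tee_ring_cmd must stay 1024 bytes");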
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index d0e59e942568..e599ac6dc162 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -352,10 +352,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
/* Map registers space */
new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
- if (IS_ERR(new_drvdata->cc_base)) {
- dev_err(dev, "Failed to ioremap registers");
+ if (IS_ERR(new_drvdata->cc_base))
return PTR_ERR(new_drvdata->cc_base);
- }
dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
req_mem_cc_regs);
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index f5a336634daa..6933546f87b1 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -126,11 +126,6 @@ static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
return container_of(ctx->dev, struct uld_ctx, dev);
}
-static inline int is_ofld_imm(const struct sk_buff *skb)
-{
- return (skb->len <= SGE_MAX_WR_LEN);
-}
-
static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
@@ -769,13 +764,14 @@ static inline void create_wreq(struct chcr_context *ctx,
struct uld_ctx *u_ctx = ULD_CTX(ctx);
unsigned int tx_channel_id, rx_channel_id;
unsigned int txqidx = 0, rxqidx = 0;
- unsigned int qid, fid;
+ unsigned int qid, fid, portno;
get_qidxs(req, &txqidx, &rxqidx);
qid = u_ctx->lldi.rxq_ids[rxqidx];
fid = u_ctx->lldi.rxq_ids[0];
+ portno = rxqidx / ctx->rxq_perchan;
tx_channel_id = txqidx / ctx->txq_perchan;
- rx_channel_id = rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
@@ -797,15 +793,13 @@ static inline void create_wreq(struct chcr_context *ctx,
/**
* create_cipher_wr - form the WR for cipher operations
- * @req: cipher req.
- * @ctx: crypto driver context of the request.
- * @qid: ingress qid where response of this WR should be received.
- * @op_type: encryption or decryption
+ * @wrparam: Container for create_cipher_wr()'s parameters
*/
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
struct chcr_context *ctx = c_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
@@ -822,6 +816,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
struct adapter *adap = padap(ctx->dev);
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
reqctx->dst_ofst);
dst_size = get_space_for_phys_dsgl(nents);
@@ -1559,7 +1554,8 @@ static inline void chcr_free_shash(struct crypto_shash *base_hash)
/**
* create_hash_wr - Create hash work request
- * @req - Cipher req base
+ * @req: Cipher req base
+ * @param: Container for create_hash_wr()'s parameters
*/
static struct sk_buff *create_hash_wr(struct ahash_request *req,
struct hash_wr_param *param)
@@ -1580,6 +1576,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
int error = 0;
unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
param->sg_len) <= SGE_MAX_WR_LEN;
@@ -2438,6 +2435,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_context *ctx = a_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2457,6 +2455,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
struct adapter *adap = padap(ctx->dev);
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
if (req->cryptlen == 0)
return NULL;
@@ -2710,9 +2709,11 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
struct dsgl_walk dsgl_walk;
unsigned int authsize = crypto_aead_authsize(tfm);
struct chcr_context *ctx = a_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
u32 temp;
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
dsgl_walk_init(&dsgl_walk, phys_cpl);
dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
temp = req->assoclen + req->cryptlen +
@@ -2752,9 +2753,11 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
struct chcr_context *ctx = c_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct dsgl_walk dsgl_walk;
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
dsgl_walk_init(&dsgl_walk, phys_cpl);
dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
reqctx->dst_ofst);
@@ -2958,6 +2961,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_context *ctx = a_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
@@ -2967,6 +2971,8 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
unsigned int tag_offset = 0, auth_offset = 0;
unsigned int assoclen;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
assoclen = req->assoclen - 8;
else
@@ -3127,6 +3133,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_context *ctx = a_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct sk_buff *skb = NULL;
@@ -3143,6 +3150,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
struct adapter *adap = padap(ctx->dev);
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
assoclen = req->assoclen - 8;
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index f91f9d762a45..39c70e6255f9 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -1,4 +1,4 @@
-/**
+/*
* This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
*
* Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
@@ -184,7 +184,7 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
struct uld_ctx *u_ctx;
/* Create the device and add it in the device list */
- pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
+ pr_info_once("%s\n", DRV_DESC);
if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
return ERR_PTR(-EOPNOTSUPP);
@@ -309,4 +309,3 @@ module_exit(chcr_crypto_exit);
MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index b02f981e7c32..f7c8bb95a71b 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -44,7 +44,6 @@
#include "cxgb4_uld.h"
#define DRV_MODULE_NAME "chcr"
-#define DRV_VERSION "1.0.0.0-ko"
#define DRV_DESC "Chelsio T6 Crypto Co-processor Driver"
#define MAX_PENDING_REQ_TO_HW 20
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 4ee010f39912..fa5a9f207bc9 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -21,7 +21,7 @@
/* Static structures */
static void __iomem *_iobase;
-static spinlock_t lock;
+static DEFINE_SPINLOCK(lock);
/* Write a 128 bit field (either a writable key or IV) */
static inline void
@@ -383,8 +383,6 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto erequest;
}
- spin_lock_init(&lock);
-
/* Clear any pending activity */
iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 843192666dc3..e572f9982d4e 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -68,6 +68,8 @@ config CRYPTO_DEV_HISI_HPRE
select CRYPTO_DEV_HISI_QM
select CRYPTO_DH
select CRYPTO_RSA
+ select CRYPTO_CURVE25519
+ select CRYPTO_ECDH
help
Support for HiSilicon HPRE(High Performance RSA Engine)
accelerator, which can accelerate RSA and DH algorithms.
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 181c109b19f7..e0b4a1982ee9 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -10,6 +10,14 @@
#define HPRE_PF_DEF_Q_NUM 64
#define HPRE_PF_DEF_Q_BASE 0
+/*
+ * type used in qm sqc DW6.
+ * 0 - Algorithm which has been supported in V2, like RSA, DH and so on;
+ * 1 - ECC algorithm in V3.
+ */
+#define HPRE_V2_ALG_TYPE 0
+#define HPRE_V3_ECC_ALG_TYPE 1
+
enum {
HPRE_CLUSTER0,
HPRE_CLUSTER1,
@@ -18,7 +26,6 @@ enum {
};
enum hpre_ctrl_dbgfs_file {
- HPRE_CURRENT_QM,
HPRE_CLEAR_ENABLE,
HPRE_CLUSTER_CTRL,
HPRE_DEBUG_FILE_NUM,
@@ -75,6 +82,9 @@ enum hpre_alg_type {
HPRE_ALG_KG_CRT = 0x3,
HPRE_ALG_DH_G2 = 0x4,
HPRE_ALG_DH = 0x5,
+ HPRE_ALG_ECC_MUL = 0xD,
+ /* shared by x25519 and x448, but x448 is not supported now */
+ HPRE_ALG_CURVE25519_MUL = 0x10,
};
struct hpre_sqe {
@@ -92,8 +102,8 @@ struct hpre_sqe {
__le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
};
-struct hisi_qp *hpre_create_qp(void);
-int hpre_algs_register(void);
-void hpre_algs_unregister(void);
+struct hisi_qp *hpre_create_qp(u8 type);
+int hpre_algs_register(struct hisi_qm *qm);
+void hpre_algs_unregister(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index a87f9904087a..a380087c83f7 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -1,7 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
+#include <crypto/curve25519.h>
#include <crypto/dh.h>
+#include <crypto/ecc_curve.h>
+#include <crypto/ecdh.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
@@ -36,6 +39,13 @@ struct hpre_ctx;
#define HPRE_DFX_SEC_TO_US 1000000
#define HPRE_DFX_US_TO_NS 1000
+/* size in bytes of the n prime */
+#define HPRE_ECC_NIST_P192_N_SIZE 24
+#define HPRE_ECC_NIST_P256_N_SIZE 32
+
+/* size in bytes */
+#define HPRE_ECC_HW256_KSZ_B 32
+
typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
struct hpre_rsa_ctx {
@@ -61,14 +71,35 @@ struct hpre_dh_ctx {
 * else if base is the counterpart public key we
* compute the shared secret
* ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
+ * low address: d--->n, please refer to Hisilicon HPRE UM
*/
- char *xa_p; /* low address: d--->n, please refer to Hisilicon HPRE UM */
+ char *xa_p;
dma_addr_t dma_xa_p;
char *g; /* m */
dma_addr_t dma_g;
};
+struct hpre_ecdh_ctx {
+ /* low address: p->a->k->b */
+ unsigned char *p;
+ dma_addr_t dma_p;
+
+ /* low address: x->y */
+ unsigned char *g;
+ dma_addr_t dma_g;
+};
+
+struct hpre_curve25519_ctx {
+ /* low address: p->a->k */
+ unsigned char *p;
+ dma_addr_t dma_p;
+
+ /* gx coordinate */
+ unsigned char *g;
+ dma_addr_t dma_g;
+};
+
struct hpre_ctx {
struct hisi_qp *qp;
struct hpre_asym_request **req_list;
@@ -80,7 +111,11 @@ struct hpre_ctx {
union {
struct hpre_rsa_ctx rsa;
struct hpre_dh_ctx dh;
+ struct hpre_ecdh_ctx ecdh;
+ struct hpre_curve25519_ctx curve25519;
};
+ /* for ecc algorithms */
+ unsigned int curve_id;
};
struct hpre_asym_request {
@@ -91,6 +126,8 @@ struct hpre_asym_request {
union {
struct akcipher_request *rsa;
struct kpp_request *dh;
+ struct kpp_request *ecdh;
+ struct kpp_request *curve25519;
} areq;
int err;
int req_id;
@@ -152,12 +189,12 @@ static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
}
}
-static struct hisi_qp *hpre_get_qp_and_start(void)
+static struct hisi_qp *hpre_get_qp_and_start(u8 type)
{
struct hisi_qp *qp;
int ret;
- qp = hpre_create_qp();
+ qp = hpre_create_qp(type);
if (!qp) {
pr_err("Can not create hpre qp!\n");
return ERR_PTR(-ENODEV);
@@ -261,8 +298,6 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
dma_addr_t tmp;
tmp = le64_to_cpu(sqe->in);
- if (unlikely(!tmp))
- return;
if (src) {
if (req->src)
@@ -272,8 +307,6 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
}
tmp = le64_to_cpu(sqe->out);
- if (unlikely(!tmp))
- return;
if (req->dst) {
if (dst)
@@ -288,13 +321,16 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
void **kreq)
{
+ struct device *dev = HPRE_DEV(ctx);
struct hpre_asym_request *req;
- int err, id, done;
+ unsigned int err, done, alg;
+ int id;
#define HPRE_NO_HW_ERR 0
#define HPRE_HW_TASK_DONE 3
#define HREE_HW_ERR_MASK 0x7ff
#define HREE_SQE_DONE_MASK 0x3
+#define HREE_ALG_TYPE_MASK 0x1f
id = (int)le16_to_cpu(sqe->tag);
req = ctx->req_list[id];
hpre_rm_req_from_ctx(req);
@@ -307,7 +343,11 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
HREE_SQE_DONE_MASK;
if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
- return 0;
+ return 0;
+
+ alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK;
+ dev_err_ratelimited(dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
+ alg, done, err);
return -EINVAL;
}
@@ -413,7 +453,6 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
struct hpre_sqe *sqe = resp;
struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
-
if (unlikely(!req)) {
atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
return;
@@ -422,18 +461,29 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
req->cb(ctx, resp);
}
-static int hpre_ctx_init(struct hpre_ctx *ctx)
+static void hpre_stop_qp_and_put(struct hisi_qp *qp)
+{
+ hisi_qm_stop_qp(qp);
+ hisi_qm_free_qps(&qp, 1);
+}
+
+static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
{
struct hisi_qp *qp;
+ int ret;
- qp = hpre_get_qp_and_start();
+ qp = hpre_get_qp_and_start(type);
if (IS_ERR(qp))
return PTR_ERR(qp);
qp->qp_ctx = ctx;
qp->req_cb = hpre_alg_cb;
- return hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
+ ret = hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
+ if (ret)
+ hpre_stop_qp_and_put(qp);
+
+ return ret;
}
static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
@@ -510,7 +560,6 @@ static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
return ret;
}
-#ifdef CONFIG_CRYPTO_DH
static int hpre_dh_compute_value(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
@@ -674,7 +723,7 @@ static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- return hpre_ctx_init(ctx);
+ return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
}
static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
@@ -683,7 +732,6 @@ static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
hpre_dh_clear_ctx(ctx, true);
}
-#endif
static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
{
@@ -1100,7 +1148,7 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
return PTR_ERR(ctx->rsa.soft_tfm);
}
- ret = hpre_ctx_init(ctx);
+ ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
if (ret)
crypto_free_akcipher(ctx->rsa.soft_tfm);
@@ -1115,6 +1163,734 @@ static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
crypto_free_akcipher(ctx->rsa.soft_tfm);
}
+static void hpre_key_to_big_end(u8 *data, int len)
+{
+ int i, j;
+ u8 tmp;
+
+ for (i = 0; i < len / 2; i++) {
+ j = len - i - 1;
+ tmp = data[j];
+ data[j] = data[i];
+ data[i] = tmp;
+ }
+}
+
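A hedged user-space illustration of the reversal above (hpre_key_to_big_end() converts a little-endian key buffer, in place, to the big-endian layout the hardware expects):

#include <stdio.h>

static void key_to_big_end(unsigned char *data, int len)
{
	for (int i = 0; i < len / 2; i++) {
		unsigned char tmp = data[len - i - 1];

		data[len - i - 1] = data[i];
		data[i] = tmp;
	}
}

int main(void)
{
	unsigned char k[4] = { 0xef, 0xbe, 0xad, 0xde }; /* LE bytes of 0xdeadbeef */

	key_to_big_end(k, sizeof(k));
	printf("%02x%02x%02x%02x\n", k[0], k[1], k[2], k[3]); /* prints deadbeef */
	return 0;
}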
+static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
+ bool is_ecdh)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz = ctx->key_sz;
+ unsigned int shift = sz << 1;
+
+ if (is_clear_all)
+ hisi_qm_stop_qp(ctx->qp);
+
+ if (is_ecdh && ctx->ecdh.p) {
+ /* ecdh: p->a->k->b */
+ memzero_explicit(ctx->ecdh.p + shift, sz);
+ dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
+ ctx->ecdh.p = NULL;
+ } else if (!is_ecdh && ctx->curve25519.p) {
+ /* curve25519: p->a->k */
+ memzero_explicit(ctx->curve25519.p + shift, sz);
+ dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
+ ctx->curve25519.dma_p);
+ ctx->curve25519.p = NULL;
+ }
+
+ hpre_ctx_clear(ctx, is_clear_all);
+}
+
+static unsigned int hpre_ecdh_supported_curve(unsigned short id)
+{
+ switch (id) {
+ case ECC_CURVE_NIST_P192:
+ case ECC_CURVE_NIST_P256:
+ return HPRE_ECC_HW256_KSZ_B;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
+{
+ unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
+ u8 i = 0;
+
+ while (i < ndigits - 1) {
+ memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
+ i++;
+ }
+
+ memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
+ hpre_key_to_big_end((u8 *)addr, cur_sz);
+}
+
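A worked example of fill_curve_param(), assuming 64-bit limbs: for NIST P-192, cur_sz is 24 bytes and ndigits is 3, so the loop copies limbs 0 and 1 in full (16 bytes), the tail memcpy takes sz = 24 - 2*8 = 8 bytes of the last limb, and hpre_key_to_big_end() then reverses all 24 bytes into the big-endian layout the hardware consumes.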
+static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
+ unsigned int cur_sz)
+{
+ unsigned int shifta = ctx->key_sz << 1;
+ unsigned int shiftb = ctx->key_sz << 2;
+ void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
+ void *a = ctx->ecdh.p + shifta - cur_sz;
+ void *b = ctx->ecdh.p + shiftb - cur_sz;
+ void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
+ void *y = ctx->ecdh.g + shifta - cur_sz;
+ const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
+ char *n;
+
+ if (unlikely(!curve))
+ return -EINVAL;
+
+ n = kzalloc(ctx->key_sz, GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
+
+ fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
+ fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
+ fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
+ fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
+ fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
+ fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);
+
+ if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
+ kfree(n);
+ return -EINVAL;
+ }
+
+ kfree(n);
+ return 0;
+}
+
+static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
+{
+ switch (id) {
+ case ECC_CURVE_NIST_P192:
+ return HPRE_ECC_NIST_P192_N_SIZE;
+ case ECC_CURVE_NIST_P256:
+ return HPRE_ECC_NIST_P256_N_SIZE;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz, shift, curve_sz;
+ int ret;
+
+ ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
+ if (!ctx->key_sz)
+ return -EINVAL;
+
+ curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
+ if (!curve_sz || params->key_size > curve_sz)
+ return -EINVAL;
+
+ sz = ctx->key_sz;
+
+ if (!ctx->ecdh.p) {
+ ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
+ GFP_KERNEL);
+ if (!ctx->ecdh.p)
+ return -ENOMEM;
+ }
+
+ shift = sz << 2;
+ ctx->ecdh.g = ctx->ecdh.p + shift;
+ ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;
+
+ ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
+ if (ret) {
+ dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
+ dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
+ ctx->ecdh.p = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool hpre_key_is_zero(char *key, unsigned short key_sz)
+{
+ int i;
+
+ for (i = 0; i < key_sz; i++)
+ if (key[i])
+ return false;
+
+ return true;
+}
+
+static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz, sz_shift;
+ struct ecdh params;
+ int ret;
+
+ if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
+ dev_err(dev, "failed to decode ecdh key!\n");
+ return -EINVAL;
+ }
+
+ if (hpre_key_is_zero(params.key, params.key_size)) {
+ dev_err(dev, "Invalid hpre key!\n");
+ return -EINVAL;
+ }
+
+ hpre_ecc_clear_ctx(ctx, false, true);
+
+ ret = hpre_ecdh_set_param(ctx, &params);
+ if (ret < 0) {
+ dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
+ return ret;
+ }
+
+ sz = ctx->key_sz;
+ sz_shift = (sz << 1) + sz - params.key_size;
+ memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);
+
+ return 0;
+}
+
+static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
+ struct hpre_asym_request *req,
+ struct scatterlist *dst,
+ struct scatterlist *src)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ struct hpre_sqe *sqe = &req->req;
+ dma_addr_t dma;
+
+ dma = le64_to_cpu(sqe->in);
+
+ if (src && req->src)
+ dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);
+
+ dma = le64_to_cpu(sqe->out);
+
+ if (req->dst)
+ dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
+ if (dst)
+ dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
+}
+
+static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
+{
+ unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+ struct hpre_asym_request *req = NULL;
+ struct kpp_request *areq;
+ u64 overtime_thrhld;
+ char *p;
+ int ret;
+
+ ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+ areq = req->areq.ecdh;
+ areq->dst_len = ctx->key_sz << 1;
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
+ p = sg_virt(areq->dst);
+ memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
+ memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
+
+ hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+ kpp_request_complete(areq, ret);
+
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
+}
+
+static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
+ struct kpp_request *req)
+{
+ struct hpre_asym_request *h_req;
+ struct hpre_sqe *msg;
+ int req_id;
+ void *tmp;
+
+ if (req->dst_len < ctx->key_sz << 1) {
+ req->dst_len = ctx->key_sz << 1;
+ return -EINVAL;
+ }
+
+ tmp = kpp_request_ctx(req);
+ h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req->cb = hpre_ecdh_cb;
+ h_req->areq.ecdh = req;
+ msg = &h_req->req;
+ memset(msg, 0, sizeof(*msg));
+ msg->key = cpu_to_le64(ctx->ecdh.dma_p);
+
+ msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
+ msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
+ h_req->ctx = ctx;
+
+ req_id = hpre_add_req_to_ctx(h_req);
+ if (req_id < 0)
+ return -EBUSY;
+
+ msg->tag = cpu_to_le16((u16)req_id);
+ return 0;
+}
+
+static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int tmpshift;
+ dma_addr_t dma = 0;
+ void *ptr;
+ int shift;
+
+ /* Src_data includes gx and gy. */
+ shift = ctx->key_sz - (len >> 1);
+ if (unlikely(shift < 0))
+ return -EINVAL;
+
+ ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
+ if (unlikely(!ptr))
+ return -ENOMEM;
+
+ tmpshift = ctx->key_sz << 1;
+ scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
+ memcpy(ptr + shift, ptr + tmpshift, len >> 1);
+ memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);
+
+ hpre_req->src = ptr;
+ msg->in = cpu_to_le64(dma);
+ return 0;
+}
+
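A worked example of the staging above, assuming NIST P-192 on the 256-bit hardware key size: key_sz = 32 and len = 48 (gx and gy, 24 bytes each), so shift = 32 - 24 = 8. The raw gx||gy pair is first copied to the scratch region at tmpshift = 64; gx then lands at offset 8 and gy at offset 40, so each coordinate fills a key_sz slot with zero padding ahead of it (dma_alloc_coherent() hands back zeroed memory).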
+static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ dma_addr_t dma = 0;
+
+ if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
+ dev_err(dev, "data or data length is illegal!\n");
+ return -EINVAL;
+ }
+
+ hpre_req->dst = NULL;
+ dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, dma))) {
+ dev_err(dev, "dma map data err!\n");
+ return -ENOMEM;
+ }
+
+ msg->out = cpu_to_le64(dma);
+ return 0;
+}
+
+static int hpre_ecdh_compute_value(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = HPRE_DEV(ctx);
+ void *tmp = kpp_request_ctx(req);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_sqe *msg = &hpre_req->req;
+ int ret;
+
+ ret = hpre_ecdh_msg_request_set(ctx, req);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
+ return ret;
+ }
+
+ if (req->src) {
+ ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to init src data, ret = %d!\n", ret);
+ goto clear_all;
+ }
+ } else {
+ msg->in = cpu_to_le64(ctx->ecdh.dma_g);
+ }
+
+ ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
+ goto clear_all;
+ }
+
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
+ ret = hpre_send(ctx, msg);
+ if (likely(!ret))
+ return -EINPROGRESS;
+
+clear_all:
+ hpre_rm_req_from_ctx(hpre_req);
+ hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+ return ret;
+}
+
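Note the completion convention here: returning -EINPROGRESS tells the crypto API that the request will finish asynchronously, with kpp_request_complete() invoked later from hpre_ecdh_cb(); a synchronous failure instead unwinds through clear_all and returns the error directly.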
+static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ /* max size is the pub_key_size, including x and y */
+ return ctx->key_sz << 1;
+}
+
+static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ ctx->curve_id = ECC_CURVE_NIST_P192;
+
+ return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+}
+
+static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ ctx->curve_id = ECC_CURVE_NIST_P256;
+
+ return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+}
+
+static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ hpre_ecc_clear_ctx(ctx, true, true);
+}
+
+static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
+ unsigned int len)
+{
+ u8 secret[CURVE25519_KEY_SIZE] = { 0 };
+ unsigned int sz = ctx->key_sz;
+ const struct ecc_curve *curve;
+ unsigned int shift = sz << 1;
+ void *p;
+
+ /*
+ * The key from 'buf' is in little-endian order; preprocess it as
+ * described in RFC 7748 ("k[0] &= 248, k[31] &= 127, k[31] |= 64"),
+ * then convert it to big endian. Only then does the result match the
+ * software curve25519 implementation in crypto.
+ */
+ memcpy(secret, buf, len);
+ curve25519_clamp_secret(secret);
+ hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
+
+ p = ctx->curve25519.p + sz - len;
+
+ curve = ecc_get_curve25519();
+
+ /* fill curve parameters */
+ fill_curve_param(p, curve->p, len, curve->g.ndigits);
+ fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
+ memcpy(p + shift, secret, len);
+ fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
+ memzero_explicit(secret, CURVE25519_KEY_SIZE);
+}
+
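A hedged sketch of the RFC 7748 clamp quoted in the comment above (this is the transformation curve25519_clamp_secret() applies to the little-endian scalar before the byte swap):

static void clamp_scalar(unsigned char k[32])	/* k is little-endian */
{
	k[0] &= 248;	/* clear the 3 low bits: force a multiple of 8 */
	k[31] &= 127;	/* clear the top bit */
	k[31] |= 64;	/* set bit 254 */
}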
+static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
+ unsigned int len)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz = ctx->key_sz;
+ unsigned int shift = sz << 1;
+
+ /* p->a->k->gx */
+ if (!ctx->curve25519.p) {
+ ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
+ &ctx->curve25519.dma_p,
+ GFP_KERNEL);
+ if (!ctx->curve25519.p)
+ return -ENOMEM;
+ }
+
+ ctx->curve25519.g = ctx->curve25519.p + shift + sz;
+ ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
+
+ hpre_curve25519_fill_curve(ctx, buf, len);
+
+ return 0;
+}
+
+static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = HPRE_DEV(ctx);
+ int ret = -EINVAL;
+
+ if (len != CURVE25519_KEY_SIZE ||
+ !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
+ dev_err(dev, "key is null or key len is not 32bytes!\n");
+ return ret;
+ }
+
+ /* Free old secret if any */
+ hpre_ecc_clear_ctx(ctx, false, false);
+
+ ctx->key_sz = CURVE25519_KEY_SIZE;
+ ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
+ if (ret) {
+ dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
+ hpre_ecc_clear_ctx(ctx, false, false);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
+ struct hpre_asym_request *req,
+ struct scatterlist *dst,
+ struct scatterlist *src)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ struct hpre_sqe *sqe = &req->req;
+ dma_addr_t dma;
+
+ dma = le64_to_cpu(sqe->in);
+
+ if (src && req->src)
+ dma_free_coherent(dev, ctx->key_sz, req->src, dma);
+
+ dma = le64_to_cpu(sqe->out);
+
+ if (req->dst)
+ dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
+ if (dst)
+ dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
+}
+
+static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
+{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+ struct hpre_asym_request *req = NULL;
+ struct kpp_request *areq;
+ u64 overtime_thrhld;
+ int ret;
+
+ ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+ areq = req->areq.curve25519;
+ areq->dst_len = ctx->key_sz;
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
+ hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
+
+ hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+ kpp_request_complete(areq, ret);
+
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
+}
+
+static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
+ struct kpp_request *req)
+{
+ struct hpre_asym_request *h_req;
+ struct hpre_sqe *msg;
+ int req_id;
+ void *tmp;
+
+ if (unlikely(req->dst_len < ctx->key_sz)) {
+ req->dst_len = ctx->key_sz;
+ return -EINVAL;
+ }
+
+ tmp = kpp_request_ctx(req);
+ h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req->cb = hpre_curve25519_cb;
+ h_req->areq.curve25519 = req;
+ msg = &h_req->req;
+ memset(msg, 0, sizeof(*msg));
+ msg->key = cpu_to_le64(ctx->curve25519.dma_p);
+
+ msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
+ msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
+ h_req->ctx = ctx;
+
+ req_id = hpre_add_req_to_ctx(h_req);
+ if (req_id < 0)
+ return -EBUSY;
+
+ msg->tag = cpu_to_le16((u16)req_id);
+ return 0;
+}
+
+static void hpre_curve25519_src_modulo_p(u8 *ptr)
+{
+ int i;
+
+ for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
+ ptr[i] = 0;
+
+ /* The residue is ptr's last byte minus 0xed (the last byte of p) */
+ ptr[i] -= 0xed;
+}
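/*
 * Worked example: p = 2^255 - 19 is 0x7f 0xff .. 0xff 0xed in big-endian
 * form. After bit 255 is masked the input is below 2^255, so any x >= p
 * matches p in its first 31 bytes and differs only in the last one;
 * e.g. x = p + 5 ends in 0xf2, and x mod p = 0xf2 - 0xed = 0x05 with all
 * other bytes zero, which is exactly what the loop and the final
 * subtraction produce.
 */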
+
+static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ u8 p[CURVE25519_KEY_SIZE] = { 0 };
+ const struct ecc_curve *curve;
+ dma_addr_t dma = 0;
+ u8 *ptr;
+
+ if (len != CURVE25519_KEY_SIZE) {
+ dev_err(dev, "sourc_data len is not 32bytes, len = %u!\n", len);
+ return -EINVAL;
+ }
+
+ ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
+ if (unlikely(!ptr))
+ return -ENOMEM;
+
+ scatterwalk_map_and_copy(ptr, data, 0, len, 0);
+
+ if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
+ dev_err(dev, "gx is null!\n");
+ goto err;
+ }
+
+ /*
+ * src_data (gx) is in little-endian order; the MSB of its final byte
+ * must be masked as described in RFC 7748, and the result converted
+ * to big-endian form before hisi_hpre can use it.
+ */
+ ptr[31] &= 0x7f;
+ hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
+
+ curve = ecc_get_curve25519();
+
+ fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
+
+ /*
+ * When src_data lies in (2^255 - 19) to (2^255 - 1) it exceeds p,
+ * so reduce it modulo p before use.
+ */
+ if (memcmp(ptr, p, ctx->key_sz) >= 0)
+ hpre_curve25519_src_modulo_p(ptr);
+
+ hpre_req->src = ptr;
+ msg->in = cpu_to_le64(dma);
+ return 0;
+
+err:
+ dma_free_coherent(dev, ctx->key_sz, ptr, dma);
+ return -EINVAL;
+}
+
+static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ dma_addr_t dma = 0;
+
+ if (!data || !sg_is_last(data) || len != ctx->key_sz) {
+ dev_err(dev, "data or data length is illegal!\n");
+ return -EINVAL;
+ }
+
+ hpre_req->dst = NULL;
+ dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, dma))) {
+ dev_err(dev, "dma map data err!\n");
+ return -ENOMEM;
+ }
+
+ msg->out = cpu_to_le64(dma);
+ return 0;
+}
+
+static int hpre_curve25519_compute_value(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = HPRE_DEV(ctx);
+ void *tmp = kpp_request_ctx(req);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_sqe *msg = &hpre_req->req;
+ int ret;
+
+ ret = hpre_curve25519_msg_request_set(ctx, req);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
+ return ret;
+ }
+
+ if (req->src) {
+ ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to init src data, ret = %d!\n",
+ ret);
+ goto clear_all;
+ }
+ } else {
+ msg->in = cpu_to_le64(ctx->curve25519.dma_g);
+ }
+
+ ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
+ goto clear_all;
+ }
+
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
+ ret = hpre_send(ctx, msg);
+ if (likely(!ret))
+ return -EINPROGRESS;
+
+clear_all:
+ hpre_rm_req_from_ctx(hpre_req);
+ hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+ return ret;
+}
+
+static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ return ctx->key_sz;
+}
+
+static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+}
+
+static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ hpre_ecc_clear_ctx(ctx, true, false);
+}
+
static struct akcipher_alg rsa = {
.sign = hpre_rsa_dec,
.verify = hpre_rsa_enc,
@@ -1135,7 +1911,6 @@ static struct akcipher_alg rsa = {
},
};
-#ifdef CONFIG_CRYPTO_DH
static struct kpp_alg dh = {
.set_secret = hpre_dh_set_secret,
.generate_public_key = hpre_dh_compute_value,
@@ -1152,9 +1927,83 @@ static struct kpp_alg dh = {
.cra_module = THIS_MODULE,
},
};
-#endif
-int hpre_algs_register(void)
+static struct kpp_alg ecdh_nist_p192 = {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p192_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p192",
+ .cra_driver_name = "hpre-ecdh",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static struct kpp_alg ecdh_nist_p256 = {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p256_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p256",
+ .cra_driver_name = "hpre-ecdh",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static struct kpp_alg curve25519_alg = {
+ .set_secret = hpre_curve25519_set_secret,
+ .generate_public_key = hpre_curve25519_compute_value,
+ .compute_shared_secret = hpre_curve25519_compute_value,
+ .max_size = hpre_curve25519_max_size,
+ .init = hpre_curve25519_init_tfm,
+ .exit = hpre_curve25519_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "curve25519",
+ .cra_driver_name = "hpre-curve25519",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static int hpre_register_ecdh(void)
+{
+ int ret;
+
+ ret = crypto_register_kpp(&ecdh_nist_p192);
+ if (ret)
+ return ret;
+
+ ret = crypto_register_kpp(&ecdh_nist_p256);
+ if (ret) {
+ crypto_unregister_kpp(&ecdh_nist_p192);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hpre_unregister_ecdh(void)
+{
+ crypto_unregister_kpp(&ecdh_nist_p256);
+ crypto_unregister_kpp(&ecdh_nist_p192);
+}
+
+int hpre_algs_register(struct hisi_qm *qm)
{
int ret;
@@ -1162,19 +2011,37 @@ int hpre_algs_register(void)
ret = crypto_register_akcipher(&rsa);
if (ret)
return ret;
-#ifdef CONFIG_CRYPTO_DH
+
ret = crypto_register_kpp(&dh);
if (ret)
- crypto_unregister_akcipher(&rsa);
-#endif
+ goto unreg_rsa;
+
+ if (qm->ver >= QM_HW_V3) {
+ ret = hpre_register_ecdh();
+ if (ret)
+ goto unreg_dh;
+ ret = crypto_register_kpp(&curve25519_alg);
+ if (ret)
+ goto unreg_ecdh;
+ }
+ return 0;
+unreg_ecdh:
+ hpre_unregister_ecdh();
+unreg_dh:
+ crypto_unregister_kpp(&dh);
+unreg_rsa:
+ crypto_unregister_akcipher(&rsa);
return ret;
}
-void hpre_algs_unregister(void)
+void hpre_algs_unregister(struct hisi_qm *qm)
{
- crypto_unregister_akcipher(&rsa);
-#ifdef CONFIG_CRYPTO_DH
+ if (qm->ver >= QM_HW_V3) {
+ crypto_unregister_kpp(&curve25519_alg);
+ hpre_unregister_ecdh();
+ }
+
crypto_unregister_kpp(&dh);
-#endif
+ crypto_unregister_akcipher(&rsa);
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index e7a2c70eb9cf..046bc962c8b2 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -13,7 +13,6 @@
#include <linux/uacce.h>
#include "hpre.h"
-#define HPRE_QUEUE_NUM_V2 1024
#define HPRE_QM_ABNML_INT_MASK 0x100004
#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)
#define HPRE_COMM_CNT_CLR_CE 0x0
@@ -119,7 +118,6 @@ static struct hisi_qm_list hpre_devices = {
};
static const char * const hpre_debug_file_name[] = {
- [HPRE_CURRENT_QM] = "current_qm",
[HPRE_CLEAR_ENABLE] = "rdclr_en",
[HPRE_CLUSTER_CTRL] = "cluster_ctrl",
};
@@ -226,41 +224,44 @@ static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
-struct hisi_qp *hpre_create_qp(void)
+struct hisi_qp *hpre_create_qp(u8 type)
{
int node = cpu_to_node(smp_processor_id());
struct hisi_qp *qp = NULL;
int ret;
- ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, 0, node, &qp);
+ if (type != HPRE_V2_ALG_TYPE && type != HPRE_V3_ECC_ALG_TYPE)
+ return NULL;
+
+ /*
+ * type: 0 - RSA/DH algorithms supported in V2,
+ * 1 - ECC algorithms supported in V3.
+ */
+ ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, type, node, &qp);
if (!ret)
return qp;
return NULL;
}
-static void hpre_pasid_enable(struct hisi_qm *qm)
+static void hpre_config_pasid(struct hisi_qm *qm)
{
- u32 val;
-
- val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
- val |= BIT(HPRE_PASID_EN_BIT);
- writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG);
- val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
- val |= BIT(HPRE_PASID_EN_BIT);
- writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG);
-}
+ u32 val1, val2;
-static void hpre_pasid_disable(struct hisi_qm *qm)
-{
- u32 val;
+ if (qm->ver >= QM_HW_V3)
+ return;
- val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
- val &= ~BIT(HPRE_PASID_EN_BIT);
- writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG);
- val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
- val &= ~BIT(HPRE_PASID_EN_BIT);
- writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG);
+ val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
+ val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
+ if (qm->use_sva) {
+ val1 |= BIT(HPRE_PASID_EN_BIT);
+ val2 |= BIT(HPRE_PASID_EN_BIT);
+ } else {
+ val1 &= ~BIT(HPRE_PASID_EN_BIT);
+ val2 &= ~BIT(HPRE_PASID_EN_BIT);
+ }
+ writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);
+ writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);
}
static int hpre_cfg_by_dsm(struct hisi_qm *qm)
@@ -320,7 +321,7 @@ static int hpre_set_cluster(struct hisi_qm *qm)
}
/*
- * For Kunpeng 920, we shoul disable FLR triggered by hardware (BME/PM/SRIOV).
+ * For Kunpeng 920, we should disable FLR triggered by hardware (BME/PM/SRIOV).
* Or it may stay in D3 state when we bind and unbind hpre quickly,
* as it does FLR triggered by hardware.
*/
@@ -383,15 +384,14 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
if (qm->ver == QM_HW_V2) {
ret = hpre_cfg_by_dsm(qm);
if (ret)
- dev_err(dev, "acpi_evaluate_dsm err.\n");
+ return ret;
disable_flr_of_bme(qm);
-
- /* Enable data buffer pasid */
- if (qm->use_sva)
- hpre_pasid_enable(qm);
}
+ /* Configure the data buffer pasid needed by Kunpeng 920 */
+ hpre_config_pasid(qm);
+
return ret;
}
@@ -401,10 +401,6 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
unsigned long offset;
int i;
- /* clear current_qm */
- writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
-
/* clear clusterX/cluster_ctrl */
for (i = 0; i < clusters_num; i++) {
offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
@@ -456,49 +452,6 @@ static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
return &hpre->qm;
}
-static u32 hpre_current_qm_read(struct hpre_debugfs_file *file)
-{
- struct hisi_qm *qm = hpre_file_to_qm(file);
-
- return readl(qm->io_base + QM_DFX_MB_CNT_VF);
-}
-
-static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
-{
- struct hisi_qm *qm = hpre_file_to_qm(file);
- u32 num_vfs = qm->vfs_num;
- u32 vfq_num, tmp;
-
- if (val > num_vfs)
- return -EINVAL;
-
- /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
- if (val == 0) {
- qm->debug.curr_qm_qp_num = qm->qp_num;
- } else {
- vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
- if (val == num_vfs) {
- qm->debug.curr_qm_qp_num =
- qm->ctrl_qp_num - qm->qp_num - (num_vfs - 1) * vfq_num;
- } else {
- qm->debug.curr_qm_qp_num = vfq_num;
- }
- }
-
- writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
-
- return 0;
-}
-
static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file)
{
struct hisi_qm *qm = hpre_file_to_qm(file);
@@ -519,7 +472,7 @@ static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val)
~HPRE_CTRL_CNT_CLR_CE_BIT) | val;
writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);
- return 0;
+ return 0;
}
static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
@@ -541,7 +494,7 @@ static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
- return 0;
+ return 0;
}
static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
@@ -554,9 +507,6 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
spin_lock_irq(&file->lock);
switch (file->type) {
- case HPRE_CURRENT_QM:
- val = hpre_current_qm_read(file);
- break;
case HPRE_CLEAR_ENABLE:
val = hpre_clear_enable_read(file);
break;
@@ -597,11 +547,6 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
spin_lock_irq(&file->lock);
switch (file->type) {
- case HPRE_CURRENT_QM:
- ret = hpre_current_qm_write(file, val);
- if (ret)
- goto err_input;
- break;
case HPRE_CLEAR_ENABLE:
ret = hpre_clear_enable_write(file, val);
if (ret)
@@ -740,11 +685,6 @@ static int hpre_ctrl_debug_init(struct hisi_qm *qm)
{
int ret;
- ret = hpre_create_debugfs_file(qm, NULL, HPRE_CURRENT_QM,
- HPRE_CURRENT_QM);
- if (ret)
- return ret;
-
ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
HPRE_CLEAR_ENABLE);
if (ret)
@@ -812,9 +752,9 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
}
if (pdev->revision >= QM_HW_V3)
- qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2\n";
+ qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2";
else
- qm->algs = "rsa\ndh\n";
+ qm->algs = "rsa\ndh";
qm->mode = uacce_mode;
qm->pdev = pdev;
qm->ver = pdev->revision;
@@ -867,6 +807,20 @@ static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
}
+static void hpre_err_info_init(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_info *err_info = &qm->err_info;
+
+ err_info->ce = QM_BASE_CE;
+ err_info->fe = 0;
+ err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
+ HPRE_OOO_ECC_2BIT_ERR;
+ err_info->dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE;
+ err_info->msi_wr_port = HPRE_WR_MSI_PORT;
+ err_info->acpi_rst = "HRST";
+ err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT;
+}
+
static const struct hisi_qm_err_ini hpre_err_ini = {
.hw_init = hpre_set_user_domain_and_cache,
.hw_err_enable = hpre_hw_error_enable,
@@ -875,16 +829,7 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.clear_dev_hw_err_status = hpre_clear_hw_err_status,
.log_dev_hw_err = hpre_log_hw_error,
.open_axi_master_ooo = hpre_open_axi_master_ooo,
- .err_info = {
- .ce = QM_BASE_CE,
- .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
- .fe = 0,
- .ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
- HPRE_OOO_ECC_2BIT_ERR,
- .dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE,
- .msi_wr_port = HPRE_WR_MSI_PORT,
- .acpi_rst = "HRST",
- }
+ .err_info_init = hpre_err_info_init,
};
static int hpre_pf_probe_init(struct hpre *hpre)
@@ -892,13 +837,12 @@ static int hpre_pf_probe_init(struct hpre *hpre)
struct hisi_qm *qm = &hpre->qm;
int ret;
- qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2;
-
ret = hpre_set_user_domain_and_cache(qm);
if (ret)
return ret;
qm->err_ini = &hpre_err_ini;
+ qm->err_ini->err_info_init(qm);
hisi_qm_dev_err_init(qm);
return 0;
@@ -1006,8 +950,6 @@ static void hpre_remove(struct pci_dev *pdev)
hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF) {
- if (qm->use_sva && qm->ver == QM_HW_V2)
- hpre_pasid_disable(qm);
hpre_cnt_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
hisi_qm_dev_err_uninit(qm);
@@ -1016,7 +958,6 @@ static void hpre_remove(struct pci_dev *pdev)
hisi_qm_uninit(qm);
}
-
static const struct pci_error_handlers hpre_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
.slot_reset = hisi_qm_dev_slot_reset,
@@ -1075,4 +1016,5 @@ module_exit(hpre_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
+MODULE_AUTHOR("Meng Yu <yumeng18@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 13cb4216561a..ce439a0c66c9 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -38,6 +38,7 @@
#define QM_MB_CMD_SQC_BT 0x4
#define QM_MB_CMD_CQC_BT 0x5
#define QM_MB_CMD_SQC_VFT_V2 0x6
+#define QM_MB_CMD_STOP_QP 0x8
#define QM_MB_CMD_SEND_BASE 0x300
#define QM_MB_EVENT_SHIFT 8
@@ -93,6 +94,12 @@
#define QM_DB_PRIORITY_SHIFT_V1 48
#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000
#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000
+#define QM_QUE_ISO_CFG_V 0x0030
+#define QM_QUE_ISO_EN 0x100154
+#define QM_CAPABILITY 0x100158
+#define QM_QP_NUM_MASK GENMASK(10, 0)
+#define QM_QP_DB_INTERVAL 0x10000
+#define QM_QP_MAX_NUM_SHIFT 11
#define QM_DB_CMD_SHIFT_V2 12
#define QM_DB_RAND_SHIFT_V2 16
#define QM_DB_INDEX_SHIFT_V2 32
@@ -129,9 +136,9 @@
#define QM_DFX_CNT_CLR_CE 0x100118
#define QM_ABNORMAL_INT_SOURCE 0x100000
-#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(12, 0)
+#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(14, 0)
#define QM_ABNORMAL_INT_MASK 0x100004
-#define QM_ABNORMAL_INT_MASK_VALUE 0x1fff
+#define QM_ABNORMAL_INT_MASK_VALUE 0x7fff
#define QM_ABNORMAL_INT_STATUS 0x100008
#define QM_ABNORMAL_INT_SET 0x10000c
#define QM_ABNORMAL_INF00 0x100010
@@ -164,6 +171,14 @@
#define ACC_AM_ROB_ECC_INT_STS 0x300104
#define ACC_ROB_ECC_ERR_MULTPL BIT(1)
+#define QM_DFX_MB_CNT_VF 0x104010
+#define QM_DFX_DB_CNT_VF 0x104020
+#define QM_DFX_SQE_CNT_VF_SQN 0x104030
+#define QM_DFX_CQE_CNT_VF_CQN 0x104040
+#define QM_DFX_QN_SHIFT 16
+#define CURRENT_FUN_MASK GENMASK(5, 0)
+#define CURRENT_Q_MASK GENMASK(31, 16)
+
#define POLL_PERIOD 10
#define POLL_TIMEOUT 1000
#define WAIT_PERIOD_US_MAX 200
@@ -173,6 +188,7 @@
#define QM_CACHE_WB_DONE 0x208
#define PCI_BAR_2 2
+#define PCI_BAR_4 4
#define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0)
#define QMC_ALIGN(sz) ALIGN(sz, 32)
@@ -334,6 +350,7 @@ struct hisi_qm_hw_ops {
void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
void (*hw_error_uninit)(struct hisi_qm *qm);
enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
+ int (*stop_qp)(struct hisi_qp *qp);
};
struct qm_dfx_item {
@@ -350,6 +367,7 @@ static struct qm_dfx_item qm_dfx_files[] = {
};
static const char * const qm_debug_file_name[] = {
+ [CURRENT_QM] = "current_qm",
[CURRENT_Q] = "current_q",
[CLEAR_ENABLE] = "clear_enable",
};
@@ -373,6 +391,8 @@ static const struct hisi_qm_hw_error qm_hw_error[] = {
{ .int_msk = BIT(10), .msg = "qm_db_timeout" },
{ .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
{ .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
+ { .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
+ { .int_msk = BIT(14), .msg = "qm_flr_timeout" },
{ /* sentinel */ }
};
@@ -557,21 +577,22 @@ static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
- u64 doorbell;
- u64 dbase;
+ void __iomem *io_base = qm->io_base;
u16 randata = 0;
+ u64 doorbell;
if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
- dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
+ io_base = qm->db_io_base + (u64)qn * qm->db_interval +
+ QM_DOORBELL_SQ_CQ_BASE_V2;
else
- dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;
+ io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;
doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
((u64)randata << QM_DB_RAND_SHIFT_V2) |
((u64)index << QM_DB_INDEX_SHIFT_V2) |
((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
- writeq(doorbell, qm->io_base + dbase);
+ writeq(doorbell, io_base);
}
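/*
 * Resulting 64-bit doorbell word, per the shifts used above (the
 * priority field position is inferred; its define is not in this hunk):
 *
 *	bits [11:0]  qn       - queue number
 *	bits [15:12] cmd      - QM_DB_CMD_SHIFT_V2 == 12
 *	bits [31:16] randata  - QM_DB_RAND_SHIFT_V2 == 16
 *	bits [47:32] index    - QM_DB_INDEX_SHIFT_V2 == 32
 *	bits [63:48] priority - QM_DB_PRIORITY_SHIFT_V2
 */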
static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
@@ -865,6 +886,26 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
return 0;
}
+static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
+{
+ u32 remain_q_num, vfq_num;
+ u32 num_vfs = qm->vfs_num;
+
+ vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
+ if (vfq_num >= qm->max_qp_num)
+ return qm->max_qp_num;
+
+ remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
+ if (vfq_num + remain_q_num <= qm->max_qp_num)
+ return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;
+
+ /*
+ * If vfq_num + remain_q_num > max_qp_num, each of the last
+ * remain_q_num VFs gets one extra queue.
+ */
+ return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
+}
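/*
 * Worked example with hypothetical numbers: ctrl_qp_num = 1024, the PF
 * keeps qp_num = 254, num_vfs = 3, max_qp_num = 1024. Then
 * vfq_num = 770 / 3 = 256 and remain_q_num = 2, so vfq_num + remain_q_num
 * = 258 <= max_qp_num: VF3 (fun_num == num_vfs) is assigned 258 queues
 * and VF1/VF2 get 256 each, 770 in total.
 */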
+
static struct hisi_qm *file_to_qm(struct debugfs_file *file)
{
struct qm_debug *debug = file->debug;
@@ -918,6 +959,41 @@ static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
return 0;
}
+static u32 current_qm_read(struct debugfs_file *file)
+{
+ struct hisi_qm *qm = file_to_qm(file);
+
+ return readl(qm->io_base + QM_DFX_MB_CNT_VF);
+}
+
+static int current_qm_write(struct debugfs_file *file, u32 val)
+{
+ struct hisi_qm *qm = file_to_qm(file);
+ u32 tmp;
+
+ if (val > qm->vfs_num)
+ return -EINVAL;
+
+ /* Calculate curr_qm_qp_num from the PF or VF device ID and store it */
+ if (!val)
+ qm->debug.curr_qm_qp_num = qm->qp_num;
+ else
+ qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);
+
+ writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+
+ return 0;
+}
+
static ssize_t qm_debug_read(struct file *filp, char __user *buf,
size_t count, loff_t *pos)
{
@@ -929,6 +1005,9 @@ static ssize_t qm_debug_read(struct file *filp, char __user *buf,
mutex_lock(&file->lock);
switch (index) {
+ case CURRENT_QM:
+ val = current_qm_read(file);
+ break;
case CURRENT_Q:
val = current_q_read(file);
break;
@@ -971,27 +1050,24 @@ static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
mutex_lock(&file->lock);
switch (index) {
+ case CURRENT_QM:
+ ret = current_qm_write(file, val);
+ break;
case CURRENT_Q:
ret = current_q_write(file, val);
- if (ret)
- goto err_input;
break;
case CLEAR_ENABLE:
ret = clear_enable_write(file, val);
- if (ret)
- goto err_input;
break;
default:
ret = -EINVAL;
- goto err_input;
}
mutex_unlock(&file->lock);
- return count;
+ if (ret)
+ return ret;
-err_input:
- mutex_unlock(&file->lock);
- return ret;
+ return count;
}
static const struct file_operations qm_debug_fops = {
@@ -1529,12 +1605,12 @@ static const struct file_operations qm_cmd_fops = {
.write = qm_cmd_write,
};
-static void qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
+static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
+ enum qm_debug_file index)
{
- struct dentry *qm_d = qm->debug.qm_d;
struct debugfs_file *file = qm->debug.files + index;
- debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file,
+ debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
&qm_debug_fops);
file->index = index;
@@ -1628,7 +1704,7 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) {
writel(error_status, qm->io_base +
QM_ABNORMAL_INT_SOURCE);
- writel(qm->err_ini->err_info.nfe,
+ writel(qm->err_info.nfe,
qm->io_base + QM_RAS_NFE_ENABLE);
return ACC_ERR_RECOVERED;
}
@@ -1639,6 +1715,11 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
return ACC_ERR_RECOVERED;
}
+static int qm_stop_qp(struct hisi_qp *qp)
+{
+ return qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
+}
+
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
.qm_db = qm_db_v1,
.get_irq_num = qm_get_irq_num_v1,
@@ -1654,6 +1735,16 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
.hw_error_handle = qm_hw_error_handle_v2,
};
+static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
+ .get_vft = qm_get_vft_v2,
+ .qm_db = qm_db_v2,
+ .get_irq_num = qm_get_irq_num_v2,
+ .hw_error_init = qm_hw_error_init_v2,
+ .hw_error_uninit = qm_hw_error_uninit_v2,
+ .hw_error_handle = qm_hw_error_handle_v2,
+ .stop_qp = qm_stop_qp,
+};
+
static void *qm_get_avail_sqe(struct hisi_qp *qp)
{
struct hisi_qp_status *qp_status = &qp->qp_status;
@@ -1933,6 +2024,14 @@ static int qm_drain_qp(struct hisi_qp *qp)
if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit)
return 0;
+ /* Kunpeng930 supports draining a qp via the device itself */
+ if (qm->ops->stop_qp) {
+ ret = qm->ops->stop_qp(qp);
+ if (ret)
+ dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
+ return ret;
+ }
+
addr = qm_ctx_alloc(qm, size, &dma_addr);
if (IS_ERR(addr)) {
dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
@@ -2132,6 +2231,8 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
{
struct hisi_qp *qp = q->priv;
struct hisi_qm *qm = qp->qm;
+ resource_size_t phys_base = qm->db_phys_base +
+ qp->qp_id * qm->db_interval;
size_t sz = vma->vm_end - vma->vm_start;
struct pci_dev *pdev = qm->pdev;
struct device *dev = &pdev->dev;
@@ -2143,16 +2244,19 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
if (qm->ver == QM_HW_V1) {
if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
return -EINVAL;
- } else {
+ } else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) {
if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
return -EINVAL;
+ } else {
+ if (sz > qm->db_interval)
+ return -EINVAL;
}
vma->vm_flags |= VM_IO;
return remap_pfn_range(vma, vma->vm_start,
- qm->phys_base >> PAGE_SHIFT,
+ phys_base >> PAGE_SHIFT,
sz, pgprot_noncached(vma->vm_page_prot));
case UACCE_QFRT_DUS:
if (sz != qp->qdma.size)
@@ -2267,14 +2371,20 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
uacce->priv = qm;
uacce->algs = qm->algs;
- if (qm->ver == QM_HW_V1) {
- mmio_page_nr = QM_DOORBELL_PAGE_NR;
+ if (qm->ver == QM_HW_V1)
uacce->api_ver = HISI_QM_API_VER_BASE;
- } else {
+ else if (qm->ver == QM_HW_V2)
+ uacce->api_ver = HISI_QM_API_VER2_BASE;
+ else
+ uacce->api_ver = HISI_QM_API_VER3_BASE;
+
+ if (qm->ver == QM_HW_V1)
+ mmio_page_nr = QM_DOORBELL_PAGE_NR;
+ else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation)
mmio_page_nr = QM_DOORBELL_PAGE_NR +
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
- uacce->api_ver = HISI_QM_API_VER2_BASE;
- }
+ else
+ mmio_page_nr = qm->db_interval / PAGE_SIZE;
dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT;
@@ -2482,8 +2592,10 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
if (qm->ver == QM_HW_V1)
qm->ops = &qm_hw_ops_v1;
- else
+ else if (qm->ver == QM_HW_V2)
qm->ops = &qm_hw_ops_v2;
+ else
+ qm->ops = &qm_hw_ops_v3;
pci_set_drvdata(pdev, qm);
mutex_init(&qm->mailbox_lock);
@@ -2492,13 +2604,23 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
qm->misc_ctl = false;
}
-static void hisi_qm_pci_uninit(struct hisi_qm *qm)
+static void qm_put_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
- pci_free_irq_vectors(pdev);
+ if (qm->use_db_isolation)
+ iounmap(qm->db_io_base);
+
iounmap(qm->io_base);
pci_release_mem_regions(pdev);
+}
+
+static void hisi_qm_pci_uninit(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+
+ pci_free_irq_vectors(pdev);
+ qm_put_pci_res(qm);
pci_disable_device(pdev);
}
@@ -2527,7 +2649,6 @@ void hisi_qm_uninit(struct hisi_qm *qm)
hisi_qm_cache_wb(qm);
dma_free_coherent(dev, qm->qdma.size,
qm->qdma.va, qm->qdma.dma);
- memset(&qm->qdma, 0, sizeof(qm->qdma));
}
qm_irq_unregister(qm);
@@ -2681,7 +2802,7 @@ static int __hisi_qm_start(struct hisi_qm *qm)
{
int ret;
- WARN_ON(!qm->qdma.dma);
+ WARN_ON(!qm->qdma.va);
if (qm->fun_type == QM_HW_PF) {
ret = qm_dev_mem_reset(qm);
@@ -2930,9 +3051,11 @@ void hisi_qm_debug_init(struct hisi_qm *qm)
qm->debug.qm_d = qm_d;
/* only show this in PF */
- if (qm->fun_type == QM_HW_PF)
+ if (qm->fun_type == QM_HW_PF) {
+ qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
- qm_create_debugfs_file(qm, i);
+ qm_create_debugfs_file(qm, qm_d, i);
+ }
debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
@@ -2960,6 +3083,10 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
struct qm_dfx_registers *regs;
int i;
+ /* clear current_qm */
+ writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
+
/* clear current_q */
writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
@@ -2982,7 +3109,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
static void qm_hw_error_init(struct hisi_qm *qm)
{
- const struct hisi_qm_err_info *err_info = &qm->err_ini->err_info;
+ struct hisi_qm_err_info *err_info = &qm->err_info;
if (!qm->ops->hw_error_init) {
dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
@@ -3175,30 +3302,46 @@ EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
{
- u32 remain_q_num, q_num, i, j;
+ u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j;
+ u32 max_qp_num = qm->max_qp_num;
u32 q_base = qm->qp_num;
int ret;
if (!num_vfs)
return -EINVAL;
- remain_q_num = qm->ctrl_qp_num - qm->qp_num;
+ vfs_q_num = qm->ctrl_qp_num - qm->qp_num;
- /* If remain queues not enough, return error. */
- if (qm->ctrl_qp_num < qm->qp_num || remain_q_num < num_vfs)
+ /* If vfs_q_num is less than num_vfs, return error. */
+ if (vfs_q_num < num_vfs)
return -EINVAL;
- q_num = remain_q_num / num_vfs;
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, q_num);
+ q_num = vfs_q_num / num_vfs;
+ remain_q_num = vfs_q_num % num_vfs;
+
+ for (i = num_vfs; i > 0; i--) {
+ /*
+ * If q_num + remain_q_num exceeds max_qp_num for the last VF,
+ * spread the remaining queues across the VFs one by one instead.
+ */
+ if (i == num_vfs && q_num + remain_q_num <= max_qp_num) {
+ act_q_num = q_num + remain_q_num;
+ remain_q_num = 0;
+ } else if (remain_q_num > 0) {
+ act_q_num = q_num + 1;
+ remain_q_num--;
+ } else {
+ act_q_num = q_num;
+ }
+
+ act_q_num = min_t(int, act_q_num, max_qp_num);
+ ret = hisi_qm_set_vft(qm, i, q_base, act_q_num);
if (ret) {
- for (j = i; j > 0; j--)
+ for (j = num_vfs; j > i; j--)
hisi_qm_set_vft(qm, j, 0, 0);
return ret;
}
- q_base += q_num;
+ q_base += act_q_num;
}
return 0;
@@ -3318,15 +3461,15 @@ static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
/* get device hardware error status */
err_sts = qm->err_ini->get_dev_hw_err_status(qm);
if (err_sts) {
- if (err_sts & qm->err_ini->err_info.ecc_2bits_mask)
+ if (err_sts & qm->err_info.ecc_2bits_mask)
qm->err_status.is_dev_ecc_mbit = true;
if (qm->err_ini->log_dev_hw_err)
qm->err_ini->log_dev_hw_err(qm, err_sts);
/* ce error does not need to be reset */
- if ((err_sts | qm->err_ini->err_info.dev_ce_mask) ==
- qm->err_ini->err_info.dev_ce_mask) {
+ if ((err_sts | qm->err_info.dev_ce_mask) ==
+ qm->err_info.dev_ce_mask) {
if (qm->err_ini->clear_dev_hw_err_status)
qm->err_ini->clear_dev_hw_err_status(qm,
err_sts);
@@ -3639,7 +3782,7 @@ static int qm_soft_reset(struct hisi_qm *qm)
acpi_status s;
s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
- qm->err_ini->err_info.acpi_rst,
+ qm->err_info.acpi_rst,
NULL, &value);
if (ACPI_FAILURE(s)) {
pci_err(pdev, "NO controller reset method!\n");
@@ -3707,12 +3850,11 @@ static void qm_restart_prepare(struct hisi_qm *qm)
/* temporarily close the OOO port used for PEH to write out MSI */
value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
- writel(value & ~qm->err_ini->err_info.msi_wr_port,
+ writel(value & ~qm->err_info.msi_wr_port,
qm->io_base + ACC_AM_CFG_PORT_WR_EN);
/* clear dev ecc 2bit error source if having */
- value = qm_get_dev_err_status(qm) &
- qm->err_ini->err_info.ecc_2bits_mask;
+ value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
if (value && qm->err_ini->clear_dev_hw_err_status)
qm->err_ini->clear_dev_hw_err_status(qm, value);
@@ -3736,7 +3878,7 @@ static void qm_restart_done(struct hisi_qm *qm)
/* open the OOO port for PEH to write out MSI */
value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
- value |= qm->err_ini->err_info.msi_wr_port;
+ value |= qm->err_info.msi_wr_port;
writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);
qm->err_status.is_qm_ecc_mbit = false;
@@ -3875,8 +4017,7 @@ static int qm_check_dev_error(struct hisi_qm *qm)
if (ret)
return ret;
- return (qm_get_dev_err_status(qm) &
- qm->err_ini->err_info.ecc_2bits_mask);
+ return (qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask);
}
void hisi_qm_reset_prepare(struct pci_dev *pdev)
@@ -4084,7 +4225,7 @@ int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
mutex_unlock(&qm_list->lock);
if (flag) {
- ret = qm_list->register_to_crypto();
+ ret = qm_list->register_to_crypto(qm);
if (ret) {
mutex_lock(&qm_list->lock);
list_del(&qm->list);
@@ -4115,59 +4256,134 @@ void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
mutex_unlock(&qm_list->lock);
if (list_empty(&qm_list->list))
- qm_list->unregister_from_crypto();
+ qm_list->unregister_from_crypto(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
-static int hisi_qm_pci_init(struct hisi_qm *qm)
+static int qm_get_qp_num(struct hisi_qm *qm)
+{
+ if (qm->ver == QM_HW_V1)
+ qm->ctrl_qp_num = QM_QNUM_V1;
+ else if (qm->ver == QM_HW_V2)
+ qm->ctrl_qp_num = QM_QNUM_V2;
+ else
+ qm->ctrl_qp_num = readl(qm->io_base + QM_CAPABILITY) &
+ QM_QP_NUM_MASK;
+
+ if (qm->use_db_isolation)
+ qm->max_qp_num = (readl(qm->io_base + QM_CAPABILITY) >>
+ QM_QP_MAX_NUM_SHIFT) & QM_QP_NUM_MASK;
+ else
+ qm->max_qp_num = qm->ctrl_qp_num;
+
+ /* check if qp number is valid */
+ if (qm->qp_num > qm->max_qp_num) {
+ dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n",
+ qm->qp_num, qm->max_qp_num);
+ return -EINVAL;
+ }
+
+ return 0;
+}
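/*
 * Illustrative decode of the capability register read above, with a
 * hypothetical raw value of 0x200400: ctrl_qp_num = 0x200400 &
 * GENMASK(10, 0) = 1024, and max_qp_num = (0x200400 >>
 * QM_QP_MAX_NUM_SHIFT) & GENMASK(10, 0) = 1024 as well.
 */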
+
+static int qm_get_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
struct device *dev = &pdev->dev;
- unsigned int num_vec;
int ret;
- ret = pci_enable_device_mem(pdev);
- if (ret < 0) {
- dev_err(dev, "Failed to enable device mem!\n");
- return ret;
- }
-
ret = pci_request_mem_regions(pdev, qm->dev_name);
if (ret < 0) {
dev_err(dev, "Failed to request mem regions!\n");
- goto err_disable_pcidev;
+ return ret;
}
qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
- qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2);
- qm->io_base = ioremap(qm->phys_base, qm->phys_size);
+ qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2));
if (!qm->io_base) {
ret = -EIO;
- goto err_release_mem_regions;
+ goto err_request_mem_regions;
+ }
+
+ if (qm->ver > QM_HW_V2) {
+ if (qm->fun_type == QM_HW_PF)
+ qm->use_db_isolation = readl(qm->io_base +
+ QM_QUE_ISO_EN) & BIT(0);
+ else
+ qm->use_db_isolation = readl(qm->io_base +
+ QM_QUE_ISO_CFG_V) & BIT(0);
+ }
+
+ if (qm->use_db_isolation) {
+ qm->db_interval = QM_QP_DB_INTERVAL;
+ qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
+ qm->db_io_base = ioremap(qm->db_phys_base,
+ pci_resource_len(pdev, PCI_BAR_4));
+ if (!qm->db_io_base) {
+ ret = -EIO;
+ goto err_ioremap;
+ }
+ } else {
+ qm->db_phys_base = qm->phys_base;
+ qm->db_io_base = qm->io_base;
+ qm->db_interval = 0;
}
+ if (qm->fun_type == QM_HW_PF) {
+ ret = qm_get_qp_num(qm);
+ if (ret)
+ goto err_db_ioremap;
+ }
+
+ return 0;
+
+err_db_ioremap:
+ if (qm->use_db_isolation)
+ iounmap(qm->db_io_base);
+err_ioremap:
+ iounmap(qm->io_base);
+err_request_mem_regions:
+ pci_release_mem_regions(pdev);
+ return ret;
+}
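/*
 * With doorbell isolation enabled, each qp's doorbell page lives in
 * PCI_BAR_4 at db_phys_base + qp_id * QM_QP_DB_INTERVAL (0x10000), so
 * userspace can be granted one 64K doorbell window per queue; e.g. a
 * hypothetical qp_id of 3 maps at db_phys_base + 0x30000.
 */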
+
+static int hisi_qm_pci_init(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct device *dev = &pdev->dev;
+ unsigned int num_vec;
+ int ret;
+
+ ret = pci_enable_device_mem(pdev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable device mem!\n");
+ return ret;
+ }
+
+ ret = qm_get_pci_res(qm);
+ if (ret)
+ goto err_disable_pcidev;
+
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret < 0)
- goto err_iounmap;
+ goto err_get_pci_res;
pci_set_master(pdev);
if (!qm->ops->get_irq_num) {
ret = -EOPNOTSUPP;
- goto err_iounmap;
+ goto err_get_pci_res;
}
num_vec = qm->ops->get_irq_num(qm);
ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
if (ret < 0) {
dev_err(dev, "Failed to enable MSI vectors!\n");
- goto err_iounmap;
+ goto err_get_pci_res;
}
return 0;
-err_iounmap:
- iounmap(qm->io_base);
-err_release_mem_regions:
- pci_release_mem_regions(pdev);
+err_get_pci_res:
+ qm_put_pci_res(qm);
err_disable_pcidev:
pci_disable_device(pdev);
return ret;
@@ -4187,28 +4403,28 @@ int hisi_qm_init(struct hisi_qm *qm)
hisi_qm_pre_init(qm);
- ret = qm_alloc_uacce(qm);
- if (ret < 0)
- dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
-
ret = hisi_qm_pci_init(qm);
if (ret)
- goto err_remove_uacce;
+ return ret;
ret = qm_irq_register(qm);
if (ret)
- goto err_pci_uninit;
+ goto err_pci_init;
if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
/* v2 starts to support get vft by mailbox */
ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
if (ret)
- goto err_irq_unregister;
+ goto err_irq_register;
}
+ ret = qm_alloc_uacce(qm);
+ if (ret < 0)
+ dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
+
ret = hisi_qm_memory_init(qm);
if (ret)
- goto err_irq_unregister;
+ goto err_alloc_uacce;
INIT_WORK(&qm->work, qm_work_process);
if (qm->fun_type == QM_HW_PF)
@@ -4218,13 +4434,13 @@ int hisi_qm_init(struct hisi_qm *qm)
return 0;
-err_irq_unregister:
- qm_irq_unregister(qm);
-err_pci_uninit:
- hisi_qm_pci_uninit(qm);
-err_remove_uacce:
+err_alloc_uacce:
uacce_remove(qm->uacce);
qm->uacce = NULL;
+err_irq_register:
+ qm_irq_unregister(qm);
+err_pci_init:
+ hisi_qm_pci_uninit(qm);
return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_init);
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 54967c6b9c78..acefdf8b3a50 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -51,14 +51,6 @@
#define PEH_AXUSER_CFG 0x401001
#define PEH_AXUSER_CFG_ENABLE 0xffffffff
-#define QM_DFX_MB_CNT_VF 0x104010
-#define QM_DFX_DB_CNT_VF 0x104020
-#define QM_DFX_SQE_CNT_VF_SQN 0x104030
-#define QM_DFX_CQE_CNT_VF_CQN 0x104040
-#define QM_DFX_QN_SHIFT 16
-#define CURRENT_FUN_MASK GENMASK(5, 0)
-#define CURRENT_Q_MASK GENMASK(31, 16)
-
#define QM_AXI_RRESP BIT(0)
#define QM_AXI_BRESP BIT(1)
#define QM_ECC_MBIT BIT(2)
@@ -72,10 +64,13 @@
#define QM_DB_TIMEOUT BIT(10)
#define QM_OF_FIFO_OF BIT(11)
#define QM_DB_RANDOM_INVALID BIT(12)
+#define QM_MAILBOX_TIMEOUT BIT(13)
+#define QM_FLR_TIMEOUT BIT(14)
#define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \
QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \
- QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID)
+ QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID | \
+ QM_MAILBOX_TIMEOUT | QM_FLR_TIMEOUT)
#define QM_BASE_CE QM_ECC_1BIT
#define QM_Q_DEPTH 1024
@@ -123,6 +118,7 @@ enum qm_fun_type {
};
enum qm_debug_file {
+ CURRENT_QM,
CURRENT_Q,
CLEAR_ENABLE,
DEBUG_FILE_NUM,
@@ -193,14 +189,14 @@ struct hisi_qm_err_ini {
void (*open_axi_master_ooo)(struct hisi_qm *qm);
void (*close_axi_master_ooo)(struct hisi_qm *qm);
void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
- struct hisi_qm_err_info err_info;
+ void (*err_info_init)(struct hisi_qm *qm);
};
struct hisi_qm_list {
struct mutex lock;
struct list_head list;
- int (*register_to_crypto)(void);
- void (*unregister_from_crypto)(void);
+ int (*register_to_crypto)(struct hisi_qm *qm);
+ void (*unregister_from_crypto)(struct hisi_qm *qm);
};
struct hisi_qm {
@@ -209,12 +205,15 @@ struct hisi_qm {
const char *dev_name;
struct pci_dev *pdev;
void __iomem *io_base;
+ void __iomem *db_io_base;
u32 sqe_size;
u32 qp_base;
u32 qp_num;
u32 qp_in_used;
u32 ctrl_qp_num;
+ u32 max_qp_num;
u32 vfs_num;
+ u32 db_interval;
struct list_head list;
struct hisi_qm_list *qm_list;
@@ -230,6 +229,7 @@ struct hisi_qm {
struct hisi_qm_status status;
const struct hisi_qm_err_ini *err_ini;
+ struct hisi_qm_err_info err_info;
struct hisi_qm_err_status err_status;
unsigned long misc_ctl; /* driver removing and reset sched */
@@ -252,8 +252,11 @@ struct hisi_qm {
const char *algs;
bool use_sva;
bool is_frozen;
+
+ /* doorbell isolation enable */
+ bool use_db_isolation;
resource_size_t phys_base;
- resource_size_t phys_size;
+ resource_size_t db_phys_base;
struct uacce_device *uacce;
int mode;
};
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index 8ca945ac297e..0a3c8f019b02 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2016-2017 Hisilicon Limited. */
+/* Copyright (c) 2016-2017 HiSilicon Limited. */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index 91ee2bb575df..c8de1b51c843 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Driver for the Hisilicon SEC units found on Hip06 Hip07
+ * Driver for the HiSilicon SEC units found on Hip06 Hip07
*
- * Copyright (c) 2016-2017 Hisilicon Limited.
+ * Copyright (c) 2016-2017 HiSilicon Limited.
*/
#include <linux/acpi.h>
#include <linux/atomic.h>
@@ -233,7 +233,7 @@ static int sec_queue_map_io(struct sec_queue *queue)
IORESOURCE_MEM,
2 + queue->queue_id);
if (!res) {
- dev_err(dev, "Failed to get queue %d memory resource\n",
+ dev_err(dev, "Failed to get queue %u memory resource\n",
queue->queue_id);
return -ENOMEM;
}
@@ -653,12 +653,12 @@ static int sec_queue_free(struct sec_queue *queue)
struct sec_dev_info *info = queue->dev_info;
if (queue->queue_id >= SEC_Q_NUM) {
- dev_err(info->dev, "No queue %d\n", queue->queue_id);
+ dev_err(info->dev, "No queue %u\n", queue->queue_id);
return -ENODEV;
}
if (!queue->in_use) {
- dev_err(info->dev, "Queue %d is idle\n", queue->queue_id);
+ dev_err(info->dev, "Queue %u is idle\n", queue->queue_id);
return -ENODEV;
}
@@ -834,6 +834,7 @@ int sec_queue_stop_release(struct sec_queue *queue)
/**
* sec_queue_empty() - Is this hardware queue currently empty.
+ * @queue: The queue to test
*
* We need to know if we have an empty queue for some of the chaining modes
* as if it is not empty we may need to hold the message in a software queue
@@ -1315,6 +1316,6 @@ static struct platform_driver sec_driver = {
module_platform_driver(sec_driver);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Hisilicon Security Accelerators");
+MODULE_DESCRIPTION("HiSilicon Security Accelerators");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com");
MODULE_AUTHOR("Jonathan Cameron <jonathan.cameron@huawei.com>");
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h
index 4d9063a8b10b..179a8250d691 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.h
+++ b/drivers/crypto/hisilicon/sec/sec_drv.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2016-2017 Hisilicon Limited. */
+/* Copyright (c) 2016-2017 HiSilicon Limited. */
#ifndef _SEC_DRV_H_
#define _SEC_DRV_H_
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 08491912afd5..dfdce2f21e65 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -4,8 +4,6 @@
#ifndef __HISI_SEC_V2_H
#define __HISI_SEC_V2_H
-#include <linux/list.h>
-
#include "../qm.h"
#include "sec_crypto.h"
@@ -50,7 +48,7 @@ struct sec_req {
int err_type;
int req_id;
- int flag;
+ u32 flag;
/* Status of the SEC request */
bool fake_busy;
@@ -139,6 +137,7 @@ struct sec_ctx {
bool pbuf_supported;
struct sec_cipher_ctx c_ctx;
struct sec_auth_ctx a_ctx;
+ struct device *dev;
};
enum sec_endian {
@@ -148,7 +147,6 @@ enum sec_endian {
};
enum sec_debug_file_index {
- SEC_CURRENT_QM,
SEC_CLEAR_ENABLE,
SEC_DEBUG_FILE_NUM,
};
@@ -183,6 +181,6 @@ struct sec_dev {
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
-int sec_register_to_crypto(void);
-void sec_unregister_from_crypto(void);
+int sec_register_to_crypto(struct hisi_qm *qm);
+void sec_unregister_from_crypto(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 2eaa516b3231..133aede8bf07 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -7,6 +7,7 @@
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
@@ -43,7 +44,6 @@
#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR 128
-#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev)
#define SEC_CIPHER_AUTH 0xfe
#define SEC_AUTH_CIPHER 0x1
#define SEC_MAX_MAC_LEN 64
@@ -96,7 +96,7 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
0, QM_Q_DEPTH, GFP_ATOMIC);
mutex_unlock(&qp_ctx->req_lock);
if (unlikely(req_id < 0)) {
- dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
+ dev_err(req->ctx->dev, "alloc req id fail!\n");
return req_id;
}
@@ -112,7 +112,7 @@ static void sec_free_req_id(struct sec_req *req)
int req_id = req->req_id;
if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
- dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
+ dev_err(req->ctx->dev, "free request id invalid!\n");
return;
}
@@ -138,7 +138,7 @@ static int sec_aead_verify(struct sec_req *req)
aead_req->cryptlen + aead_req->assoclen -
authsize);
if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
- dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n");
+ dev_err(req->ctx->dev, "aead verify failure!\n");
return -EBADMSG;
}
@@ -177,7 +177,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
if (unlikely(req->err_type || done != SEC_SQE_DONE ||
(ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
(ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
- dev_err(SEC_CTX_DEV(ctx),
+ dev_err_ratelimited(ctx->dev,
"err_type[%d],done[%d],flag[%d]\n",
req->err_type, done, flag);
err = -EIO;
@@ -326,8 +326,8 @@ static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
static int sec_alg_resource_alloc(struct sec_ctx *ctx,
struct sec_qp_ctx *qp_ctx)
{
- struct device *dev = SEC_CTX_DEV(ctx);
struct sec_alg_res *res = qp_ctx->res;
+ struct device *dev = ctx->dev;
int ret;
ret = sec_alloc_civ_resource(dev, res);
@@ -360,7 +360,7 @@ alloc_fail:
static void sec_alg_resource_free(struct sec_ctx *ctx,
struct sec_qp_ctx *qp_ctx)
{
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
sec_free_civ_resource(dev, qp_ctx->res);
@@ -373,7 +373,7 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
int qp_ctx_id, int alg_type)
{
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
struct sec_qp_ctx *qp_ctx;
struct hisi_qp *qp;
int ret = -ENOMEM;
@@ -428,7 +428,7 @@ err_destroy_idr:
static void sec_release_qp_ctx(struct sec_ctx *ctx,
struct sec_qp_ctx *qp_ctx)
{
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
hisi_qm_stop_qp(qp_ctx->qp);
sec_alg_resource_free(ctx, qp_ctx);
@@ -452,6 +452,7 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
ctx->sec = sec;
+ ctx->dev = &sec->qm.pdev->dev;
ctx->hlf_q_num = sec->ctx_q_num >> 1;
ctx->pbuf_supported = ctx->sec->iommu_used;
@@ -476,11 +477,9 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
err_sec_release_qp_ctx:
for (i = i - 1; i >= 0; i--)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
-
kfree(ctx->qp_ctx);
err_destroy_qps:
sec_destroy_qps(ctx->qps, sec->ctx_q_num);
-
return ret;
}
@@ -499,7 +498,7 @@ static int sec_cipher_init(struct sec_ctx *ctx)
{
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
&c_ctx->c_key_dma, GFP_KERNEL);
if (!c_ctx->c_key)
return -ENOMEM;
@@ -512,7 +511,7 @@ static void sec_cipher_uninit(struct sec_ctx *ctx)
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
- dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
c_ctx->c_key, c_ctx->c_key_dma);
}
@@ -520,7 +519,7 @@ static int sec_auth_init(struct sec_ctx *ctx)
{
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
&a_ctx->a_key_dma, GFP_KERNEL);
if (!a_ctx->a_key)
return -ENOMEM;
@@ -533,7 +532,7 @@ static void sec_auth_uninit(struct sec_ctx *ctx)
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
- dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
a_ctx->a_key, a_ctx->a_key_dma);
}
@@ -546,7 +545,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
- dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
+ pr_err("get error skcipher iv size!\n");
return -EINVAL;
}
@@ -573,10 +572,18 @@ static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
sec_ctx_base_uninit(ctx);
}
-static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
+static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key,
const u32 keylen,
const enum sec_cmode c_mode)
{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ int ret;
+
+ ret = verify_skcipher_des3_key(tfm, key);
+ if (ret)
+ return ret;
+
switch (keylen) {
case SEC_DES3_2KEY_SIZE:
c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
@@ -633,12 +640,13 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
{
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct device *dev = ctx->dev;
int ret;
if (c_mode == SEC_CMODE_XTS) {
ret = xts_verify_key(tfm, key, keylen);
if (ret) {
- dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
+ dev_err(dev, "xts mode key err!\n");
return ret;
}
}
@@ -648,7 +656,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
switch (c_alg) {
case SEC_CALG_3DES:
- ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
+ ret = sec_skcipher_3des_setkey(tfm, key, keylen, c_mode);
break;
case SEC_CALG_AES:
case SEC_CALG_SM4:
@@ -659,7 +667,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
if (ret) {
- dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
+ dev_err(dev, "set sec key err!\n");
return ret;
}
@@ -691,7 +699,7 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
struct aead_request *aead_req = req->aead_req.aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
int copy_size, pbuf_length;
int req_id = req->req_id;
@@ -701,21 +709,14 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
copy_size = c_req->c_len;
pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
- qp_ctx->res[req_id].pbuf,
- copy_size);
-
+ qp_ctx->res[req_id].pbuf,
+ copy_size);
if (unlikely(pbuf_length != copy_size)) {
dev_err(dev, "copy src data to pbuf error!\n");
return -EINVAL;
}
c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
-
- if (!c_req->c_in_dma) {
- dev_err(dev, "fail to set pbuffer address!\n");
- return -ENOMEM;
- }
-
c_req->c_out_dma = c_req->c_in_dma;
return 0;
@@ -727,7 +728,7 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
struct aead_request *aead_req = req->aead_req.aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
int copy_size, pbuf_length;
int req_id = req->req_id;
@@ -739,7 +740,6 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
qp_ctx->res[req_id].pbuf,
copy_size);
-
if (unlikely(pbuf_length != copy_size))
dev_err(dev, "copy pbuf data to dst error!\n");
}
@@ -751,7 +751,7 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
struct sec_aead_req *a_req = &req->aead_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
struct sec_alg_res *res = &qp_ctx->res[req->req_id];
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
int ret;
if (req->use_pbuf) {
@@ -806,7 +806,7 @@ static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
struct scatterlist *src, struct scatterlist *dst)
{
struct sec_cipher_req *c_req = &req->c_req;
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
if (req->use_pbuf) {
sec_cipher_pbuf_unmap(ctx, req, dst);
@@ -891,6 +891,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct device *dev = ctx->dev;
struct crypto_authenc_keys keys;
int ret;
@@ -904,13 +905,13 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
ret = sec_aead_aes_set_key(c_ctx, &keys);
if (ret) {
- dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n");
+ dev_err(dev, "set sec cipher key err!\n");
goto bad_key;
}
ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
if (ret) {
- dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n");
+ dev_err(dev, "set sec auth key err!\n");
goto bad_key;
}
@@ -1062,7 +1063,7 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
cryptlen - iv_size);
if (unlikely(sz != iv_size))
- dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
+ dev_err(req->ctx->dev, "copy output iv error!\n");
}
static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
@@ -1160,7 +1161,7 @@ static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
ret = sec_skcipher_bd_fill(ctx, req);
if (unlikely(ret)) {
- dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n");
+ dev_err(ctx->dev, "skcipher bd fill is error!\n");
return ret;
}
@@ -1194,7 +1195,7 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
a_req->assoclen);
if (unlikely(sz != authsize)) {
- dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n");
+ dev_err(c->dev, "copy out mac err!\n");
err = -EINVAL;
}
}
@@ -1259,7 +1260,7 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
ret = ctx->req_op->bd_send(ctx, req);
if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
- dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
+ dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
goto err_send_req;
}
@@ -1325,7 +1326,7 @@ static int sec_aead_init(struct crypto_aead *tfm)
ctx->alg_type = SEC_AEAD;
ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
- dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n");
+ dev_err(ctx->dev, "get error aead iv size!\n");
return -EINVAL;
}
@@ -1374,7 +1375,7 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
if (IS_ERR(auth_ctx->hash_tfm)) {
- dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n");
+ dev_err(ctx->dev, "aead alloc shash error!\n");
sec_aead_exit(tfm);
return PTR_ERR(auth_ctx->hash_tfm);
}
@@ -1405,10 +1406,40 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
return sec_aead_ctx_init(tfm, "sha512");
}
+
+static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
+ struct sec_req *sreq)
+{
+ u32 cryptlen = sreq->c_req.sk_req->cryptlen;
+ struct device *dev = ctx->dev;
+ u8 c_mode = ctx->c_ctx.c_mode;
+ int ret = 0;
+
+ switch (c_mode) {
+ case SEC_CMODE_XTS:
+ if (unlikely(cryptlen < AES_BLOCK_SIZE)) {
+ dev_err(dev, "skcipher XTS mode input length error!\n");
+ ret = -EINVAL;
+ }
+ break;
+ case SEC_CMODE_ECB:
+ case SEC_CMODE_CBC:
+ if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) {
+ dev_err(dev, "skcipher AES input length error!\n");
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
struct skcipher_request *sk_req = sreq->c_req.sk_req;
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
u8 c_alg = ctx->c_ctx.c_alg;
if (unlikely(!sk_req->src || !sk_req->dst)) {
@@ -1429,12 +1460,9 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
}
return 0;
} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
- if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
- dev_err(dev, "skcipher aes input length error!\n");
- return -EINVAL;
- }
- return 0;
+ return sec_skcipher_cryptlen_check(ctx, sreq);
}
+
dev_err(dev, "skcipher algorithm error!\n");
return -EINVAL;
@@ -1531,14 +1559,15 @@ static struct skcipher_alg sec_skciphers[] = {
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
- u8 c_alg = ctx->c_ctx.c_alg;
struct aead_request *req = sreq->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
size_t authsize = crypto_aead_authsize(tfm);
+ struct device *dev = ctx->dev;
+ u8 c_alg = ctx->c_ctx.c_alg;
if (unlikely(!req->src || !req->dst || !req->cryptlen ||
req->assoclen > SEC_MAX_AAD_LEN)) {
- dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
+ dev_err(dev, "aead input param error!\n");
return -EINVAL;
}
@@ -1550,7 +1579,7 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
/* Support AES only */
if (unlikely(c_alg != SEC_CALG_AES)) {
- dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
+ dev_err(dev, "aead crypto alg error!\n");
return -EINVAL;
}
if (sreq->c_req.encrypt)
@@ -1559,7 +1588,7 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
sreq->c_req.c_len = req->cryptlen - authsize;
if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
- dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n");
+ dev_err(dev, "aead crypto length error!\n");
return -EINVAL;
}
@@ -1634,7 +1663,7 @@ static struct aead_alg sec_aeads[] = {
AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
};
-int sec_register_to_crypto(void)
+int sec_register_to_crypto(struct hisi_qm *qm)
{
int ret;
@@ -1651,7 +1680,7 @@ int sec_register_to_crypto(void)
return ret;
}
-void sec_unregister_from_crypto(void)
+void sec_unregister_from_crypto(struct hisi_qm *qm)
{
crypto_unregister_skciphers(sec_skciphers,
ARRAY_SIZE(sec_skciphers));
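
The new sec_skcipher_cryptlen_check() above splits skcipher length validation by cipher mode rather than by algorithm alone: XTS only requires at least one AES block (ciphertext stealing covers the tail), while ECB/CBC require an exact multiple of the block size. A minimal stand-alone sketch of the same rule, with hypothetical names, for reference:

#include <errno.h>
#include <stdint.h>

#define AES_BLOCK_SIZE 16

enum c_mode { CMODE_ECB, CMODE_CBC, CMODE_XTS };

/* Hypothetical mirror of the per-mode check in the hunk above. */
static int check_cryptlen(enum c_mode mode, uint32_t cryptlen)
{
	switch (mode) {
	case CMODE_XTS:
		/* XTS: any length of at least one block is valid */
		return cryptlen < AES_BLOCK_SIZE ? -EINVAL : 0;
	case CMODE_ECB:
	case CMODE_CBC:
		/* ECB/CBC: must be a whole number of blocks */
		return cryptlen & (AES_BLOCK_SIZE - 1) ? -EINVAL : 0;
	default:
		return -EINVAL;
	}
}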
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index b2786e17d8fe..9c78edac56a4 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -64,7 +64,6 @@ enum sec_addr_type {
};
struct sec_sqe_type2 {
-
/*
* mac_len: 0~4 bits
* a_key_len: 5~10 bits
@@ -120,7 +119,6 @@ struct sec_sqe_type2 {
/* c_pad_len_field: 0~1 bits */
__le16 c_pad_len_field;
-
__le64 long_a_data_len;
__le64 a_ivin_addr;
__le64 a_key_addr;
@@ -211,6 +209,6 @@ struct sec_sqe {
struct sec_sqe_type2 type2;
};
-int sec_register_to_crypto(void);
-void sec_unregister_from_crypto(void);
+int sec_register_to_crypto(struct hisi_qm *qm);
+void sec_unregister_from_crypto(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index dc68ba76f65e..6f0062d4408c 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -19,7 +19,6 @@
#define SEC_VF_NUM 63
#define SEC_QUEUE_NUM_V1 4096
-#define SEC_QUEUE_NUM_V2 1024
#define SEC_PF_PCI_DEVICE_ID 0xa255
#define SEC_VF_PCI_DEVICE_ID 0xa256
@@ -35,18 +34,16 @@
#define SEC_CTX_Q_NUM_MAX 32
#define SEC_CTRL_CNT_CLR_CE 0x301120
-#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
-#define SEC_ENGINE_PF_CFG_OFF 0x300000
-#define SEC_ACC_COMMON_REG_OFF 0x1000
+#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
#define SEC_CORE_INT_SOURCE 0x301010
#define SEC_CORE_INT_MASK 0x301000
#define SEC_CORE_INT_STATUS 0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO 0x301C14
-#define SEC_ECC_NUM(err) (((err) >> 16) & 0xFF)
-#define SEC_ECC_ADDR(err) ((err) >> 0)
+#define SEC_ECC_NUM 16
+#define SEC_ECC_MASK 0xFF
#define SEC_CORE_INT_DISABLE 0x0
-#define SEC_CORE_INT_ENABLE 0x1ff
-#define SEC_CORE_INT_CLEAR 0x1ff
+#define SEC_CORE_INT_ENABLE 0x7c1ff
+#define SEC_CORE_INT_CLEAR 0x7c1ff
#define SEC_SAA_ENABLE 0x17f
#define SEC_RAS_CE_REG 0x301050
@@ -54,24 +51,24 @@
#define SEC_RAS_NFE_REG 0x301058
#define SEC_RAS_CE_ENB_MSK 0x88
#define SEC_RAS_FE_ENB_MSK 0x0
-#define SEC_RAS_NFE_ENB_MSK 0x177
-#define SEC_RAS_DISABLE 0x0
-#define SEC_MEM_START_INIT_REG 0x0100
-#define SEC_MEM_INIT_DONE_REG 0x0104
+#define SEC_RAS_NFE_ENB_MSK 0x7c177
+#define SEC_RAS_DISABLE 0x0
+#define SEC_MEM_START_INIT_REG 0x301100
+#define SEC_MEM_INIT_DONE_REG 0x301104
-#define SEC_CONTROL_REG 0x0200
+#define SEC_CONTROL_REG 0x301200
#define SEC_TRNG_EN_SHIFT 8
#define SEC_CLK_GATE_ENABLE BIT(3)
#define SEC_CLK_GATE_DISABLE (~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF
-#define SEC_INTERFACE_USER_CTRL0_REG 0x0220
-#define SEC_INTERFACE_USER_CTRL1_REG 0x0224
-#define SEC_SAA_EN_REG 0x0270
-#define SEC_BD_ERR_CHK_EN_REG0 0x0380
-#define SEC_BD_ERR_CHK_EN_REG1 0x0384
-#define SEC_BD_ERR_CHK_EN_REG3 0x038c
+#define SEC_INTERFACE_USER_CTRL0_REG 0x301220
+#define SEC_INTERFACE_USER_CTRL1_REG 0x301224
+#define SEC_SAA_EN_REG 0x301270
+#define SEC_BD_ERR_CHK_EN_REG0 0x301380
+#define SEC_BD_ERR_CHK_EN_REG1 0x301384
+#define SEC_BD_ERR_CHK_EN_REG3 0x30138c
#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
@@ -95,9 +92,6 @@
#define SEC_SQE_MASK_OFFSET 64
#define SEC_SQE_MASK_LEN 48
-#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
- SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
-
struct sec_hw_error {
u32 int_msk;
const char *msg;
@@ -117,20 +111,66 @@ static struct hisi_qm_list sec_devices = {
};
static const struct sec_hw_error sec_hw_errors[] = {
- {.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
- {.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
- {.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"},
- {.int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint"},
- {.int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint"},
- {.int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint"},
- {.int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint"},
- {.int_msk = BIT(7), .msg = "sec_bd_err_rint"},
- {.int_msk = BIT(8), .msg = "sec_chain_buff_err_rint"},
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "sec_axi_rresp_err_rint"
+ },
+ {
+ .int_msk = BIT(1),
+ .msg = "sec_axi_bresp_err_rint"
+ },
+ {
+ .int_msk = BIT(2),
+ .msg = "sec_ecc_2bit_err_rint"
+ },
+ {
+ .int_msk = BIT(3),
+ .msg = "sec_ecc_1bit_err_rint"
+ },
+ {
+ .int_msk = BIT(4),
+ .msg = "sec_req_trng_timeout_rint"
+ },
+ {
+ .int_msk = BIT(5),
+ .msg = "sec_fsm_hbeat_rint"
+ },
+ {
+ .int_msk = BIT(6),
+ .msg = "sec_channel_req_rng_timeout_rint"
+ },
+ {
+ .int_msk = BIT(7),
+ .msg = "sec_bd_err_rint"
+ },
+ {
+ .int_msk = BIT(8),
+ .msg = "sec_chain_buff_err_rint"
+ },
+ {
+ .int_msk = BIT(14),
+ .msg = "sec_no_secure_access"
+ },
+ {
+ .int_msk = BIT(15),
+ .msg = "sec_wrapping_key_auth_err"
+ },
+ {
+ .int_msk = BIT(16),
+ .msg = "sec_km_key_crc_fail"
+ },
+ {
+ .int_msk = BIT(17),
+ .msg = "sec_axi_poison_err"
+ },
+ {
+ .int_msk = BIT(18),
+ .msg = "sec_sva_err"
+ },
+ {}
};
static const char * const sec_dbg_file_name[] = {
- [SEC_CURRENT_QM] = "current_qm",
[SEC_CLEAR_ENABLE] = "clear_enable",
};
@@ -277,9 +317,7 @@ static u8 sec_get_endian(struct hisi_qm *qm)
"cannot access a register in VF!\n");
return SEC_LE;
}
- reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF +
- SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG);
-
+ reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
/* BD little endian mode */
if (!(reg & BIT(0)))
return SEC_LE;
@@ -299,13 +337,13 @@ static int sec_engine_init(struct hisi_qm *qm)
u32 reg;
/* disable clock gate control */
- reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
reg &= SEC_CLK_GATE_DISABLE;
- writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
- writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG));
+ writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG);
- ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG),
+ ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG,
reg, reg & 0x1, SEC_DELAY_10_US,
SEC_POLL_TIMEOUT_US);
if (ret) {
@@ -313,40 +351,40 @@ static int sec_engine_init(struct hisi_qm *qm)
return ret;
}
- reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
reg |= (0x1 << SEC_TRNG_EN_SHIFT);
- writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
- reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
+ reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);
reg |= SEC_USER0_SMMU_NORMAL;
- writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
+ writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);
- reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
+ reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
reg &= SEC_USER1_SMMU_MASK;
if (qm->use_sva && qm->ver == QM_HW_V2)
reg |= SEC_USER1_SMMU_SVA;
else
reg |= SEC_USER1_SMMU_NORMAL;
- writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
+ writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
writel(SEC_SINGLE_PORT_MAX_TRANS,
qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
- writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG));
+ writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);
/* Enable sm4 extra mode, as ctr/ecb */
writel_relaxed(SEC_BD_ERR_CHK_EN0,
- SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG0));
+ qm->io_base + SEC_BD_ERR_CHK_EN_REG0);
/* Enable sm4 xts mode multiple iv */
writel_relaxed(SEC_BD_ERR_CHK_EN1,
- SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1));
+ qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
writel_relaxed(SEC_BD_ERR_CHK_EN3,
- SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG3));
+ qm->io_base + SEC_BD_ERR_CHK_EN_REG3);
/* config endian */
- reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
reg |= sec_get_endian(qm);
- writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
return 0;
}
@@ -381,10 +419,6 @@ static void sec_debug_regs_clear(struct hisi_qm *qm)
{
int i;
- /* clear current_qm */
- writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
-
/* clear sec dfx regs */
writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
@@ -406,7 +440,7 @@ static void sec_hw_error_enable(struct hisi_qm *qm)
return;
}
- val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
+ val = readl(qm->io_base + SEC_CONTROL_REG);
/* clear SEC hw error source if any */
writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);
@@ -422,14 +456,14 @@ static void sec_hw_error_enable(struct hisi_qm *qm)
/* enable SEC block master OOO when m-bit error occur */
val = val | SEC_AXI_SHUTDOWN_ENABLE;
- writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel(val, qm->io_base + SEC_CONTROL_REG);
}
static void sec_hw_error_disable(struct hisi_qm *qm)
{
u32 val;
- val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
+ val = readl(qm->io_base + SEC_CONTROL_REG);
/* disable RAS int */
writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
@@ -442,51 +476,7 @@ static void sec_hw_error_disable(struct hisi_qm *qm)
/* disable SEC block master OOO when m-bit error occur */
val = val & SEC_AXI_SHUTDOWN_DISABLE;
- writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
-}
-
-static u32 sec_current_qm_read(struct sec_debug_file *file)
-{
- struct hisi_qm *qm = file->qm;
-
- return readl(qm->io_base + QM_DFX_MB_CNT_VF);
-}
-
-static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
-{
- struct hisi_qm *qm = file->qm;
- u32 vfq_num;
- u32 tmp;
-
- if (val > qm->vfs_num)
- return -EINVAL;
-
- /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
- if (!val) {
- qm->debug.curr_qm_qp_num = qm->qp_num;
- } else {
- vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
-
- if (val == qm->vfs_num)
- qm->debug.curr_qm_qp_num =
- qm->ctrl_qp_num - qm->qp_num -
- (qm->vfs_num - 1) * vfq_num;
- else
- qm->debug.curr_qm_qp_num = vfq_num;
- }
-
- writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
-
- return 0;
+ writel(val, qm->io_base + SEC_CONTROL_REG);
}
static u32 sec_clear_enable_read(struct sec_debug_file *file)
@@ -523,9 +513,6 @@ static ssize_t sec_debug_read(struct file *filp, char __user *buf,
spin_lock_irq(&file->lock);
switch (file->index) {
- case SEC_CURRENT_QM:
- val = sec_current_qm_read(file);
- break;
case SEC_CLEAR_ENABLE:
val = sec_clear_enable_read(file);
break;
@@ -566,11 +553,6 @@ static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
spin_lock_irq(&file->lock);
switch (file->index) {
- case SEC_CURRENT_QM:
- ret = sec_current_qm_write(file, val);
- if (ret)
- goto err_input;
- break;
case SEC_CLEAR_ENABLE:
ret = sec_clear_enable_write(file, val);
if (ret)
@@ -655,7 +637,7 @@ static int sec_debug_init(struct hisi_qm *qm)
int i;
if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
- for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
+ for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) {
spin_lock_init(&sec->debug.files[i].lock);
sec->debug.files[i].index = i;
sec->debug.files[i].qm = qm;
@@ -712,7 +694,8 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
err_val = readl(qm->io_base +
SEC_CORE_SRAM_ECC_ERR_INFO);
dev_err(dev, "multi ecc sram num=0x%x\n",
- SEC_ECC_NUM(err_val));
+ ((err_val) >> SEC_ECC_NUM) &
+ SEC_ECC_MASK);
}
}
errs++;
@@ -733,9 +716,23 @@ static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
u32 val;
- val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
- writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
- writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
+ val = readl(qm->io_base + SEC_CONTROL_REG);
+ writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG);
+ writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
+}
+
+static void sec_err_info_init(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_info *err_info = &qm->err_info;
+
+ err_info->ce = QM_BASE_CE;
+ err_info->fe = 0;
+ err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
+ err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK;
+ err_info->msi_wr_port = BIT(0);
+ err_info->acpi_rst = "SRST";
+ err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
+ QM_ACC_WB_NOT_READY_TIMEOUT;
}
static const struct hisi_qm_err_ini sec_err_ini = {
@@ -746,16 +743,7 @@ static const struct hisi_qm_err_ini sec_err_ini = {
.clear_dev_hw_err_status = sec_clear_hw_err_status,
.log_dev_hw_err = sec_log_hw_error,
.open_axi_master_ooo = sec_open_axi_master_ooo,
- .err_info = {
- .ce = QM_BASE_CE,
- .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
- QM_ACC_WB_NOT_READY_TIMEOUT,
- .fe = 0,
- .ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC,
- .dev_ce_mask = SEC_RAS_CE_ENB_MSK,
- .msi_wr_port = BIT(0),
- .acpi_rst = "SRST",
- }
+ .err_info_init = sec_err_info_init,
};
static int sec_pf_probe_init(struct sec_dev *sec)
@@ -763,12 +751,8 @@ static int sec_pf_probe_init(struct sec_dev *sec)
struct hisi_qm *qm = &sec->qm;
int ret;
- if (qm->ver == QM_HW_V1)
- qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
- else
- qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;
-
qm->err_ini = &sec_err_ini;
+ qm->err_ini->err_info_init(qm);
ret = sec_set_user_domain_and_cache(qm);
if (ret)
@@ -786,7 +770,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->pdev = pdev;
qm->ver = pdev->revision;
- qm->algs = "cipher\ndigest\naead\n";
+ qm->algs = "cipher\ndigest\naead";
qm->mode = uacce_mode;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
@@ -909,10 +893,15 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
pci_warn(pdev, "Failed to init debugfs!\n");
- ret = hisi_qm_alg_register(qm, &sec_devices);
- if (ret < 0) {
- pr_err("Failed to register driver to crypto.\n");
- goto err_qm_stop;
+ if (qm->qp_num >= ctx_q_num) {
+ ret = hisi_qm_alg_register(qm, &sec_devices);
+ if (ret < 0) {
+ pr_err("Failed to register driver to crypto.\n");
+ goto err_qm_stop;
+ }
+ } else {
+ pci_warn(qm->pdev,
+ "Failed to use kernel mode, qp not enough!\n");
}
if (qm->uacce) {
@@ -948,7 +937,9 @@ static void sec_remove(struct pci_dev *pdev)
struct hisi_qm *qm = pci_get_drvdata(pdev);
hisi_qm_wait_task_finish(qm, &sec_devices);
- hisi_qm_alg_unregister(qm, &sec_devices);
+ if (qm->qp_num >= ctx_q_num)
+ hisi_qm_alg_unregister(qm, &sec_devices);
+
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
hisi_qm_sriov_disable(pdev, true);
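
With the SEC_ADDR() indirection dropped, the engine-init handshake above reads naturally: write 1 to SEC_MEM_START_INIT_REG, then poll bit 0 of SEC_MEM_INIT_DONE_REG. Note also that the interrupt masks grow from 0x1ff to 0x7c1ff, which is exactly the original bits 0..8 plus the five new error sources at bits 14..18. A hedged sketch of just the init handshake, distilled from the hunk (constants as defined above):

#include <linux/io.h>
#include <linux/iopoll.h>

/* Kick the hardware memory init and poll the done bit, checking every
 * SEC_DELAY_10_US for at most SEC_POLL_TIMEOUT_US; returns 0 or -ETIMEDOUT. */
static int sec_mem_init(void __iomem *io_base)
{
	u32 reg;

	writel_relaxed(0x1, io_base + SEC_MEM_START_INIT_REG);
	return readl_relaxed_poll_timeout(io_base + SEC_MEM_INIT_DONE_REG,
					  reg, reg & 0x1,
					  SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
}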
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 3bff6394acaf..057273769f26 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -56,7 +56,7 @@ struct hisi_acc_sgl_pool {
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
u32 count, u32 sge_nr)
{
- u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl = 0;
+ u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl;
struct hisi_acc_sgl_pool *pool;
struct mem_block *block;
u32 i, j;
@@ -66,6 +66,11 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
sgl_size = sizeof(struct acc_hw_sge) * sge_nr +
sizeof(struct hisi_acc_hw_sgl);
+
+ /*
+ * The pool allocates blocks of up to PAGE_SIZE * 2^(MAX_ORDER - 1) bytes;
+ * on ia64 that can exceed 2^31, so the block size is capped at 2^31.
+ */
block_size = 1 << (PAGE_SHIFT + MAX_ORDER <= 32 ?
PAGE_SHIFT + MAX_ORDER - 1 : 31);
sgl_num_per_block = block_size / sgl_size;
@@ -85,8 +90,10 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
block[i].sgl = dma_alloc_coherent(dev, block_size,
&block[i].sgl_dma,
GFP_KERNEL);
- if (!block[i].sgl)
+ if (!block[i].sgl) {
+ dev_err(dev, "Fail to allocate hw SG buffer!\n");
goto err_free_mem;
+ }
block[i].size = block_size;
}
@@ -95,8 +102,10 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
block[i].sgl = dma_alloc_coherent(dev, remain_sgl * sgl_size,
&block[i].sgl_dma,
GFP_KERNEL);
- if (!block[i].sgl)
+ if (!block[i].sgl) {
+ dev_err(dev, "Fail to allocate remained hw SG buffer!\n");
goto err_free_mem;
+ }
block[i].size = remain_sgl * sgl_size;
}
@@ -167,6 +176,7 @@ static void sg_map_to_hw_sg(struct scatterlist *sgl,
{
hw_sge->buf = sg_dma_address(sgl);
hw_sge->len = cpu_to_le32(sg_dma_len(sgl));
+ hw_sge->page_ctrl = sg_virt(sgl);
}
static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
@@ -182,6 +192,18 @@ static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum)
hw_sgl->entry_sum_in_chain = cpu_to_le16(sum);
}
+static void clear_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
+{
+ struct acc_hw_sge *hw_sge = hw_sgl->sge_entries;
+ int i;
+
+ for (i = 0; i < le16_to_cpu(hw_sgl->entry_sum_in_sgl); i++) {
+ hw_sge[i].page_ctrl = NULL;
+ hw_sge[i].buf = 0;
+ hw_sge[i].len = 0;
+ }
+}
+
/**
* hisi_acc_sg_buf_map_to_hw_sgl - Map a scatterlist to a hw sgl.
* @dev: The device which hw sgl belongs to.
@@ -211,16 +233,19 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
sg_n = sg_nents(sgl);
sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
- if (!sg_n_mapped)
+ if (!sg_n_mapped) {
+ dev_err(dev, "DMA mapping for SG error!\n");
return ERR_PTR(-EINVAL);
+ }
if (sg_n_mapped > pool->sge_nr) {
+ dev_err(dev, "the number of entries in the input scatterlist exceeds the SGL pool setting.\n");
dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
return ERR_PTR(-EINVAL);
}
curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
if (IS_ERR(curr_hw_sgl)) {
+ dev_err(dev, "Get SGL error!\n");
dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
return ERR_PTR(-ENOMEM);
@@ -256,7 +281,7 @@ void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
return;
dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);
-
+ clear_hw_sgl_sge(hw_sgl);
hw_sgl->entry_sum_in_chain = 0;
hw_sgl->entry_sum_in_sgl = 0;
hw_sgl->entry_length_in_sgl = 0;
diff --git a/drivers/crypto/hisilicon/trng/trng.c b/drivers/crypto/hisilicon/trng/trng.c
index 29712685498a..829f2caf0f67 100644
--- a/drivers/crypto/hisilicon/trng/trng.c
+++ b/drivers/crypto/hisilicon/trng/trng.c
@@ -18,6 +18,8 @@
#define HISI_TRNG_REG 0x00F0
#define HISI_TRNG_BYTES 4
#define HISI_TRNG_QUALITY 512
+#define HISI_TRNG_VERSION 0x01B8
+#define HISI_TRNG_VER_V1 GENMASK(31, 0)
#define SLEEP_US 10
#define TIMEOUT_US 10000
#define SW_DRBG_NUM_SHIFT 2
@@ -50,6 +52,7 @@ struct hisi_trng {
struct hisi_trng_list *trng_list;
struct list_head list;
struct hwrng rng;
+ u32 ver;
bool is_used;
struct mutex mutex;
};
@@ -260,6 +263,7 @@ static int hisi_trng_probe(struct platform_device *pdev)
return PTR_ERR(trng->base);
trng->is_used = false;
+ trng->ver = readl(trng->base + HISI_TRNG_VERSION);
if (!trng_devices.is_init) {
INIT_LIST_HEAD(&trng_devices.list);
mutex_init(&trng_devices.lock);
@@ -267,7 +271,8 @@ static int hisi_trng_probe(struct platform_device *pdev)
}
hisi_trng_add_to_list(trng);
- if (atomic_inc_return(&trng_active_devs) == 1) {
+ if (trng->ver != HISI_TRNG_VER_V1 &&
+ atomic_inc_return(&trng_active_devs) == 1) {
ret = crypto_register_rng(&hisi_trng_alg);
if (ret) {
dev_err(&pdev->dev,
@@ -289,7 +294,8 @@ static int hisi_trng_probe(struct platform_device *pdev)
return ret;
err_crypto_unregister:
- if (atomic_dec_return(&trng_active_devs) == 0)
+ if (trng->ver != HISI_TRNG_VER_V1 &&
+ atomic_dec_return(&trng_active_devs) == 0)
crypto_unregister_rng(&hisi_trng_alg);
err_remove_from_list:
@@ -305,7 +311,8 @@ static int hisi_trng_remove(struct platform_device *pdev)
while (hisi_trng_del_from_list(trng))
;
- if (atomic_dec_return(&trng_active_devs) == 0)
+ if (trng->ver != HISI_TRNG_VER_V1 &&
+ atomic_dec_return(&trng_active_devs) == 0)
crypto_unregister_rng(&hisi_trng_alg);
return 0;
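
The probe/remove changes above combine two guards: the version register read at probe keeps V1 parts, which lack the DRBG, from ever touching the shared algorithm, and the atomic device counter ensures the algorithm is registered exactly once however many instances probe. A reduced sketch of the pattern, using the names from the hunks above:

static atomic_t trng_active_devs = ATOMIC_INIT(0);

/* Register the shared DRBG only for capable revisions, and only on
 * the first such device; the remove path mirrors the logic. */
static int trng_register_once(u32 ver)
{
	if (ver == HISI_TRNG_VER_V1)
		return 0;	/* V1 has no DRBG: nothing to register */

	if (atomic_inc_return(&trng_active_devs) == 1)
		return crypto_register_rng(&hisi_trng_alg);

	return 0;
}

static void trng_unregister_once(u32 ver)
{
	if (ver != HISI_TRNG_VER_V1 &&
	    atomic_dec_return(&trng_active_devs) == 0)
		crypto_unregister_rng(&hisi_trng_alg);
}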
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 92397f993e23..517fdbdff3ea 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -33,35 +33,55 @@ struct hisi_zip_sqe {
u32 consumed;
u32 produced;
u32 comp_data_length;
+ /*
+ * status: 0~7 bits
+ * rsvd: 8~31 bits
+ */
u32 dw3;
u32 input_data_length;
- u32 lba_l;
- u32 lba_h;
+ u32 dw5;
+ u32 dw6;
+ /*
+ * in_sge_data_offset: 0~23 bits
+ * rsvd: 24~27 bits
+ * sqe_type: 28~31 bits
+ */
u32 dw7;
+ /*
+ * out_sge_data_offset: 0~23 bits
+ * rsvd: 24~31 bits
+ */
u32 dw8;
+ /*
+ * request_type: 0~7 bits
+ * buffer_type: 8~11 bits
+ * rsvd: 12~31 bits
+ */
u32 dw9;
u32 dw10;
- u32 priv_info;
+ u32 dw11;
u32 dw12;
- u32 tag;
+ /* tag: in sqe type 0 */
+ u32 dw13;
u32 dest_avail_out;
- u32 rsvd0;
- u32 comp_head_addr_l;
- u32 comp_head_addr_h;
+ u32 dw15;
+ u32 dw16;
+ u32 dw17;
u32 source_addr_l;
u32 source_addr_h;
u32 dest_addr_l;
u32 dest_addr_h;
- u32 stream_ctx_addr_l;
- u32 stream_ctx_addr_h;
- u32 cipher_key1_addr_l;
- u32 cipher_key1_addr_h;
- u32 cipher_key2_addr_l;
- u32 cipher_key2_addr_h;
+ u32 dw22;
+ u32 dw23;
+ u32 dw24;
+ u32 dw25;
+ /* tag: in sqe type 3 */
+ u32 dw26;
+ u32 dw27;
u32 rsvd1[4];
};
int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node);
-int hisi_zip_register_to_crypto(void);
-void hisi_zip_unregister_from_crypto(void);
+int hisi_zip_register_to_crypto(struct hisi_qm *qm);
+void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
#endif
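
The SQE now exposes raw dwords (dw5, dw7, ...) with the bit layout documented in comments, and zip_crypto.c packs them through GENMASK()/FIELD_PREP() masks such as HZIP_SQE_TYPE_M. A short sketch of how one field round-trips under that scheme (mask values as defined in zip_crypto.c):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define HZIP_IN_SGE_DATA_OFFSET_M	GENMASK(23, 0)
#define HZIP_SQE_TYPE_M			GENMASK(31, 28)

/* Read-modify-write dw7: update the sqe_type nibble without
 * disturbing the in_sge_data_offset field sharing the dword. */
static void set_sqe_type(u32 *dw7, u8 sqe_type)
{
	*dw7 = (*dw7 & ~HZIP_SQE_TYPE_M) | FIELD_PREP(HZIP_SQE_TYPE_M, sqe_type);
}

static u32 get_in_sge_offset(u32 dw7)
{
	return FIELD_GET(HZIP_IN_SGE_DATA_OFFSET_M, dw7);
}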
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 08b4660b014c..9520a4113c81 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -10,6 +10,7 @@
#define HZIP_BD_STATUS_M GENMASK(7, 0)
/* hisi_zip_sqe dw7 */
#define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0)
+#define HZIP_SQE_TYPE_M GENMASK(31, 28)
/* hisi_zip_sqe dw8 */
#define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0)
/* hisi_zip_sqe dw9 */
@@ -91,8 +92,22 @@ struct hisi_zip_qp_ctx {
struct hisi_zip_ctx *ctx;
};
+struct hisi_zip_sqe_ops {
+ u8 sqe_type;
+ void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
+ void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
+ void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
+ void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
+ void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
+ void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
+ u32 (*get_tag)(struct hisi_zip_sqe *sqe);
+ u32 (*get_status)(struct hisi_zip_sqe *sqe);
+ u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
+};
+
struct hisi_zip_ctx {
struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
+ const struct hisi_zip_sqe_ops *ops;
};
static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
@@ -119,35 +134,367 @@ static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");
-static void hisi_zip_config_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
+static u16 get_extra_field_size(const u8 *start)
+{
+ return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN;
+}
+
+static u32 get_name_field_size(const u8 *start)
+{
+ return strlen(start) + 1;
+}
+
+static u32 get_comment_field_size(const u8 *start)
+{
+ return strlen(start) + 1;
+}
+
+static u32 __get_gzip_head_size(const u8 *src)
+{
+ u8 head_flg = *(src + GZIP_HEAD_FLG_SHIFT);
+ u32 size = GZIP_HEAD_FEXTRA_SHIFT;
+
+ if (head_flg & GZIP_HEAD_FEXTRA_BIT)
+ size += get_extra_field_size(src + size);
+ if (head_flg & GZIP_HEAD_FNAME_BIT)
+ size += get_name_field_size(src + size);
+ if (head_flg & GZIP_HEAD_FCOMMENT_BIT)
+ size += get_comment_field_size(src + size);
+ if (head_flg & GZIP_HEAD_FHCRC_BIT)
+ size += GZIP_HEAD_FHCRC_SIZE;
+
+ return size;
+}
+
+static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
+{
+ char buf[HZIP_GZIP_HEAD_BUF];
+
+ sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf));
+
+ return __get_gzip_head_size(buf);
+}
+
+static int add_comp_head(struct scatterlist *dst, u8 req_type)
+{
+ int head_size = TO_HEAD_SIZE(req_type);
+ const u8 *head = TO_HEAD(req_type);
+ int ret;
+
+ ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size);
+ if (ret != head_size) {
+ pr_err("the head size of buffer is wrong (%d)!\n", ret);
+ return -ENOMEM;
+ }
+
+ return head_size;
+}
+
+static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type)
+{
+ if (!acomp_req->src || !acomp_req->slen)
+ return -EINVAL;
+
+ if (req_type == HZIP_ALG_TYPE_GZIP &&
+ acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT)
+ return -EINVAL;
+
+ switch (req_type) {
+ case HZIP_ALG_TYPE_ZLIB:
+ return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB);
+ case HZIP_ALG_TYPE_GZIP:
+ return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP);
+ default:
+ pr_err("request type does not support!\n");
+ return -EINVAL;
+ }
+}
+
+static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
+ struct hisi_zip_qp_ctx *qp_ctx,
+ size_t head_size, bool is_comp)
+{
+ struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
+ struct hisi_zip_req *q = req_q->q;
+ struct hisi_zip_req *req_cache;
+ int req_id;
+
+ write_lock(&req_q->req_lock);
+
+ req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
+ if (req_id >= req_q->size) {
+ write_unlock(&req_q->req_lock);
+ dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
+ return ERR_PTR(-EAGAIN);
+ }
+ set_bit(req_id, req_q->req_bitmap);
+
+ req_cache = q + req_id;
+ req_cache->req_id = req_id;
+ req_cache->req = req;
+
+ if (is_comp) {
+ req_cache->sskip = 0;
+ req_cache->dskip = head_size;
+ } else {
+ req_cache->sskip = head_size;
+ req_cache->dskip = 0;
+ }
+
+ write_unlock(&req_q->req_lock);
+
+ return req_cache;
+}
+
+static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
+ struct hisi_zip_req *req)
+{
+ struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
+
+ write_lock(&req_q->req_lock);
+ clear_bit(req->req_id, req_q->req_bitmap);
+ memset(req, 0, sizeof(struct hisi_zip_req));
+ write_unlock(&req_q->req_lock);
+}
+
+static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
+{
+ sqe->source_addr_l = lower_32_bits(req->dma_src);
+ sqe->source_addr_h = upper_32_bits(req->dma_src);
+ sqe->dest_addr_l = lower_32_bits(req->dma_dst);
+ sqe->dest_addr_h = upper_32_bits(req->dma_dst);
+}
+
+static void hisi_zip_fill_buf_size(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
+{
+ struct acomp_req *a_req = req->req;
+
+ sqe->input_data_length = a_req->slen - req->sskip;
+ sqe->dest_avail_out = a_req->dlen - req->dskip;
+ sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, req->sskip);
+ sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, req->dskip);
+}
+
+static void hisi_zip_fill_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
{
u32 val;
- val = (sqe->dw9) & ~HZIP_BUF_TYPE_M;
+ val = sqe->dw9 & ~HZIP_BUF_TYPE_M;
val |= FIELD_PREP(HZIP_BUF_TYPE_M, buf_type);
sqe->dw9 = val;
}
-static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag)
+static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type)
{
- sqe->tag = tag;
+ u32 val;
+
+ val = sqe->dw9 & ~HZIP_REQ_TYPE_M;
+ val |= FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
+ sqe->dw9 = val;
}
-static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
- dma_addr_t s_addr, dma_addr_t d_addr, u32 slen,
- u32 dlen, u32 sskip, u32 dskip)
+static void hisi_zip_fill_tag_v1(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
{
+ sqe->dw13 = req->req_id;
+}
+
+static void hisi_zip_fill_tag_v2(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
+{
+ sqe->dw26 = req->req_id;
+}
+
+static void hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type)
+{
+ u32 val;
+
+ val = sqe->dw7 & ~HZIP_SQE_TYPE_M;
+ val |= FIELD_PREP(HZIP_SQE_TYPE_M, sqe_type);
+ sqe->dw7 = val;
+}
+
+static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe,
+ u8 req_type, struct hisi_zip_req *req)
+{
+ const struct hisi_zip_sqe_ops *ops = ctx->ops;
+
memset(sqe, 0, sizeof(struct hisi_zip_sqe));
- sqe->input_data_length = slen - sskip;
- sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, sskip);
- sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, dskip);
- sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
- sqe->dest_avail_out = dlen - dskip;
- sqe->source_addr_l = lower_32_bits(s_addr);
- sqe->source_addr_h = upper_32_bits(s_addr);
- sqe->dest_addr_l = lower_32_bits(d_addr);
- sqe->dest_addr_h = upper_32_bits(d_addr);
+ ops->fill_addr(sqe, req);
+ ops->fill_buf_size(sqe, req);
+ ops->fill_buf_type(sqe, HZIP_SGL);
+ ops->fill_req_type(sqe, req_type);
+ ops->fill_tag(sqe, req);
+ ops->fill_sqe_type(sqe, ops->sqe_type);
+}
+
+static int hisi_zip_do_work(struct hisi_zip_req *req,
+ struct hisi_zip_qp_ctx *qp_ctx)
+{
+ struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
+ struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
+ struct acomp_req *a_req = req->req;
+ struct hisi_qp *qp = qp_ctx->qp;
+ struct device *dev = &qp->qm->pdev->dev;
+ struct hisi_zip_sqe zip_sqe;
+ int ret;
+
+ if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
+ return -EINVAL;
+
+ req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
+ req->req_id << 1, &req->dma_src);
+ if (IS_ERR(req->hw_src)) {
+ dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
+ PTR_ERR(req->hw_src));
+ return PTR_ERR(req->hw_src);
+ }
+
+ req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
+ (req->req_id << 1) + 1,
+ &req->dma_dst);
+ if (IS_ERR(req->hw_dst)) {
+ ret = PTR_ERR(req->hw_dst);
+ dev_err(dev, "failed to map the dst buffer to hw slg (%d)!\n",
+ ret);
+ goto err_unmap_input;
+ }
+
+ hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp->req_type, req);
+
+ /* send command to start a task */
+ atomic64_inc(&dfx->send_cnt);
+ ret = hisi_qp_send(qp, &zip_sqe);
+ if (ret < 0) {
+ atomic64_inc(&dfx->send_busy_cnt);
+ ret = -EAGAIN;
+ dev_dbg_ratelimited(dev, "failed to send request!\n");
+ goto err_unmap_output;
+ }
+
+ return -EINPROGRESS;
+
+err_unmap_output:
+ hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
+err_unmap_input:
+ hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
+ return ret;
+}
+
+static u32 hisi_zip_get_tag_v1(struct hisi_zip_sqe *sqe)
+{
+ return sqe->dw13;
+}
+
+static u32 hisi_zip_get_tag_v2(struct hisi_zip_sqe *sqe)
+{
+ return sqe->dw26;
+}
+
+static u32 hisi_zip_get_status(struct hisi_zip_sqe *sqe)
+{
+ return sqe->dw3 & HZIP_BD_STATUS_M;
+}
+
+static u32 hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe)
+{
+ return sqe->produced;
+}
+
+static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
+{
+ struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
+ const struct hisi_zip_sqe_ops *ops = qp_ctx->ctx->ops;
+ struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
+ struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
+ struct device *dev = &qp->qm->pdev->dev;
+ struct hisi_zip_sqe *sqe = data;
+ u32 tag = ops->get_tag(sqe);
+ struct hisi_zip_req *req = req_q->q + tag;
+ struct acomp_req *acomp_req = req->req;
+ u32 status, dlen, head_size;
+ int err = 0;
+
+ atomic64_inc(&dfx->recv_cnt);
+ status = ops->get_status(sqe);
+ if (status != 0 && status != HZIP_NC_ERR) {
+ dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
+ (qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
+ sqe->produced);
+ atomic64_inc(&dfx->err_bd_cnt);
+ err = -EIO;
+ }
+
+ dlen = ops->get_dstlen(sqe);
+
+ hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
+
+ head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0;
+ acomp_req->dlen = dlen + head_size;
+
+ if (acomp_req->base.complete)
+ acomp_request_complete(acomp_req, err);
+
+ hisi_zip_remove_req(qp_ctx, req);
+}
+
+static int hisi_zip_acompress(struct acomp_req *acomp_req)
+{
+ struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
+ struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
+ struct device *dev = &qp_ctx->qp->qm->pdev->dev;
+ struct hisi_zip_req *req;
+ int head_size;
+ int ret;
+
+ /* let's output compression head now */
+ head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type);
+ if (head_size < 0) {
+ dev_err_ratelimited(dev, "failed to add comp head (%d)!\n",
+ head_size);
+ return head_size;
+ }
+
+ req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = hisi_zip_do_work(req, qp_ctx);
+ if (ret != -EINPROGRESS) {
+ dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
+ hisi_zip_remove_req(qp_ctx, req);
+ }
+
+ return ret;
+}
+
+static int hisi_zip_adecompress(struct acomp_req *acomp_req)
+{
+ struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
+ struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
+ struct device *dev = &qp_ctx->qp->qm->pdev->dev;
+ struct hisi_zip_req *req;
+ int head_size, ret;
+
+ head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type);
+ if (head_size < 0) {
+ dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n",
+ head_size);
+ return head_size;
+ }
+
+ req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = hisi_zip_do_work(req, qp_ctx);
+ if (ret != -EINPROGRESS) {
+ dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
+ ret);
+ hisi_zip_remove_req(qp_ctx, req);
+ }
+
+ return ret;
}
static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
@@ -177,9 +524,36 @@ static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
hisi_qm_release_qp(ctx->qp);
}
+static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = {
+ .sqe_type = 0,
+ .fill_addr = hisi_zip_fill_addr,
+ .fill_buf_size = hisi_zip_fill_buf_size,
+ .fill_buf_type = hisi_zip_fill_buf_type,
+ .fill_req_type = hisi_zip_fill_req_type,
+ .fill_tag = hisi_zip_fill_tag_v1,
+ .fill_sqe_type = hisi_zip_fill_sqe_type,
+ .get_tag = hisi_zip_get_tag_v1,
+ .get_status = hisi_zip_get_status,
+ .get_dstlen = hisi_zip_get_dstlen,
+};
+
+static const struct hisi_zip_sqe_ops hisi_zip_ops_v2 = {
+ .sqe_type = 0x3,
+ .fill_addr = hisi_zip_fill_addr,
+ .fill_buf_size = hisi_zip_fill_buf_size,
+ .fill_buf_type = hisi_zip_fill_buf_type,
+ .fill_req_type = hisi_zip_fill_req_type,
+ .fill_tag = hisi_zip_fill_tag_v2,
+ .fill_sqe_type = hisi_zip_fill_sqe_type,
+ .get_tag = hisi_zip_get_tag_v2,
+ .get_status = hisi_zip_get_status,
+ .get_dstlen = hisi_zip_get_dstlen,
+};
+
static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node)
{
struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
+ struct hisi_zip_qp_ctx *qp_ctx;
struct hisi_zip *hisi_zip;
int ret, i, j;
@@ -193,8 +567,9 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
/* alg_type = 0 for compress, 1 for decompress in hw sqe */
- ret = hisi_zip_start_qp(qps[i], &hisi_zip_ctx->qp_ctx[i], i,
- req_type);
+ qp_ctx = &hisi_zip_ctx->qp_ctx[i];
+ qp_ctx->ctx = hisi_zip_ctx;
+ ret = hisi_zip_start_qp(qps[i], qp_ctx, i, req_type);
if (ret) {
for (j = i - 1; j >= 0; j--)
hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp);
@@ -203,9 +578,14 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int
return ret;
}
- hisi_zip_ctx->qp_ctx[i].zip_dev = hisi_zip;
+ qp_ctx->zip_dev = hisi_zip;
}
+ if (hisi_zip->qm.ver < QM_HW_V3)
+ hisi_zip_ctx->ops = &hisi_zip_ops_v1;
+ else
+ hisi_zip_ctx->ops = &hisi_zip_ops_v2;
+
return 0;
}
@@ -217,38 +597,6 @@ static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]);
}
-static u16 get_extra_field_size(const u8 *start)
-{
- return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN;
-}
-
-static u32 get_name_field_size(const u8 *start)
-{
- return strlen(start) + 1;
-}
-
-static u32 get_comment_field_size(const u8 *start)
-{
- return strlen(start) + 1;
-}
-
-static u32 __get_gzip_head_size(const u8 *src)
-{
- u8 head_flg = *(src + GZIP_HEAD_FLG_SHIFT);
- u32 size = GZIP_HEAD_FEXTRA_SHIFT;
-
- if (head_flg & GZIP_HEAD_FEXTRA_BIT)
- size += get_extra_field_size(src + size);
- if (head_flg & GZIP_HEAD_FNAME_BIT)
- size += get_name_field_size(src + size);
- if (head_flg & GZIP_HEAD_FCOMMENT_BIT)
- size += get_comment_field_size(src + size);
- if (head_flg & GZIP_HEAD_FHCRC_BIT)
- size += GZIP_HEAD_FHCRC_SIZE;
-
- return size;
-}
-
static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
{
struct hisi_zip_req_q *req_q;
@@ -336,52 +684,6 @@ static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx)
ctx->qp_ctx[i].sgl_pool);
}
-static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
- struct hisi_zip_req *req)
-{
- struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
-
- write_lock(&req_q->req_lock);
- clear_bit(req->req_id, req_q->req_bitmap);
- memset(req, 0, sizeof(struct hisi_zip_req));
- write_unlock(&req_q->req_lock);
-}
-
-static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
-{
- struct hisi_zip_sqe *sqe = data;
- struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
- struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
- struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
- struct hisi_zip_req *req = req_q->q + sqe->tag;
- struct acomp_req *acomp_req = req->req;
- struct device *dev = &qp->qm->pdev->dev;
- u32 status, dlen, head_size;
- int err = 0;
-
- atomic64_inc(&dfx->recv_cnt);
- status = sqe->dw3 & HZIP_BD_STATUS_M;
- if (status != 0 && status != HZIP_NC_ERR) {
- dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
- (qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
- sqe->produced);
- atomic64_inc(&dfx->err_bd_cnt);
- err = -EIO;
- }
- dlen = sqe->produced;
-
- hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
- hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
-
- head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0;
- acomp_req->dlen = dlen + head_size;
-
- if (acomp_req->base.complete)
- acomp_request_complete(acomp_req, err);
-
- hisi_zip_remove_req(qp_ctx, req);
-}
-
static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx,
void (*fn)(struct hisi_qp *, void *))
{
@@ -439,204 +741,6 @@ static void hisi_zip_acomp_exit(struct crypto_acomp *tfm)
hisi_zip_ctx_exit(ctx);
}
-static int add_comp_head(struct scatterlist *dst, u8 req_type)
-{
- int head_size = TO_HEAD_SIZE(req_type);
- const u8 *head = TO_HEAD(req_type);
- int ret;
-
- ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size);
- if (ret != head_size) {
- pr_err("the head size of buffer is wrong (%d)!\n", ret);
- return -ENOMEM;
- }
-
- return head_size;
-}
-
-static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
-{
- char buf[HZIP_GZIP_HEAD_BUF];
-
- sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf));
-
- return __get_gzip_head_size(buf);
-}
-
-static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type)
-{
- if (!acomp_req->src || !acomp_req->slen)
- return -EINVAL;
-
- if ((req_type == HZIP_ALG_TYPE_GZIP) &&
- (acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT))
- return -EINVAL;
-
- switch (req_type) {
- case HZIP_ALG_TYPE_ZLIB:
- return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB);
- case HZIP_ALG_TYPE_GZIP:
- return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP);
- default:
- pr_err("request type does not support!\n");
- return -EINVAL;
- }
-}
-
-static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
- struct hisi_zip_qp_ctx *qp_ctx,
- size_t head_size, bool is_comp)
-{
- struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
- struct hisi_zip_req *q = req_q->q;
- struct hisi_zip_req *req_cache;
- int req_id;
-
- write_lock(&req_q->req_lock);
-
- req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
- if (req_id >= req_q->size) {
- write_unlock(&req_q->req_lock);
- dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
- return ERR_PTR(-EAGAIN);
- }
- set_bit(req_id, req_q->req_bitmap);
-
- req_cache = q + req_id;
- req_cache->req_id = req_id;
- req_cache->req = req;
-
- if (is_comp) {
- req_cache->sskip = 0;
- req_cache->dskip = head_size;
- } else {
- req_cache->sskip = head_size;
- req_cache->dskip = 0;
- }
-
- write_unlock(&req_q->req_lock);
-
- return req_cache;
-}
-
-static int hisi_zip_do_work(struct hisi_zip_req *req,
- struct hisi_zip_qp_ctx *qp_ctx)
-{
- struct acomp_req *a_req = req->req;
- struct hisi_qp *qp = qp_ctx->qp;
- struct device *dev = &qp->qm->pdev->dev;
- struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
- struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
- struct hisi_zip_sqe zip_sqe;
- dma_addr_t input, output;
- int ret;
-
- if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
- return -EINVAL;
-
- req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
- req->req_id << 1, &input);
- if (IS_ERR(req->hw_src)) {
- dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
- PTR_ERR(req->hw_src));
- return PTR_ERR(req->hw_src);
- }
- req->dma_src = input;
-
- req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
- (req->req_id << 1) + 1,
- &output);
- if (IS_ERR(req->hw_dst)) {
- ret = PTR_ERR(req->hw_dst);
- dev_err(dev, "failed to map the dst buffer to hw slg (%d)!\n",
- ret);
- goto err_unmap_input;
- }
- req->dma_dst = output;
-
- hisi_zip_fill_sqe(&zip_sqe, qp->req_type, input, output, a_req->slen,
- a_req->dlen, req->sskip, req->dskip);
- hisi_zip_config_buf_type(&zip_sqe, HZIP_SGL);
- hisi_zip_config_tag(&zip_sqe, req->req_id);
-
- /* send command to start a task */
- atomic64_inc(&dfx->send_cnt);
- ret = hisi_qp_send(qp, &zip_sqe);
- if (ret < 0) {
- atomic64_inc(&dfx->send_busy_cnt);
- ret = -EAGAIN;
- dev_dbg_ratelimited(dev, "failed to send request!\n");
- goto err_unmap_output;
- }
-
- return -EINPROGRESS;
-
-err_unmap_output:
- hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
-err_unmap_input:
- hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
- return ret;
-}
-
-static int hisi_zip_acompress(struct acomp_req *acomp_req)
-{
- struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
- struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
- struct device *dev = &qp_ctx->qp->qm->pdev->dev;
- struct hisi_zip_req *req;
- int head_size;
- int ret;
-
- /* let's output compression head now */
- head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type);
- if (head_size < 0) {
- dev_err_ratelimited(dev, "failed to add comp head (%d)!\n",
- head_size);
- return head_size;
- }
-
- req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- ret = hisi_zip_do_work(req, qp_ctx);
- if (ret != -EINPROGRESS) {
- dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
- hisi_zip_remove_req(qp_ctx, req);
- }
-
- return ret;
-}
-
-static int hisi_zip_adecompress(struct acomp_req *acomp_req)
-{
- struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
- struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
- struct device *dev = &qp_ctx->qp->qm->pdev->dev;
- struct hisi_zip_req *req;
- int head_size, ret;
-
- head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type);
- if (head_size < 0) {
- dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n",
- head_size);
- return head_size;
- }
-
- req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- ret = hisi_zip_do_work(req, qp_ctx);
- if (ret != -EINPROGRESS) {
- dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
- ret);
- hisi_zip_remove_req(qp_ctx, req);
- }
-
- return ret;
-}
-
static struct acomp_alg hisi_zip_acomp_zlib = {
.init = hisi_zip_acomp_init,
.exit = hisi_zip_acomp_exit,
@@ -665,7 +769,7 @@ static struct acomp_alg hisi_zip_acomp_gzip = {
}
};
-int hisi_zip_register_to_crypto(void)
+int hisi_zip_register_to_crypto(struct hisi_qm *qm)
{
int ret;
@@ -684,7 +788,7 @@ int hisi_zip_register_to_crypto(void)
return ret;
}
-void hisi_zip_unregister_from_crypto(void)
+void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
{
crypto_unregister_acomp(&hisi_zip_acomp_gzip);
crypto_unregister_acomp(&hisi_zip_acomp_zlib);
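
The structural change in this file is the hisi_zip_sqe_ops table: every place that used to poke SQE fields directly now goes through function pointers, and hisi_zip_ctx_init() binds the v1 or v2 table once based on qm.ver, so the hot path never branches on the hardware revision again. Reduced to its essentials, the dispatch looks like this (a sketch; the enum names are assumed from hisi_qm):

/* Bind the SQE accessors once at context init; all later fills and
 * completions dispatch through ctx->ops. */
static void zip_ctx_bind_ops(struct hisi_zip_ctx *ctx, u32 hw_ver)
{
	/* QM_HW_V3 introduces the type-3 SQE, with the tag moved to dw26 */
	ctx->ops = (hw_ver < QM_HW_V3) ? &hisi_zip_ops_v1 : &hisi_zip_ops_v2;
}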
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 02c445722445..2178b40e9f82 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -18,7 +18,6 @@
#define PCI_DEVICE_ID_ZIP_VF 0xa251
#define HZIP_QUEUE_NUM_V1 4096
-#define HZIP_QUEUE_NUM_V2 1024
#define HZIP_CLOCK_GATE_CTRL 0x301004
#define COMP0_ENABLE BIT(0)
@@ -69,10 +68,10 @@
#define HZIP_CORE_INT_RAS_CE_ENABLE 0x1
#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
-#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x7FE
+#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x1FFE
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
-#define HZIP_CORE_INT_MASK_ALL GENMASK(10, 0)
+#define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0)
#define HZIP_COMP_CORE_NUM 2
#define HZIP_DECOMP_CORE_NUM 6
#define HZIP_CORE_NUM (HZIP_COMP_CORE_NUM + \
@@ -134,17 +133,17 @@ static const struct hisi_zip_hw_error zip_hw_error[] = {
{ .int_msk = BIT(8), .msg = "zip_com_inf_err" },
{ .int_msk = BIT(9), .msg = "zip_enc_inf_err" },
{ .int_msk = BIT(10), .msg = "zip_pre_out_err" },
+ { .int_msk = BIT(11), .msg = "zip_axi_poison_err" },
+ { .int_msk = BIT(12), .msg = "zip_sva_err" },
{ /* sentinel */ }
};
enum ctrl_debug_file_index {
- HZIP_CURRENT_QM,
HZIP_CLEAR_ENABLE,
HZIP_DEBUG_FILE_NUM,
};
static const char * const ctrl_debug_file_name[] = {
- [HZIP_CURRENT_QM] = "current_qm",
[HZIP_CLEAR_ENABLE] = "clear_enable",
};
@@ -363,48 +362,6 @@ static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
return &hisi_zip->qm;
}
-static u32 current_qm_read(struct ctrl_debug_file *file)
-{
- struct hisi_qm *qm = file_to_qm(file);
-
- return readl(qm->io_base + QM_DFX_MB_CNT_VF);
-}
-
-static int current_qm_write(struct ctrl_debug_file *file, u32 val)
-{
- struct hisi_qm *qm = file_to_qm(file);
- u32 vfq_num;
- u32 tmp;
-
- if (val > qm->vfs_num)
- return -EINVAL;
-
- /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
- if (val == 0) {
- qm->debug.curr_qm_qp_num = qm->qp_num;
- } else {
- vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
- if (val == qm->vfs_num)
- qm->debug.curr_qm_qp_num = qm->ctrl_qp_num -
- qm->qp_num - (qm->vfs_num - 1) * vfq_num;
- else
- qm->debug.curr_qm_qp_num = vfq_num;
- }
-
- writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
-
- return 0;
-}
-
static u32 clear_enable_read(struct ctrl_debug_file *file)
{
struct hisi_qm *qm = file_to_qm(file);
@@ -438,9 +395,6 @@ static ssize_t hisi_zip_ctrl_debug_read(struct file *filp, char __user *buf,
spin_lock_irq(&file->lock);
switch (file->index) {
- case HZIP_CURRENT_QM:
- val = current_qm_read(file);
- break;
case HZIP_CLEAR_ENABLE:
val = clear_enable_read(file);
break;
@@ -478,11 +432,6 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
spin_lock_irq(&file->lock);
switch (file->index) {
- case HZIP_CURRENT_QM:
- ret = current_qm_write(file, val);
- if (ret)
- goto err_input;
- break;
case HZIP_CLEAR_ENABLE:
ret = clear_enable_write(file, val);
if (ret)
@@ -580,7 +529,7 @@ static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
int i;
- for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) {
+ for (i = HZIP_CLEAR_ENABLE; i < HZIP_DEBUG_FILE_NUM; i++) {
spin_lock_init(&zip->ctrl->files[i].lock);
zip->ctrl->files[i].ctrl = zip->ctrl;
zip->ctrl->files[i].index = i;
@@ -627,10 +576,6 @@ static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
{
int i, j;
- /* clear current_qm */
- writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
-
/* enable register read_clear bit */
writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
for (i = 0; i < ARRAY_SIZE(core_offsets); i++)
@@ -714,6 +659,22 @@ static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
qm->io_base + HZIP_CORE_INT_SET);
}
+static void hisi_zip_err_info_init(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_info *err_info = &qm->err_info;
+
+ err_info->ce = QM_BASE_CE;
+ err_info->fe = 0;
+ err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
+ err_info->dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE;
+ err_info->msi_wr_port = HZIP_WR_PORT;
+ err_info->acpi_rst = "ZRST";
+ err_info->nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT;
+
+ if (qm->ver >= QM_HW_V3)
+ err_info->nfe |= QM_ACC_DO_TASK_TIMEOUT;
+}
+
static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.hw_init = hisi_zip_set_user_domain_and_cache,
.hw_err_enable = hisi_zip_hw_error_enable,
@@ -723,16 +684,7 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.log_dev_hw_err = hisi_zip_log_hw_error,
.open_axi_master_ooo = hisi_zip_open_axi_master_ooo,
.close_axi_master_ooo = hisi_zip_close_axi_master_ooo,
- .err_info = {
- .ce = QM_BASE_CE,
- .nfe = QM_BASE_NFE |
- QM_ACC_WB_NOT_READY_TIMEOUT,
- .fe = 0,
- .ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC,
- .dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE,
- .msi_wr_port = HZIP_WR_PORT,
- .acpi_rst = "ZRST",
- }
+ .err_info_init = hisi_zip_err_info_init,
};
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
@@ -746,13 +698,8 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
hisi_zip->ctrl = ctrl;
ctrl->hisi_zip = hisi_zip;
-
- if (qm->ver == QM_HW_V1)
- qm->ctrl_qp_num = HZIP_QUEUE_NUM_V1;
- else
- qm->ctrl_qp_num = HZIP_QUEUE_NUM_V2;
-
qm->err_ini = &hisi_zip_err_ini;
+ qm->err_ini->err_info_init(qm);
hisi_zip_set_user_domain_and_cache(qm);
hisi_qm_dev_err_init(qm);
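
Like sec_main.c above, this file converts the static .err_info member into an .err_info_init() callback invoked from the PF probe path. The reason is visible in the body: the NFE mask now depends on qm->ver, which is only known at probe time, so a compile-time initializer can no longer express it (and the widened NFE enable 0x1FFE and GENMASK(12, 0) mask match the two new error sources at bits 11 and 12). The general shape of the refactor, as a self-contained sketch with arbitrary illustrative values:

#include <stdio.h>

/* Replace a static config struct with an init hook, so values can
 * depend on properties probed at runtime. */
struct dev_cfg { unsigned int nfe_mask; };

struct dev_ops {
	void (*cfg_init)(struct dev_cfg *cfg, unsigned int hw_ver);
};

static void cfg_init_impl(struct dev_cfg *cfg, unsigned int hw_ver)
{
	cfg->nfe_mask = 0x1ffe;			/* baseline (illustrative) */
	if (hw_ver >= 3)
		cfg->nfe_mask |= 0x10000;	/* extra bit on newer parts */
}

static const struct dev_ops ops = { .cfg_init = cfg_init_impl };

int main(void)
{
	struct dev_cfg cfg;

	ops.cfg_init(&cfg, 3);
	printf("nfe mask: %#x\n", cfg.nfe_mask);
	return 0;
}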
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index e813115d5432..aa4c7b2af3e2 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -963,8 +963,6 @@ static int img_hash_probe(struct platform_device *pdev)
hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdev->io_base)) {
err = PTR_ERR(hdev->io_base);
- dev_err(dev, "can't ioremap, returned %d\n", err);
-
goto res_err;
}
@@ -972,7 +970,6 @@ static int img_hash_probe(struct platform_device *pdev)
hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
if (IS_ERR(hdev->cpu_addr)) {
- dev_err(dev, "can't ioremap write port\n");
err = PTR_ERR(hdev->cpu_addr);
goto res_err;
}
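
The two dev_err() calls removed here were redundant: devm_ioremap_resource() and its devm_platform_ioremap_resource() wrapper already log the failing resource themselves, so the caller only needs to propagate the error. The resulting call-site idiom, sketched:

/* devm_ioremap_resource() prints its own diagnostic on failure,
 * so the call site just forwards the error code. */
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
	return PTR_ERR(base);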
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 6364583b88b2..9ff885d50edf 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -688,7 +688,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
/* Leave the DSE threads reset state */
writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
- /* Configure the procesing engine thresholds */
+ /* Configure the processing engine thresholds */
writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 8b0f17fc09fb..0616e369522e 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -265,7 +265,7 @@ static int setup_crypt_desc(void)
return 0;
}
-static spinlock_t desc_lock;
+static DEFINE_SPINLOCK(desc_lock);
static struct crypt_ctl *get_crypt_desc(void)
{
int i;
@@ -293,7 +293,7 @@ static struct crypt_ctl *get_crypt_desc(void)
}
}
-static spinlock_t emerg_lock;
+static DEFINE_SPINLOCK(emerg_lock);
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
int i;
@@ -1379,9 +1379,6 @@ static int __init ixp_module_init(void)
if (IS_ERR(pdev))
return PTR_ERR(pdev);
- spin_lock_init(&desc_lock);
- spin_lock_init(&emerg_lock);
-
err = init_ixp_crypto(&pdev->dev);
if (err) {
platform_device_unregister(pdev);
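
DEFINE_SPINLOCK() both declares the lock and initializes it at compile time, which is why the two spin_lock_init() calls can disappear from ixp_module_init(): the locks are valid before any code runs, closing the window where an early caller could take an uninitialized lock. Usage is unchanged; a sketch with a hypothetical helper:

#include <linux/spinlock.h>

/* Statically initialized: usable from the first instruction, no
 * runtime spin_lock_init() required. */
static DEFINE_SPINLOCK(desc_lock);

static void touch_desc_pool(void)
{
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);
	/* ... manipulate the descriptor pool ... */
	spin_unlock_irqrestore(&desc_lock, flags);
}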
diff --git a/drivers/crypto/keembay/keembay-ocs-aes-core.c b/drivers/crypto/keembay/keembay-ocs-aes-core.c
index b6b25d994af3..e2a39fdaf623 100644
--- a/drivers/crypto/keembay/keembay-ocs-aes-core.c
+++ b/drivers/crypto/keembay/keembay-ocs-aes-core.c
@@ -1623,10 +1623,8 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev)
}
aes_dev->base_reg = devm_ioremap_resource(&pdev->dev, aes_mem);
- if (IS_ERR(aes_dev->base_reg)) {
- dev_err(dev, "Failed to get base address\n");
+ if (IS_ERR(aes_dev->base_reg))
return PTR_ERR(aes_dev->base_reg);
- }
/* Get and request IRQ */
aes_dev->irq = platform_get_irq(pdev, 0);
@@ -1649,8 +1647,10 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev)
/* Initialize crypto engine */
aes_dev->engine = crypto_engine_alloc_init(dev, true);
- if (!aes_dev->engine)
+ if (!aes_dev->engine) {
+ rc = -ENOMEM;
goto list_del;
+ }
rc = crypto_engine_start(aes_dev->engine);
if (rc) {
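
The keembay probe fixes in this and the following file address the same bug: crypto_engine_alloc_init() returns NULL on failure rather than an ERR_PTR, and the old code jumped to the unwind label with rc still holding 0, so the probe could report success after the allocation failed. A hedged sketch of the corrected shape (hypothetical driver struct):

#include <crypto/engine.h>

struct my_drv { struct crypto_engine *engine; };	/* hypothetical */

static int my_probe_engine(struct device *dev, struct my_drv *drv)
{
	int rc;

	drv->engine = crypto_engine_alloc_init(dev, true);
	if (!drv->engine)
		return -ENOMEM;	/* NULL carries no errno: pick one */

	rc = crypto_engine_start(drv->engine);
	if (rc)
		crypto_engine_exit(drv->engine);

	return rc;
}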
diff --git a/drivers/crypto/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
index c4b97b4160e9..0379dbf32a4c 100644
--- a/drivers/crypto/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
@@ -1192,10 +1192,8 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
}
hcu_dev->io_base = devm_ioremap_resource(dev, hcu_mem);
- if (IS_ERR(hcu_dev->io_base)) {
- dev_err(dev, "Could not io-remap mem resource.\n");
+ if (IS_ERR(hcu_dev->io_base))
return PTR_ERR(hcu_dev->io_base);
- }
init_completion(&hcu_dev->irq_done);
@@ -1220,8 +1218,10 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
/* Initialize crypto engine */
hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
- if (!hcu_dev->engine)
+ if (!hcu_dev->engine) {
+ rc = -ENOMEM;
goto list_del;
+ }
rc = crypto_engine_start(hcu_dev->engine);
if (rc) {
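
Both Keem Bay probe fixes address the same bug: crypto_engine_alloc_init() returns NULL on failure rather than an ERR_PTR(), so jumping to the error label without assigning rc would propagate whatever value rc happened to hold, possibly zero. The fix selects -ENOMEM explicitly. A sketch of the corrected shape (demo_* is illustrative):

#include <linux/errno.h>
#include <crypto/engine.h>

static int demo_engine_setup(struct device *dev,
			     struct crypto_engine **out)
{
	struct crypto_engine *engine;

	engine = crypto_engine_alloc_init(dev, true);
	if (!engine)
		return -ENOMEM;	/* NULL carries no errno; choose one */

	*out = engine;
	return crypto_engine_start(engine);
}
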
diff --git a/drivers/crypto/keembay/ocs-hcu.c b/drivers/crypto/keembay/ocs-hcu.c
index 81eecacf603a..deb9bd460ee6 100644
--- a/drivers/crypto/keembay/ocs-hcu.c
+++ b/drivers/crypto/keembay/ocs-hcu.c
@@ -93,7 +93,7 @@
#define OCS_HCU_WAIT_BUSY_TIMEOUT_US 1000000
/**
- * struct ocs_hcu_dma_list - An entry in an OCS DMA linked list.
+ * struct ocs_hcu_dma_entry - An entry in an OCS DMA linked list.
* @src_addr: Source address of the data.
* @src_len: Length of data to be fetched.
* @nxt_desc: Next descriptor to fetch.
@@ -107,7 +107,7 @@ struct ocs_hcu_dma_entry {
};
/**
- * struct ocs_dma_list - OCS-specific DMA linked list.
+ * struct ocs_hcu_dma_list - OCS-specific DMA linked list.
* @head: The head of the list (points to the array backing the list).
* @tail: The current tail of the list; NULL if the list is empty.
* @dma_addr: The DMA address of @head (i.e., the DMA address of the backing
@@ -597,7 +597,7 @@ int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo)
}
/**
- * ocs_hcu_digest() - Perform a hashing iteration.
+ * ocs_hcu_hash_update() - Perform a hashing iteration.
* @hcu_dev: The OCS HCU device to use.
* @ctx: The OCS HCU hashing context.
* @dma_list: The OCS DMA list mapping the input data to process.
@@ -632,7 +632,7 @@ int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
}
/**
- * ocs_hcu_hash_final() - Update and finalize hash computation.
+ * ocs_hcu_hash_finup() - Update and finalize hash computation.
* @hcu_dev: The OCS HCU device to use.
* @ctx: The OCS HCU hashing context.
* @dma_list: The OCS DMA list mapping the input data to process.
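
The ocs-hcu.c hunks are pure kernel-doc repairs: the identifier on the first line of a /** comment must match the symbol being documented, otherwise scripts/kernel-doc (and a W=1 build) warns about the mismatch. The expected shape, as a generic example rather than the driver's code:

/**
 * demo_hash_update() - Perform one hashing iteration (example only).
 * @dev: The device to use.
 * @len: Number of bytes to process.
 *
 * The identifier above must match the function being documented;
 * the four renames in this file are exactly such corrections.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int demo_hash_update(struct device *dev, size_t len);
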
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index 3518fac29834..ecedd91a8d85 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -121,14 +121,14 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox,
struct pci_dev *pdev);
-int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox,
- struct pci_dev *pdev, u64 reg, u64 *val);
+int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 *val, int blkaddr);
int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 val);
+ u64 reg, u64 val, int blkaddr);
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 *val);
+ u64 reg, u64 *val, int blkaddr);
int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 val);
+ u64 reg, u64 val, int blkaddr);
struct otx2_cptlfs_info;
int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
index 51cb6404ded7..9074876d38e5 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -43,7 +43,7 @@ int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev)
}
int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 *val)
+ u64 reg, u64 *val, int blkaddr)
{
struct cpt_rd_wr_reg_msg *reg_msg;
@@ -62,12 +62,13 @@ int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
reg_msg->is_write = 0;
reg_msg->reg_offset = reg;
reg_msg->ret_val = val;
+ reg_msg->blkaddr = blkaddr;
return 0;
}
int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 val)
+ u64 reg, u64 val, int blkaddr)
{
struct cpt_rd_wr_reg_msg *reg_msg;
@@ -86,16 +87,17 @@ int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
reg_msg->is_write = 1;
reg_msg->reg_offset = reg;
reg_msg->val = val;
+ reg_msg->blkaddr = blkaddr;
return 0;
}
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 *val)
+ u64 reg, u64 *val, int blkaddr)
{
int ret;
- ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val);
+ ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val, blkaddr);
if (ret)
return ret;
@@ -103,11 +105,11 @@ int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
}
int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 val)
+ u64 reg, u64 val, int blkaddr)
{
int ret;
- ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val);
+ ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val, blkaddr);
if (ret)
return ret;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
index 823a4571fd67..34aba1532761 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -56,7 +56,7 @@ static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
- &lf_ctrl.u);
+ &lf_ctrl.u, lfs->blkaddr);
if (ret)
return ret;
@@ -64,7 +64,7 @@ static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
- lf_ctrl.u);
+ lf_ctrl.u, lfs->blkaddr);
return ret;
}
@@ -77,7 +77,7 @@ static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
- &lf_ctrl.u);
+ &lf_ctrl.u, lfs->blkaddr);
if (ret)
return ret;
@@ -85,7 +85,7 @@ static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
- lf_ctrl.u);
+ lf_ctrl.u, lfs->blkaddr);
return ret;
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index 314e97354100..ab1678fc564d 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -95,6 +95,7 @@ struct otx2_cptlfs_info {
u8 kcrypto_eng_grp_num; /* Kernel crypto engine group number */
u8 kvf_limits; /* Kernel crypto limits */
atomic_t state; /* LF's state. started/reset */
+ int blkaddr; /* CPT blkaddr: BLKADDR_CPT0/BLKADDR_CPT1 */
};
static inline void otx2_cpt_free_instruction_queues(
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
index 8c899ad531a5..e19af1356f12 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
@@ -51,6 +51,7 @@ struct otx2_cptpf_dev {
u8 max_vfs; /* Maximum number of VFs supported by CPT */
u8 enabled_vfs; /* Number of enabled VFs */
u8 kvf_limits; /* Kernel crypto limits */
+ bool has_cpt1;
};
irqreturn_t otx2_cptpf_afpf_mbox_intr(int irq, void *arg);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 5277e04badd9..58f47e3ab62e 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -451,19 +451,19 @@ static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
return 0;
}
-static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
+static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr)
{
int timeout = 10, ret;
u64 reg = 0;
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_BLK_RST, 0x1);
+ CPT_AF_BLK_RST, 0x1, blkaddr);
if (ret)
return ret;
do {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_BLK_RST, &reg);
+ CPT_AF_BLK_RST, &reg, blkaddr);
if (ret)
return ret;
@@ -478,11 +478,35 @@ static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
return ret;
}
+static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
+{
+ int ret = 0;
+
+ if (cptpf->has_cpt1) {
+ ret = cptx_device_reset(cptpf, BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_device_reset(cptpf, BLKADDR_CPT0);
+}
+
+static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
+{
+ u64 cfg;
+
+ cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
+ if (cfg & BIT_ULL(11))
+ cptpf->has_cpt1 = true;
+}
+
static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
{
union otx2_cptx_af_constants1 af_cnsts1 = {0};
int ret = 0;
+ /* check if 'implemented' bit is set for block BLKADDR_CPT1 */
+ cptpf_check_block_implemented(cptpf);
/* Reset the CPT PF device */
ret = cptpf_device_reset(cptpf);
if (ret)
@@ -490,7 +514,8 @@ static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
/* Get number of SE, IE and AE engines */
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_CONSTANTS1, &af_cnsts1.u);
+ CPT_AF_CONSTANTS1, &af_cnsts1.u,
+ BLKADDR_CPT0);
if (ret)
return ret;
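
From this point the octeontx2 series threads a blkaddr argument through every AF register helper so the same code can address either CPT block. The probe reads the RVU block-discovery register for BLKADDR_CPT1 and, when the implemented bit is set, each operation runs on CPT1 first and then on CPT0. The dispatch pattern, sketched generically (struct demo_dev and the op callback are illustrative; BLKADDR_CPT0/1 are assumed to come from the RVU headers used above):

#include <linux/types.h>

struct demo_dev {
	bool has_cpt1;	/* discovered at probe time */
};

static int demo_on_each_block(struct demo_dev *dev,
			      int (*op)(struct demo_dev *d, int blkaddr))
{
	int ret;

	if (dev->has_cpt1) {
		ret = op(dev, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return op(dev, BLKADDR_CPT0);	/* CPT0 is always present */
}
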
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 1dc3ba298139..a531f4c8b441 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -153,16 +153,16 @@ static int get_ucode_type(struct device *dev,
}
static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
- dma_addr_t dma_addr)
+ dma_addr_t dma_addr, int blkaddr)
{
return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_EXEX_UCODE_BASE(eng),
- (u64)dma_addr);
+ (u64)dma_addr, blkaddr);
}
-static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
+static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
+ struct otx2_cptpf_dev *cptpf, int blkaddr)
{
- struct otx2_cptpf_dev *cptpf = obj;
struct otx2_cpt_engs_rsvd *engs;
dma_addr_t dma_addr;
int i, bit, ret;
@@ -170,7 +170,7 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
/* Set PF number for microcode fetches */
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_PF_FUNC,
- cptpf->pf_id << RVU_PFVF_PF_SHIFT);
+ cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
if (ret)
return ret;
@@ -187,7 +187,8 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
*/
for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
if (!eng_grp->g->eng_ref_cnt[bit]) {
- ret = __write_ucode_base(cptpf, bit, dma_addr);
+ ret = __write_ucode_base(cptpf, bit, dma_addr,
+ blkaddr);
if (ret)
return ret;
}
@@ -195,23 +196,32 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
return 0;
}
-static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
- void *obj)
+static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
{
struct otx2_cptpf_dev *cptpf = obj;
- struct otx2_cpt_bitmap bmap;
+ int ret;
+
+ if (cptpf->has_cpt1) {
+ ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);
+}
+
+static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ struct otx2_cptpf_dev *cptpf,
+ struct otx2_cpt_bitmap bmap,
+ int blkaddr)
+{
int i, timeout = 10;
int busy, ret;
u64 reg = 0;
- bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
- if (!bmap.size)
- return -EINVAL;
-
/* Detach the cores from group */
for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_EXEX_CTL2(i), &reg);
+ CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
if (ret)
return ret;
@@ -221,7 +231,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
- CPT_AF_EXEX_CTL2(i), reg);
+ CPT_AF_EXEX_CTL2(i), reg,
+ blkaddr);
if (ret)
return ret;
}
@@ -237,7 +248,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
- CPT_AF_EXEX_STS(i), &reg);
+ CPT_AF_EXEX_STS(i), &reg,
+ blkaddr);
if (ret)
return ret;
@@ -253,7 +265,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
if (!eng_grp->g->eng_ref_cnt[i]) {
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
- CPT_AF_EXEX_CTL(i), 0x0);
+ CPT_AF_EXEX_CTL(i), 0x0,
+ blkaddr);
if (ret)
return ret;
}
@@ -262,22 +275,39 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
return 0;
}
-static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
- void *obj)
+static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
{
struct otx2_cptpf_dev *cptpf = obj;
struct otx2_cpt_bitmap bmap;
- u64 reg = 0;
- int i, ret;
+ int ret;
bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
if (!bmap.size)
return -EINVAL;
+ if (cptpf->has_cpt1) {
+ ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
+ BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
+ BLKADDR_CPT0);
+}
+
+static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ struct otx2_cptpf_dev *cptpf,
+ struct otx2_cpt_bitmap bmap,
+ int blkaddr)
+{
+ u64 reg = 0;
+ int i, ret;
+
/* Attach the cores to the group */
for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_EXEX_CTL2(i), &reg);
+ CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
if (ret)
return ret;
@@ -287,7 +317,8 @@ static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
- CPT_AF_EXEX_CTL2(i), reg);
+ CPT_AF_EXEX_CTL2(i), reg,
+ blkaddr);
if (ret)
return ret;
}
@@ -295,15 +326,33 @@ static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
/* Enable the cores */
for_each_set_bit(i, bmap.bits, bmap.size) {
- ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox,
- cptpf->pdev,
- CPT_AF_EXEX_CTL(i), 0x1);
+ ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL(i), 0x1,
+ blkaddr);
if (ret)
return ret;
}
- ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+ return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+}
- return ret;
+static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ struct otx2_cptpf_dev *cptpf = obj;
+ struct otx2_cpt_bitmap bmap;
+ int ret;
+
+ bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
+ if (!bmap.size)
+ return -EINVAL;
+
+ if (cptpf->has_cpt1) {
+ ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap,
+ BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0);
}
static int load_fw(struct device *dev, struct fw_info_t *fw_info,
@@ -1140,20 +1189,18 @@ release_fw:
return ret;
}
-int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
+static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores,
+ int blkaddr)
{
- int i, ret, busy, total_cores;
- int timeout = 10;
- u64 reg = 0;
-
- total_cores = cptpf->eng_grps.avail.max_se_cnt +
- cptpf->eng_grps.avail.max_ie_cnt +
- cptpf->eng_grps.avail.max_ae_cnt;
+ int timeout = 10, ret;
+ int i, busy;
+ u64 reg;
/* Disengage the cores from groups */
for (i = 0; i < total_cores; i++) {
ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_EXEX_CTL2(i), 0x0);
+ CPT_AF_EXEX_CTL2(i), 0x0,
+ blkaddr);
if (ret)
return ret;
@@ -1173,7 +1220,8 @@ int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
for (i = 0; i < total_cores; i++) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
- CPT_AF_EXEX_STS(i), &reg);
+ CPT_AF_EXEX_STS(i), &reg,
+ blkaddr);
if (ret)
return ret;
@@ -1187,13 +1235,30 @@ int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
/* Disable the cores */
for (i = 0; i < total_cores; i++) {
ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_EXEX_CTL(i), 0x0);
+ CPT_AF_EXEX_CTL(i), 0x0,
+ blkaddr);
if (ret)
return ret;
}
return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}
+int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
+{
+ int total_cores, ret;
+
+ total_cores = cptpf->eng_grps.avail.max_se_cnt +
+ cptpf->eng_grps.avail.max_ie_cnt +
+ cptpf->eng_grps.avail.max_ae_cnt;
+
+ if (cptpf->has_cpt1) {
+ ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0);
+}
+
void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
struct otx2_cpt_eng_grps *eng_grps)
{
@@ -1354,6 +1419,7 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
lfs->pdev = pdev;
lfs->reg_base = cptpf->reg_base;
lfs->mbox = &cptpf->afpf_mbox;
+ lfs->blkaddr = BLKADDR_CPT0;
ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
OTX2_CPT_QUEUE_HI_PRIO, 1);
if (ret)
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index 92e921eceed7..d6314ea9ae89 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES CBC routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 4c9362eebefd..e7384d107573 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES CCM routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 6d5ce1a66f1e..13f518802343 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES CTR routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index 77e338dc33f1..7a729dc2bc17 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES ECB routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 19c6ed5baea4..fc9baca13920 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES GCM routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index 48dc1c98ca52..eb5c8f689360 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES XCBC routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index 13c65deda8e9..446f611726df 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -932,8 +932,10 @@ static int __init nx_powernv_probe_vas(struct device_node *pn)
ret = find_nx_device_tree(dn, chip_id, vasid,
NX_CT_GZIP, "ibm,p9-nx-gzip", &ct_gzip);
- if (ret)
+ if (ret) {
+ of_node_put(dn);
return ret;
+ }
}
if (!ct_842 || !ct_gzip) {
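
The powernv fix plugs a device-tree refcount leak: the OF node iterators take a reference on each node they hand out, so an early return from inside the loop must drop it with of_node_put() or the node can never be freed. A minimal sketch of the rule (demo_handle() is a hypothetical per-node operation):

#include <linux/of.h>

static int demo_handle(struct device_node *dn)
{
	return 0;	/* stand-in for real per-node work */
}

static int demo_scan(struct device_node *parent)
{
	struct device_node *dn;
	int ret;

	for_each_child_of_node(parent, dn) {
		ret = demo_handle(dn);
		if (ret) {
			of_node_put(dn); /* iterator's ref; drop on exit */
			return ret;
		}
	}
	return 0;
}
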
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 90d9a37a57f6..b0ad665e4bda 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* SHA-256 routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index eb8627a0f317..c29103a1a0b6 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* SHA-512 routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 1d0e8a1ba160..010e87d9da36 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
@@ -200,7 +200,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
* @sg: sg list head
* @end: sg list end
* @delta: the amount we need to crop in order to bound the list.
- *
+ * @nbytes: length of data in the scatterlists or data length - whichever
+ * is greater.
*/
static long int trim_sg_list(struct nx_sg *sg,
struct nx_sg *end,
diff --git a/drivers/crypto/nx/nx_debugfs.c b/drivers/crypto/nx/nx_debugfs.c
index 1975bcbee997..ee7cd88bb10a 100644
--- a/drivers/crypto/nx/nx_debugfs.c
+++ b/drivers/crypto/nx/nx_debugfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* debugfs routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index a45bdcf3026d..0dd4c6b157de 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -103,9 +103,8 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
dd->err = 0;
}
- err = pm_runtime_get_sync(dd->dev);
+ err = pm_runtime_resume_and_get(dd->dev);
if (err < 0) {
- pm_runtime_put_noidle(dd->dev);
dev_err(dd->dev, "failed to get sync: %d\n", err);
return err;
}
@@ -1134,7 +1133,7 @@ static int omap_aes_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
pm_runtime_enable(dev);
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "%s: failed to get_sync(%d)\n",
__func__, err);
@@ -1303,7 +1302,7 @@ static int omap_aes_suspend(struct device *dev)
static int omap_aes_resume(struct device *dev)
{
- pm_runtime_get_sync(dev);
+ pm_runtime_resume_and_get(dev);
return 0;
}
#endif
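
All three omap-aes hunks move to pm_runtime_resume_and_get(). The older pm_runtime_get_sync() increments the device usage count even when the resume fails, which forces every caller to balance it with pm_runtime_put_noidle(); the newer helper performs that decrement itself on failure. Both patterns side by side, as a sketch:

#include <linux/pm_runtime.h>

static int demo_resume(struct device *dev)
{
	int err;

	/* Old pattern: get_sync() bumps the usage count even when the
	 * resume fails, so the caller must undo it by hand. */
	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		pm_runtime_put_noidle(dev);
		return err;
	}
	pm_runtime_put(dev);

	/* New helper: drops the usage count itself on failure. */
	return pm_runtime_resume_and_get(dev);
}
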
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
index 6a9be01fdf33..3524ddd48930 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -224,6 +224,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->uof_get_name = uof_get_name;
hw_data->uof_get_ae_mask = uof_get_ae_mask;
hw_data->set_msix_rttable = set_msix_default_rttable;
+ hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index f5990d042c9a..1dd64af22bea 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -212,6 +212,7 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
hw_data->reset_device = adf_reset_flr;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+ hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 1d1532e8fb6d..067ca5e17d38 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_free_reg;
- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
index cadcf12884c8..30337390513c 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -214,6 +214,7 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
hw_data->reset_device = adf_reset_flr;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+ hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index 04742a6d91ca..51ea88c0b17d 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_free_reg;
- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 5527344546e5..ac435b44f1d2 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -173,6 +173,7 @@ struct adf_hw_device_data {
void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
bool enable);
void (*enable_ints)(struct adf_accel_dev *accel_dev);
+ void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
index 1aa17303838d..9e560c7d4163 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
@@ -179,3 +179,28 @@ u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
return capabilities;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);
+
+void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+ u32 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
+ unsigned long accel_mask = hw_data->accel_mask;
+ void __iomem *pmisc_addr;
+ struct adf_bar *pmisc;
+ int pmisc_id;
+ u32 i = 0;
+
+ pmisc_id = hw_data->get_misc_bar_id(hw_data);
+ pmisc = &GET_BARS(accel_dev)[pmisc_id];
+ pmisc_addr = pmisc->virt_addr;
+
+ /* Configures WDT timers */
+ for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
+ /* Enable WDT for sym and dc */
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDT(i), timer_val);
+ /* Enable WDT for pke */
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKE(i), timer_val_pke);
+ }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer);
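
adf_gen2_set_ssm_wdtimer() walks the accelerator mask with for_each_set_bit() and programs one pair of watchdog CSRs per enabled accelerator at a fixed 0x4000 stride (see the ADF_SSMWDT(i) macros in the header hunk below). The loop shape in isolation, as a sketch with illustrative names:

#include <linux/bitops.h>
#include <linux/io.h>

static void demo_program_wdt(void __iomem *csr, unsigned long accel_mask,
			     unsigned int num_accel, u32 timeout)
{
	unsigned int i;

	/* One write per accelerator whose bit is set in the mask;
	 * 0x54 + i * 0x4000 mirrors ADF_SSMWDT(i). */
	for_each_set_bit(i, &accel_mask, num_accel)
		writel(timeout, csr + 0x54 + i * 0x4000);
}
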
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
index 3816e6500352..756b0ddfac5e 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
@@ -113,11 +113,24 @@ do { \
/* Power gating */
#define ADF_POWERGATE_PKE BIT(24)
+/* WDT timers
+ *
+ * Timeout is in cycles. Clock speed may vary across products but this
+ * value should be a few milli-seconds.
+ */
+#define ADF_SSM_WDT_DEFAULT_VALUE 0x200000
+#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x2000000
+#define ADF_SSMWDT_OFFSET 0x54
+#define ADF_SSMWDTPKE_OFFSET 0x58
+#define ADF_SSMWDT(i) (ADF_SSMWDT_OFFSET + ((i) * 0x4000))
+#define ADF_SSMWDTPKE(i) (ADF_SSMWDTPKE_OFFSET + ((i) * 0x4000))
+
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
int num_a_regs, int num_b_regs);
void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info);
void adf_gen2_get_arb_info(struct arb_info *arb_info);
u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev);
+void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
index b72ff58e0bc7..000528327b29 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
@@ -99,3 +99,43 @@ void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
+
+static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
+ u32 *lower)
+{
+ *lower = lower_32_bits(value);
+ *upper = upper_32_bits(value);
+}
+
+void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+ u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
+ u32 ssm_wdt_pke_high = 0;
+ u32 ssm_wdt_pke_low = 0;
+ u32 ssm_wdt_high = 0;
+ u32 ssm_wdt_low = 0;
+ void __iomem *pmisc_addr;
+ struct adf_bar *pmisc;
+ int pmisc_id;
+
+ pmisc_id = hw_data->get_misc_bar_id(hw_data);
+ pmisc = &GET_BARS(accel_dev)[pmisc_id];
+ pmisc_addr = pmisc->virt_addr;
+
+ /* Convert 64bit WDT timer value into 32bit values for
+ * mmio write to 32bit CSRs.
+ */
+ adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
+ adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high,
+ &ssm_wdt_pke_low);
+
+ /* Enable WDT for sym and dc */
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low);
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high);
+ /* Enable WDT for pke */
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low);
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
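
The gen4 variant differs in that the watchdog CSRs are 32 bits wide while the timer values are u64, so each value is split with lower_32_bits()/upper_32_bits() and written as two MMIO stores. A minimal sketch (offsets mirror the ADF_SSMWDT{L,H}_OFFSET definitions in the header hunk below):

#include <linux/io.h>
#include <linux/kernel.h>

static void demo_write_wdt64(void __iomem *csr, u64 val)
{
	/* Two 32-bit CSRs back one 64-bit value. */
	writel(lower_32_bits(val), csr + 0x54);
	writel(upper_32_bits(val), csr + 0x5C);
}
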
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
index 8ab62b2ac311..b8fca1ff7aab 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
@@ -94,6 +94,18 @@ do { \
ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_RING_SRV_ARB_EN, (value))
-void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+/* WDT timers
+ *
+ * Timeout is in cycles. Clock speed may vary across products but this
+ * value should be a few milli-seconds.
+ */
+#define ADF_SSM_WDT_DEFAULT_VALUE 0x200000
+#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000
+#define ADF_SSMWDTL_OFFSET 0x54
+#define ADF_SSMWDTH_OFFSET 0x5C
+#define ADF_SSMWDTPKEL_OFFSET 0x58
+#define ADF_SSMWDTPKEH_OFFSET 0x60
+void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
+void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 42029153408e..744c40351428 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -162,6 +162,10 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
return -EFAULT;
}
+ /* Set ssm watch dog timer */
+ if (hw_data->set_ssm_wdtimer)
+ hw_data->set_ssm_wdtimer(accel_dev);
+
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index c45853463530..e3ad5587be49 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -291,19 +291,32 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
ret = adf_isr_alloc_msix_entry_table(accel_dev);
if (ret)
- return ret;
- if (adf_enable_msix(accel_dev))
goto err_out;
- if (adf_setup_bh(accel_dev))
- goto err_out;
+ ret = adf_enable_msix(accel_dev);
+ if (ret)
+ goto err_free_msix_table;
- if (adf_request_irqs(accel_dev))
- goto err_out;
+ ret = adf_setup_bh(accel_dev);
+ if (ret)
+ goto err_disable_msix;
+
+ ret = adf_request_irqs(accel_dev);
+ if (ret)
+ goto err_cleanup_bh;
return 0;
+
+err_cleanup_bh:
+ adf_cleanup_bh(accel_dev);
+
+err_disable_msix:
+ adf_disable_msix(&accel_dev->accel_pci_dev);
+
+err_free_msix_table:
+ adf_isr_free_msix_entry_table(accel_dev);
+
err_out:
- adf_isr_resource_free(accel_dev);
- return -EFAULT;
+ return ret;
}
EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
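
The adf_isr rework converts a single catch-all error path into a reverse-order unwind ladder: each failure releases exactly what the earlier steps set up, and the function now propagates the real errno instead of a blanket -EFAULT (the adf_vf_isr.c hunk below applies the same idea). The pattern in the abstract, with stub step functions:

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return 0; }
static void undo_a(void) { }
static void undo_b(void) { }

static int demo_setup(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;

	ret = step_b();
	if (ret)
		goto err_undo_a;

	ret = step_c();
	if (ret)
		goto err_undo_b;

	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
	return ret;	/* the real errno, not a blanket -EFAULT */
}
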
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
index 8b090b7ae8c6..a1b77bd7a894 100644
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
@@ -169,7 +169,7 @@ out:
* @msg: Message to send
* @vf_nr: VF number to which the message will be sent
*
- * Function sends a messge from the PF to a VF
+ * Function sends a message from the PF to a VF
*
* Return: 0 on success, error code otherwise.
*/
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 888c1e047295..8ba28409fb74 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -172,6 +172,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring)
dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
ring->base_addr, ring->dma_addr);
+ ring->base_addr = NULL;
return -EFAULT;
}
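
Clearing ring->base_addr after dma_free_coherent() makes a later teardown pass treat the ring as already released instead of freeing the same coherent buffer twice. A sketch of the guarded free (struct demo_ring is illustrative):

#include <linux/dma-mapping.h>

struct demo_ring {
	void *base_addr;
	dma_addr_t dma_addr;
};

static void demo_ring_free(struct device *dev, struct demo_ring *ring,
			   size_t size)
{
	if (!ring->base_addr)
		return;	/* already released: later cleanup is a no-op */
	dma_free_coherent(dev, size, ring->base_addr, ring->dma_addr);
	ring->base_addr = NULL;
}
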
diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
index 2c98fb63f7b7..e85bd62d134a 100644
--- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
@@ -8,7 +8,7 @@
* adf_vf2pf_init() - send init msg to PF
* @accel_dev: Pointer to acceleration VF device.
*
- * Function sends an init messge from the VF to a PF
+ * Function sends an init message from the VF to a PF
*
* Return: 0 on success, error code otherwise.
*/
@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(adf_vf2pf_init);
* adf_vf2pf_shutdown() - send shutdown msg to PF
* @accel_dev: Pointer to acceleration VF device.
*
- * Function sends a shutdown messge from the VF to a PF
+ * Function sends a shutdown message from the VF to a PF
*
* Return: void
*/
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index 38d316a42ba6..888388acb6bd 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -261,17 +261,26 @@ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
goto err_out;
if (adf_setup_pf2vf_bh(accel_dev))
- goto err_out;
+ goto err_disable_msi;
if (adf_setup_bh(accel_dev))
- goto err_out;
+ goto err_cleanup_pf2vf_bh;
if (adf_request_msi_irq(accel_dev))
- goto err_out;
+ goto err_cleanup_bh;
return 0;
+
+err_cleanup_bh:
+ adf_cleanup_bh(accel_dev);
+
+err_cleanup_pf2vf_bh:
+ adf_cleanup_pf2vf_bh(accel_dev);
+
+err_disable_msi:
+ adf_disable_msi(accel_dev);
+
err_out:
- adf_vf_isr_resource_free(accel_dev);
return -EFAULT;
}
EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index ff78c73c47e3..f998ed58457c 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -718,8 +718,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
int n = sg_nents(sgl);
struct qat_alg_buf_list *bufl;
struct qat_alg_buf_list *buflout = NULL;
- dma_addr_t blp;
- dma_addr_t bloutp = 0;
+ dma_addr_t blp = DMA_MAPPING_ERROR;
+ dma_addr_t bloutp = DMA_MAPPING_ERROR;
struct scatterlist *sg;
size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
@@ -731,9 +731,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
if (unlikely(!bufl))
return -ENOMEM;
- blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, blp)))
- goto err_in;
+ for_each_sg(sgl, sg, n, i)
+ bufl->bufers[i].addr = DMA_MAPPING_ERROR;
for_each_sg(sgl, sg, n, i) {
int y = sg_nctr;
@@ -750,6 +749,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
sg_nctr++;
}
bufl->num_bufs = sg_nctr;
+ blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, blp)))
+ goto err_in;
qat_req->buf.bl = bufl;
qat_req->buf.blp = blp;
qat_req->buf.sz = sz;
@@ -764,10 +766,11 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!buflout))
goto err_in;
- bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, bloutp)))
- goto err_out;
+
bufers = buflout->bufers;
+ for_each_sg(sglout, sg, n, i)
+ bufers[i].addr = DMA_MAPPING_ERROR;
+
for_each_sg(sglout, sg, n, i) {
int y = sg_nctr;
@@ -784,6 +787,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
}
buflout->num_bufs = sg_nctr;
buflout->num_mapped_bufs = sg_nctr;
+ bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, bloutp)))
+ goto err_out;
qat_req->buf.blout = buflout;
qat_req->buf.bloutp = bloutp;
qat_req->buf.sz_out = sz_out;
@@ -795,17 +801,21 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
return 0;
err_out:
+ if (!dma_mapping_error(dev, bloutp))
+ dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+
n = sg_nents(sglout);
for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, buflout->bufers[i].addr))
dma_unmap_single(dev, buflout->bufers[i].addr,
buflout->bufers[i].len,
DMA_BIDIRECTIONAL);
- if (!dma_mapping_error(dev, bloutp))
- dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
kfree(buflout);
err_in:
+ if (!dma_mapping_error(dev, blp))
+ dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+
n = sg_nents(sgl);
for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, bufl->bufers[i].addr))
@@ -813,8 +823,6 @@ err_in:
bufl->bufers[i].len,
DMA_BIDIRECTIONAL);
- if (!dma_mapping_error(dev, blp))
- dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
kfree(bufl);
dev_err(dev, "Failed to map buf for dma\n");
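
The qat_algs reordering follows the DMA-API contract: a buffer mapped DMA_TO_DEVICE may be read by the device at any point after dma_map_single(), and CPU writes made after the mapping are not guaranteed to be visible, so the descriptor tables are now fully populated before they are mapped. Pre-seeding every handle with DMA_MAPPING_ERROR also lets the shared unwind path use dma_mapping_error() to decide what actually needs unmapping. The populate-then-map rule as a sketch:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int demo_map_desc(struct device *dev, void *desc, size_t len,
			 dma_addr_t *out)
{
	dma_addr_t handle = DMA_MAPPING_ERROR;	/* sentinel for unwind */

	/* 1. Populate 'desc' completely here: once it is mapped
	 *    TO_DEVICE, further CPU writes may not reach the device. */

	/* 2. Map last. */
	handle = dma_map_single(dev, desc, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	*out = handle;
	return 0;
}
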
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index c972554a755e..29999da716cc 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_free_reg;
- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index cffa9fc628ff..850f257d00f3 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -40,7 +40,6 @@ struct qce_cipher_reqctx {
struct scatterlist result_sg;
struct sg_table dst_tbl;
struct scatterlist *dst_sg;
- struct sg_table src_tbl;
struct scatterlist *src_sg;
unsigned int cryptlen;
struct skcipher_request fallback_req; // keep at the end
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index a73db2a5637f..dceb9579d87a 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -140,8 +140,7 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
return cfg;
}
-static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
- u32 totallen, u32 offset)
+static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
{
struct ahash_request *req = ahash_request_cast(async_req);
struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
@@ -295,19 +294,18 @@ static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
{
u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
- unsigned int xtsdusize;
qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
enckeylen / 2);
qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
- /* xts du size 512B */
- xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
- qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+ /* Set data unit size to cryptlen. Anything else causes
+ * crypto engine to return back incorrect results.
+ */
+ qce_write(qce, REG_ENCR_XTS_DU_SIZE, cryptlen);
}
-static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
- u32 totallen, u32 offset)
+static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
{
struct skcipher_request *req = skcipher_request_cast(async_req);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
@@ -367,7 +365,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
- qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);
+ qce_write(qce, REG_ENCR_SEG_START, 0);
if (IS_CTR(flags)) {
qce_write(qce, REG_CNTR_MASK, ~0);
@@ -376,7 +374,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
qce_write(qce, REG_CNTR_MASK2, ~0);
}
- qce_write(qce, REG_SEG_SIZE, totallen);
+ qce_write(qce, REG_SEG_SIZE, rctx->cryptlen);
/* get little endianness */
config = qce_config_reg(qce, 1);
@@ -388,17 +386,16 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
}
#endif
-int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
- u32 offset)
+int qce_start(struct crypto_async_request *async_req, u32 type)
{
switch (type) {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
case CRYPTO_ALG_TYPE_SKCIPHER:
- return qce_setup_regs_skcipher(async_req, totallen, offset);
+ return qce_setup_regs_skcipher(async_req);
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
case CRYPTO_ALG_TYPE_AHASH:
- return qce_setup_regs_ahash(async_req, totallen, offset);
+ return qce_setup_regs_ahash(async_req);
#endif
default:
return -EINVAL;
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 85ba16418a04..3bc244bcca2d 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -94,7 +94,6 @@ struct qce_alg_template {
void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
int qce_check_status(struct qce_device *qce, u32 *status);
void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step);
-int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
- u32 offset);
+int qce_start(struct crypto_async_request *async_req, u32 type);
#endif /* _COMMON_H_ */
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 61c418c12345..8e6fcf2c21cc 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -12,9 +12,15 @@
#include "core.h"
#include "sha.h"
-/* crypto hw padding constant for first operation */
-#define SHA_PADDING 64
-#define SHA_PADDING_MASK (SHA_PADDING - 1)
+struct qce_sha_saved_state {
+ u8 pending_buf[QCE_SHA_MAX_BLOCKSIZE];
+ u8 partial_digest[QCE_SHA_MAX_DIGESTSIZE];
+ __be32 byte_count[2];
+ unsigned int pending_buflen;
+ unsigned int flags;
+ u64 count;
+ bool first_blk;
+};
static LIST_HEAD(ahash_algs);
@@ -107,7 +113,7 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
qce_dma_issue_pending(&qce->dma);
- ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
+ ret = qce_start(async_req, tmpl->crypto_alg_type);
if (ret)
goto error_terminate;
@@ -139,97 +145,37 @@ static int qce_ahash_init(struct ahash_request *req)
static int qce_ahash_export(struct ahash_request *req, void *out)
{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
- unsigned long flags = rctx->flags;
- unsigned int digestsize = crypto_ahash_digestsize(ahash);
- unsigned int blocksize =
- crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
-
- if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
- struct sha1_state *out_state = out;
-
- out_state->count = rctx->count;
- qce_cpu_to_be32p_array((__be32 *)out_state->state,
- rctx->digest, digestsize);
- memcpy(out_state->buffer, rctx->buf, blocksize);
- } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
- struct sha256_state *out_state = out;
-
- out_state->count = rctx->count;
- qce_cpu_to_be32p_array((__be32 *)out_state->state,
- rctx->digest, digestsize);
- memcpy(out_state->buf, rctx->buf, blocksize);
- } else {
- return -EINVAL;
- }
+ struct qce_sha_saved_state *export_state = out;
- return 0;
-}
-
-static int qce_import_common(struct ahash_request *req, u64 in_count,
- const u32 *state, const u8 *buffer, bool hmac)
-{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
- unsigned int digestsize = crypto_ahash_digestsize(ahash);
- unsigned int blocksize;
- u64 count = in_count;
-
- blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
- rctx->count = in_count;
- memcpy(rctx->buf, buffer, blocksize);
-
- if (in_count <= blocksize) {
- rctx->first_blk = 1;
- } else {
- rctx->first_blk = 0;
- /*
- * For HMAC, there is a hardware padding done when first block
- * is set. Therefore the byte_count must be incremened by 64
- * after the first block operation.
- */
- if (hmac)
- count += SHA_PADDING;
- }
-
- rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
- rctx->byte_count[1] = (__force __be32)(count >> 32);
- qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
- digestsize);
- rctx->buflen = (unsigned int)(in_count & (blocksize - 1));
+ memcpy(export_state->pending_buf, rctx->buf, rctx->buflen);
+ memcpy(export_state->partial_digest, rctx->digest, sizeof(rctx->digest));
+ export_state->byte_count[0] = rctx->byte_count[0];
+ export_state->byte_count[1] = rctx->byte_count[1];
+ export_state->pending_buflen = rctx->buflen;
+ export_state->count = rctx->count;
+ export_state->first_blk = rctx->first_blk;
+ export_state->flags = rctx->flags;
return 0;
}
static int qce_ahash_import(struct ahash_request *req, const void *in)
{
- struct qce_sha_reqctx *rctx;
- unsigned long flags;
- bool hmac;
- int ret;
-
- ret = qce_ahash_init(req);
- if (ret)
- return ret;
-
- rctx = ahash_request_ctx(req);
- flags = rctx->flags;
- hmac = IS_SHA_HMAC(flags);
-
- if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
- const struct sha1_state *state = in;
-
- ret = qce_import_common(req, state->count, state->state,
- state->buffer, hmac);
- } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
- const struct sha256_state *state = in;
+ struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ const struct qce_sha_saved_state *import_state = in;
- ret = qce_import_common(req, state->count, state->state,
- state->buf, hmac);
- }
+ memset(rctx, 0, sizeof(*rctx));
+ rctx->count = import_state->count;
+ rctx->buflen = import_state->pending_buflen;
+ rctx->first_blk = import_state->first_blk;
+ rctx->flags = import_state->flags;
+ rctx->byte_count[0] = import_state->byte_count[0];
+ rctx->byte_count[1] = import_state->byte_count[1];
+ memcpy(rctx->buf, import_state->pending_buf, rctx->buflen);
+ memcpy(rctx->digest, import_state->partial_digest, sizeof(rctx->digest));
- return ret;
+ return 0;
}
static int qce_ahash_update(struct ahash_request *req)
@@ -270,6 +216,25 @@ static int qce_ahash_update(struct ahash_request *req)
/* calculate how many bytes will be hashed later */
hash_later = total % blocksize;
+
+ /*
+ * At this point, there is more than one block size of data. If
+ * the available data to transfer is exactly a multiple of block
+ * size, save the last block to be transferred in qce_ahash_final
+ * (with the last block bit set) if this is indeed the end of data
+ * stream. If not this saved block will be transferred as part of
+ * next update. If this block is not held back and if this is
+ * indeed the end of data stream, the digest obtained will be wrong
+ * since qce_ahash_final will see that rctx->buflen is 0 and return
+ * doing nothing which in turn means that a digest will not be
+ * copied to the destination result buffer. qce_ahash_final cannot
+ * be made to alter this behavior and allowed to proceed if
+ * rctx->buflen is 0 because the crypto engine BAM does not allow
+ * for zero length transfers.
+ */
+ if (!hash_later)
+ hash_later = blocksize;
+
if (hash_later) {
unsigned int src_offset = req->nbytes - hash_later;
scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
@@ -450,7 +415,7 @@ static const struct qce_ahash_def ahash_def[] = {
.drv_name = "sha1-qce",
.digestsize = SHA1_DIGEST_SIZE,
.blocksize = SHA1_BLOCK_SIZE,
- .statesize = sizeof(struct sha1_state),
+ .statesize = sizeof(struct qce_sha_saved_state),
.std_iv = std_iv_sha1,
},
{
@@ -459,7 +424,7 @@ static const struct qce_ahash_def ahash_def[] = {
.drv_name = "sha256-qce",
.digestsize = SHA256_DIGEST_SIZE,
.blocksize = SHA256_BLOCK_SIZE,
- .statesize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct qce_sha_saved_state),
.std_iv = std_iv_sha256,
},
{
@@ -468,7 +433,7 @@ static const struct qce_ahash_def ahash_def[] = {
.drv_name = "hmac-sha1-qce",
.digestsize = SHA1_DIGEST_SIZE,
.blocksize = SHA1_BLOCK_SIZE,
- .statesize = sizeof(struct sha1_state),
+ .statesize = sizeof(struct qce_sha_saved_state),
.std_iv = std_iv_sha1,
},
{
@@ -477,7 +442,7 @@ static const struct qce_ahash_def ahash_def[] = {
.drv_name = "hmac-sha256-qce",
.digestsize = SHA256_DIGEST_SIZE,
.blocksize = SHA256_BLOCK_SIZE,
- .statesize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct qce_sha_saved_state),
.std_iv = std_iv_sha256,
},
};
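
The export/import rework swaps the generic sha1_state/sha256_state layouts for a driver-private snapshot, so .statesize must grow to sizeof(struct qce_sha_saved_state): the crypto API only guarantees export() that many bytes of storage. Callers are unaffected; a sketch of the round trip, assuming a prepared request:

#include <crypto/hash.h>

/* 'state' must hold at least crypto_ahash_statesize() bytes. */
static int demo_save_restore(struct ahash_request *req, void *state)
{
	int ret;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;
	/* ... the request may be reused for other work here ... */
	return crypto_ahash_import(req, state);
}
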
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index a2d3da0ad95f..c0a0d8c4fce1 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
+#include <linux/errno.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
@@ -143,7 +144,7 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
qce_dma_issue_pending(&qce->dma);
- ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
+ ret = qce_start(async_req, tmpl->crypto_alg_type);
if (ret)
goto error_terminate;
@@ -167,16 +168,33 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
+ unsigned int __keylen;
int ret;
if (!key || !keylen)
return -EINVAL;
- switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
+ /*
+ * AES XTS key1 = key2 not supported by crypto engine.
+ * Revisit to request a fallback cipher in this case.
+ */
+ if (IS_XTS(flags)) {
+ __keylen = keylen >> 1;
+ if (!memcmp(key, key + __keylen, __keylen))
+ return -ENOKEY;
+ } else {
+ __keylen = keylen;
+ }
+
+ switch (__keylen) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_256:
memcpy(ctx->enc_key, key, keylen);
break;
+ case AES_KEYSIZE_192:
+ break;
+ default:
+ return -EINVAL;
}
ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
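
An XTS key is two AES keys concatenated, and this engine cannot process the degenerate key1 == key2 case, so setkey now memcmp()s the halves and rejects with -ENOKEY (leaving a fallback for later, per the comment). The check in isolation:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

static int demo_xts_halves_ok(const u8 *key, unsigned int keylen)
{
	unsigned int half = keylen >> 1;	/* key1 | key2 */

	if (!memcmp(key, key + half, half))
		return -ENOKEY;	/* key1 == key2: engine unsupported */
	return 0;
}
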
@@ -204,12 +222,27 @@ static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
unsigned int keylen)
{
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
+ u32 _key[6];
int err;
err = verify_skcipher_des3_key(ablk, key);
if (err)
return err;
+ /*
+ * The crypto engine does not support any two keys
+ * being the same for triple des algorithms. The
+ * verify_skcipher_des3_key does not check for all the
+ * below conditions. Return -ENOKEY in case any two keys
+ * are the same. Revisit to see if a fallback cipher
+ * is needed to handle this condition.
+ */
+ memcpy(_key, key, DES3_EDE_KEY_SIZE);
+ if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
+ !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
+ !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
+ return -ENOKEY;
+
ctx->enc_keylen = keylen;
memcpy(ctx->enc_key, key, keylen);
return 0;
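
The 3DES check views the 24-byte key as six u32 words (two per DES key) and XOR-compares each pair of keys: a pair is identical exactly when both of its word-XORs are zero, and any identical pair degrades 3DES, so such keys are refused. The predicate as a standalone sketch:

#include <linux/types.h>

/* w[0..5] are the six 32-bit words of K1 | K2 | K3. */
static bool demo_des3_keys_distinct(const u32 w[6])
{
	return ((w[0] ^ w[2]) | (w[1] ^ w[3])) &&	/* K1 != K2 */
	       ((w[2] ^ w[4]) | (w[3] ^ w[5])) &&	/* K2 != K3 */
	       ((w[0] ^ w[4]) | (w[1] ^ w[5]));		/* K1 != K3 */
}
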
@@ -221,6 +254,7 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+ unsigned int blocksize = crypto_skcipher_blocksize(tfm);
int keylen;
int ret;
@@ -228,14 +262,31 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
- /* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
- * is not a multiple of it; pass such requests to the fallback
+ /* CE does not handle 0 length messages */
+ if (!req->cryptlen)
+ return 0;
+
+ /*
+ * ECB and CBC algorithms require message lengths to be
+ * multiples of block size.
+ */
+ if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
+ if (!IS_ALIGNED(req->cryptlen, blocksize))
+ return -EINVAL;
+
+ /*
+ * Conditions for requesting a fallback cipher
+ * AES-192 (not supported by crypto engine (CE))
+ * AES-XTS request with len <= 512 byte (not recommended to use CE)
+ * AES-XTS request with len > QCE_SECTOR_SIZE and
+ * is not a multiple of it.(Revisit this condition to check if it is
+ * needed in all versions of CE)
*/
if (IS_AES(rctx->flags) &&
- (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
- req->cryptlen <= aes_sw_max_len) ||
- (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
- req->cryptlen % QCE_SECTOR_SIZE))) {
+ ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
+ (IS_XTS(rctx->flags) && ((req->cryptlen <= aes_sw_max_len) ||
+ (req->cryptlen > QCE_SECTOR_SIZE &&
+ req->cryptlen % QCE_SECTOR_SIZE))))) {
skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
skcipher_request_set_callback(&rctx->fallback_req,
req->base.flags,
@@ -307,7 +358,7 @@ static const struct qce_skcipher_def skcipher_def[] = {
.name = "ecb(aes)",
.drv_name = "ecb-aes-qce",
.blocksize = AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+ .ivsize = 0,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
},
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 81befe7febaa..ed03058497bc 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -48,7 +48,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev)
{
struct ahash_request *req = ahash_request_cast(dev->async_req);
struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
- int reg_status = 0;
+ int reg_status;
reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 682c8a450a57..55aa3a71169b 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
@@ -401,7 +402,7 @@ static const struct samsung_aes_variant exynos_aes_data = {
static const struct samsung_aes_variant exynos5433_slim_aes_data = {
.aes_offset = 0x400,
.hash_offset = 0x800,
- .clk_names = { "pclk", "aclk", },
+ .clk_names = { "aclk", "pclk", },
};
static const struct of_device_id s5p_sss_dt_match[] = {
@@ -424,13 +425,9 @@ MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
static inline const struct samsung_aes_variant *find_s5p_sss_version
(const struct platform_device *pdev)
{
- if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
- const struct of_device_id *match;
+ if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node))
+ return of_device_get_match_data(&pdev->dev);
- match = of_match_node(s5p_sss_dt_match,
- pdev->dev.of_node);
- return (const struct samsung_aes_variant *)match->data;
- }
return (const struct samsung_aes_variant *)
platform_get_device_id(pdev)->driver_data;
}
@@ -2159,7 +2156,7 @@ static struct skcipher_alg algs[] = {
static int s5p_aes_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- int i, j, err = -ENODEV;
+ int i, j, err;
const struct samsung_aes_variant *variant;
struct s5p_aes_dev *pdata;
struct resource *res;
@@ -2189,14 +2186,14 @@ static int s5p_aes_probe(struct platform_device *pdev)
}
pdata->res = res;
- pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ pdata->ioaddr = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->ioaddr)) {
if (!pdata->use_hash)
return PTR_ERR(pdata->ioaddr);
/* try AES without HASH */
res->end -= 0x300;
pdata->use_hash = false;
- pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ pdata->ioaddr = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->ioaddr))
return PTR_ERR(pdata->ioaddr);
}
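The s5p-sss change above switches the variant lookup to of_device_get_match_data(), which returns the .data pointer of the matched of_device_id directly instead of open-coding of_match_node(). A minimal sketch of that pattern, with hypothetical my_* names standing in for the driver's own:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct my_variant { unsigned int aes_offset; };

static int my_probe(struct platform_device *pdev)
{
	const struct my_variant *variant;

	/* NULL when the device was not probed via devicetree */
	variant = of_device_get_match_data(&pdev->dev);
	if (!variant)
		variant = (const struct my_variant *)
			platform_get_device_id(pdev)->driver_data;

	return variant ? 0 : -ENODEV;
}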
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index f300b0a5958a..1c6929fb3a13 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -69,8 +69,24 @@
/* Max Authentication tag size */
#define SA_MAX_AUTH_TAG_SZ 64
-#define PRIV_ID 0x1
-#define PRIV 0x1
+enum sa_algo_id {
+ SA_ALG_CBC_AES = 0,
+ SA_ALG_EBC_AES,
+ SA_ALG_CBC_DES3,
+ SA_ALG_ECB_DES3,
+ SA_ALG_SHA1,
+ SA_ALG_SHA256,
+ SA_ALG_SHA512,
+ SA_ALG_AUTHENC_SHA1_AES,
+ SA_ALG_AUTHENC_SHA256_AES,
+};
+
+struct sa_match_data {
+ u8 priv;
+ u8 priv_id;
+ u32 supported_algos;
+ bool skip_engine_control;
+};
static struct device *sa_k3_dev;
@@ -696,8 +712,9 @@ static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
}
static
-int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
- u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz,
+int sa_init_sc(struct sa_ctx_info *ctx, const struct sa_match_data *match_data,
+ const u8 *enc_key, u16 enc_key_sz,
+ const u8 *auth_key, u16 auth_key_sz,
struct algo_data *ad, u8 enc, u32 *swinfo)
{
int enc_sc_offset = 0;
@@ -732,8 +749,8 @@ int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
memcpy(&sc_buf[2], &sc_id, 2);
sc_buf[4] = 0x0;
- sc_buf[5] = PRIV_ID;
- sc_buf[6] = PRIV;
+ sc_buf[5] = match_data->priv_id;
+ sc_buf[6] = match_data->priv;
sc_buf[7] = 0x0;
/* Prepare context for encryption engine */
@@ -892,8 +909,8 @@ static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
return ret;
/* Setup Encryption Security Context & Command label template */
- if (sa_init_sc(&ctx->enc, key, keylen, NULL, 0, ad, 1,
- &ctx->enc.epib[1]))
+ if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, key, keylen, NULL, 0,
+ ad, 1, &ctx->enc.epib[1]))
goto badkey;
cmdl_len = sa_format_cmdl_gen(&cfg,
@@ -905,8 +922,8 @@ static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
ctx->enc.cmdl_size = cmdl_len;
/* Setup Decryption Security Context & Command label template */
- if (sa_init_sc(&ctx->dec, key, keylen, NULL, 0, ad, 0,
- &ctx->dec.epib[1]))
+ if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, key, keylen, NULL, 0,
+ ad, 0, &ctx->dec.epib[1]))
goto badkey;
cfg.enc_eng_id = ad->enc_eng.eng_id;
@@ -1106,7 +1123,7 @@ static int sa_run(struct sa_req *req)
else
dma_rx = pdata->dma_rx1;
- ddev = dma_rx->device->dev;
+ ddev = dmaengine_get_dma_device(pdata->dma_tx);
rxd->ddev = ddev;
memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
@@ -1146,8 +1163,10 @@ static int sa_run(struct sa_req *req)
mapped_sg->sgt.sgl = src;
mapped_sg->sgt.orig_nents = src_nents;
ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
- if (ret)
+ if (ret) {
+ kfree(rxd);
return ret;
+ }
mapped_sg->dir = dir_src;
mapped_sg->mapped = true;
@@ -1155,8 +1174,10 @@ static int sa_run(struct sa_req *req)
mapped_sg->sgt.sgl = req->src;
mapped_sg->sgt.orig_nents = sg_nents;
ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
- if (ret)
+ if (ret) {
+ kfree(rxd);
return ret;
+ }
mapped_sg->dir = dir_src;
mapped_sg->mapped = true;
@@ -1446,9 +1467,10 @@ static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad)
cfg.akey = NULL;
cfg.akey_len = 0;
+ ctx->dev_data = dev_get_drvdata(sa_k3_dev);
/* Setup Encryption Security Context & Command label template */
- if (sa_init_sc(&ctx->enc, NULL, 0, NULL, 0, ad, 0,
- &ctx->enc.epib[1]))
+ if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, NULL, 0, NULL, 0,
+ ad, 0, &ctx->enc.epib[1]))
goto badkey;
cmdl_len = sa_format_cmdl_gen(&cfg,
@@ -1716,6 +1738,7 @@ static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
int ret;
memzero_explicit(ctx, sizeof(*ctx));
+ ctx->dev_data = data;
ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->shash)) {
@@ -1817,8 +1840,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc,
cfg.akey_len = keys.authkeylen;
/* Setup Encryption Security Context & Command label template */
- if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen,
- keys.authkey, keys.authkeylen,
+ if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, keys.enckey,
+ keys.enckeylen, keys.authkey, keys.authkeylen,
ad, 1, &ctx->enc.epib[1]))
return -EINVAL;
@@ -1831,8 +1854,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc,
ctx->enc.cmdl_size = cmdl_len;
/* Setup Decryption Security Context & Command label template */
- if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen,
- keys.authkey, keys.authkeylen,
+ if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, keys.enckey,
+ keys.enckeylen, keys.authkey, keys.authkeylen,
ad, 0, &ctx->dec.epib[1]))
return -EINVAL;
@@ -1950,7 +1973,7 @@ static int sa_aead_decrypt(struct aead_request *req)
}
static struct sa_alg_tmpl sa_algs[] = {
- {
+ [SA_ALG_CBC_AES] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "cbc(aes)",
@@ -1973,7 +1996,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_decrypt,
}
},
- {
+ [SA_ALG_EBC_AES] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "ecb(aes)",
@@ -1995,7 +2018,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_decrypt,
}
},
- {
+ [SA_ALG_CBC_DES3] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "cbc(des3_ede)",
@@ -2018,7 +2041,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_decrypt,
}
},
- {
+ [SA_ALG_ECB_DES3] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "ecb(des3_ede)",
@@ -2040,7 +2063,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_decrypt,
}
},
- {
+ [SA_ALG_SHA1] = {
.type = CRYPTO_ALG_TYPE_AHASH,
.alg.ahash = {
.halg.base = {
@@ -2069,7 +2092,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.import = sa_sha_import,
},
},
- {
+ [SA_ALG_SHA256] = {
.type = CRYPTO_ALG_TYPE_AHASH,
.alg.ahash = {
.halg.base = {
@@ -2098,7 +2121,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.import = sa_sha_import,
},
},
- {
+ [SA_ALG_SHA512] = {
.type = CRYPTO_ALG_TYPE_AHASH,
.alg.ahash = {
.halg.base = {
@@ -2127,7 +2150,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.import = sa_sha_import,
},
},
- {
+ [SA_ALG_AUTHENC_SHA1_AES] = {
.type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
@@ -2154,7 +2177,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_aead_decrypt,
},
},
- {
+ [SA_ALG_AUTHENC_SHA256_AES] = {
.type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
@@ -2185,13 +2208,19 @@ static struct sa_alg_tmpl sa_algs[] = {
};
/* Register the algorithms in crypto framework */
-static void sa_register_algos(const struct device *dev)
+static void sa_register_algos(struct sa_crypto_data *dev_data)
{
+ const struct sa_match_data *match_data = dev_data->match_data;
+ struct device *dev = dev_data->dev;
char *alg_name;
u32 type;
int i, err;
for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
+ /* Skip unsupported algos */
+ if (!(match_data->supported_algos & BIT(i)))
+ continue;
+
type = sa_algs[i].type;
if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
alg_name = sa_algs[i].alg.skcipher.base.cra_name;
@@ -2329,14 +2358,39 @@ static int sa_link_child(struct device *dev, void *data)
return 0;
}
+static struct sa_match_data am654_match_data = {
+ .priv = 1,
+ .priv_id = 1,
+ .supported_algos = GENMASK(SA_ALG_AUTHENC_SHA256_AES, 0),
+};
+
+static struct sa_match_data am64_match_data = {
+ .priv = 0,
+ .priv_id = 0,
+ .supported_algos = BIT(SA_ALG_CBC_AES) |
+ BIT(SA_ALG_EBC_AES) |
+ BIT(SA_ALG_SHA256) |
+ BIT(SA_ALG_SHA512) |
+ BIT(SA_ALG_AUTHENC_SHA256_AES),
+ .skip_engine_control = true,
+};
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "ti,j721e-sa2ul", .data = &am654_match_data, },
+ { .compatible = "ti,am654-sa2ul", .data = &am654_match_data, },
+ { .compatible = "ti,am64-sa2ul", .data = &am64_match_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_match);
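With sa_algs[] now indexed by enum sa_algo_id, each SoC's match data can describe its algorithm support as a bitmask and registration simply skips clear bits. A small userspace sketch of the same mask filtering, assuming illustrative names:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

enum algo_id { ALG_CBC_AES, ALG_ECB_AES, ALG_SHA256, ALG_MAX };

static const char * const algo_names[ALG_MAX] = {
	"cbc(aes)", "ecb(aes)", "sha256",
};

int main(void)
{
	/* e.g. a cut-down part that lacks cbc(aes) */
	uint32_t supported = BIT(ALG_ECB_AES) | BIT(ALG_SHA256);

	for (int i = 0; i < ALG_MAX; i++) {
		if (!(supported & BIT(i)))
			continue;	/* skip unsupported algos */
		printf("registering %s\n", algo_names[i]);
	}
	return 0;
}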
+
static int sa_ul_probe(struct platform_device *pdev)
{
+ const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct resource *res;
static void __iomem *saul_base;
struct sa_crypto_data *dev_data;
- u32 val;
int ret;
dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
@@ -2350,7 +2404,7 @@ static int sa_ul_probe(struct platform_device *pdev)
dev_set_drvdata(sa_k3_dev, dev_data);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
ret);
@@ -2362,18 +2416,28 @@ static int sa_ul_probe(struct platform_device *pdev)
if (ret)
goto disable_pm_runtime;
+ match = of_match_node(of_match, dev->of_node);
+ if (!match) {
+ dev_err(dev, "No compatible match found\n");
+ return -ENODEV;
+ }
+ dev_data->match_data = match->data;
+
spin_lock_init(&dev_data->scid_lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
saul_base = devm_ioremap_resource(dev, res);
dev_data->base = saul_base;
- val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
- SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
- SA_EEC_TRNG_EN;
- writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
+ if (!dev_data->match_data->skip_engine_control) {
+ u32 val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
+ SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
+ SA_EEC_TRNG_EN;
- sa_register_algos(dev);
+ writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
+ }
+
+ sa_register_algos(dev_data);
ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
if (ret)
@@ -2419,13 +2483,6 @@ static int sa_ul_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id of_match[] = {
- {.compatible = "ti,j721e-sa2ul",},
- {.compatible = "ti,am654-sa2ul",},
- {},
-};
-MODULE_DEVICE_TABLE(of, of_match);
-
static struct platform_driver sa_ul_driver = {
.probe = sa_ul_probe,
.remove = sa_ul_remove,
diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h
index f597ddecde34..ed66d1f111db 100644
--- a/drivers/crypto/sa2ul.h
+++ b/drivers/crypto/sa2ul.h
@@ -171,9 +171,12 @@ struct sa_tfm_ctx;
#define SA_UNSAFE_DATA_SZ_MIN 240
#define SA_UNSAFE_DATA_SZ_MAX 256
+struct sa_match_data;
+
/**
* struct sa_crypto_data - Crypto driver instance data
* @base: Base address of the register space
+ * @soc_data: Pointer to SoC specific data
* @pdev: Platform device pointer
* @sc_pool: security context pool
* @dev: Device pointer
@@ -189,6 +192,7 @@ struct sa_tfm_ctx;
*/
struct sa_crypto_data {
void __iomem *base;
+ const struct sa_match_data *match_data;
struct platform_device *pdev;
struct dma_pool *sc_pool;
struct device *dev;
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 2a4793176c71..7389a0536ff0 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -542,7 +542,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
int ret;
u32 cfg, hw_mode;
- pm_runtime_get_sync(cryp->dev);
+ pm_runtime_resume_and_get(cryp->dev);
/* Disable interrupt */
stm32_cryp_write(cryp, CRYP_IMSCR, 0);
@@ -2043,7 +2043,7 @@ static int stm32_cryp_remove(struct platform_device *pdev)
if (!cryp)
return -ENODEV;
- ret = pm_runtime_get_sync(cryp->dev);
+ ret = pm_runtime_resume_and_get(cryp->dev);
if (ret < 0)
return ret;
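The stm32 conversions above swap pm_runtime_get_sync() for pm_runtime_resume_and_get(), which drops the usage count by itself when the resume fails, so callers no longer need a pm_runtime_put_noidle() on the error path. A minimal sketch of the resulting call pattern (my_dev_work is a hypothetical caller):

#include <linux/pm_runtime.h>

static int my_dev_work(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage count already balanced */

	/* ... touch the hardware ... */

	pm_runtime_put(dev);
	return 0;
}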
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 7ac0573ef663..389de9e3302d 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -813,7 +813,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
struct stm32_hash_request_ctx *rctx)
{
- pm_runtime_get_sync(hdev->dev);
+ pm_runtime_resume_and_get(hdev->dev);
if (!(HASH_FLAGS_INIT & hdev->flags)) {
stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
@@ -962,7 +962,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
u32 *preg;
unsigned int i;
- pm_runtime_get_sync(hdev->dev);
+ pm_runtime_resume_and_get(hdev->dev);
while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
cpu_relax();
@@ -1000,7 +1000,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
preg = rctx->hw_context;
- pm_runtime_get_sync(hdev->dev);
+ pm_runtime_resume_and_get(hdev->dev);
stm32_hash_write(hdev, HASH_IMR, *preg++);
stm32_hash_write(hdev, HASH_STR, *preg++);
@@ -1566,7 +1566,7 @@ static int stm32_hash_remove(struct platform_device *pdev)
if (!hdev)
return -ENODEV;
- ret = pm_runtime_get_sync(hdev->dev);
+ ret = pm_runtime_resume_and_get(hdev->dev);
if (ret < 0)
return ret;
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
index 9866c2a5e9a7..759d0d9786fd 100644
--- a/drivers/crypto/ux500/cryp/cryp.c
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
@@ -15,7 +15,7 @@
#include "cryp_p.h"
#include "cryp.h"
-/**
+/*
* cryp_wait_until_done - wait until the device logic is not busy
*/
void cryp_wait_until_done(struct cryp_device_data *device_data)
@@ -285,6 +285,7 @@ int cryp_configure_init_vector(struct cryp_device_data *device_data,
* other device context parameter
* @device_data: Pointer to the device data struct for base address.
* @ctx: Crypto device context
+ * @cryp_mode: Mode: Polling, Interrupt or DMA
*/
void cryp_save_device_context(struct cryp_device_data *device_data,
struct cryp_device_context *ctx,
diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h
index 8da7f87b339b..db5713d7c940 100644
--- a/drivers/crypto/ux500/cryp/cryp.h
+++ b/drivers/crypto/ux500/cryp/cryp.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index c3adeb2e5823..30cdd5253929 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
@@ -62,7 +62,7 @@ struct cryp_driver_data {
/**
* struct cryp_ctx - Crypto context
* @config: Crypto mode.
- * @key[CRYP_MAX_KEY_SIZE]: Key.
+ * @key: Key array.
* @keylen: Length of key.
* @iv: Pointer to initialization vector.
* @indata: Pointer to indata.
@@ -73,6 +73,7 @@ struct cryp_driver_data {
* @updated: Updated flag.
* @dev_ctx: Device dependent context.
* @device: Pointer to the device.
+ * @session_id: Atomic session ID.
*/
struct cryp_ctx {
struct cryp_config config;
@@ -608,12 +609,12 @@ static void cryp_dma_done(struct cryp_ctx *ctx)
chan = ctx->device->dma.chan_mem2cryp;
dmaengine_terminate_all(chan);
dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
- ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
+ ctx->device->dma.nents_src, DMA_TO_DEVICE);
chan = ctx->device->dma.chan_cryp2mem;
dmaengine_terminate_all(chan);
dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
- ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
+ ctx->device->dma.nents_dst, DMA_FROM_DEVICE);
}
static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg,
@@ -1290,7 +1291,6 @@ static int ux500_cryp_probe(struct platform_device *pdev)
device_data->phybase = res->start;
device_data->base = devm_ioremap_resource(dev, res);
if (IS_ERR(device_data->base)) {
- dev_err(dev, "[%s]: ioremap failed!", __func__);
ret = PTR_ERR(device_data->base);
goto out;
}
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.c b/drivers/crypto/ux500/cryp/cryp_irq.c
index 7ebde69e8c76..6d2f07bec98a 100644
--- a/drivers/crypto/ux500/cryp/cryp_irq.c
+++ b/drivers/crypto/ux500/cryp/cryp_irq.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.h b/drivers/crypto/ux500/cryp/cryp_irq.h
index 1984f30100ff..da90029ea141 100644
--- a/drivers/crypto/ux500/cryp/cryp_irq.h
+++ b/drivers/crypto/ux500/cryp/cryp_irq.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
@@ -19,7 +19,7 @@ enum cryp_irq_src_id {
CRYP_IRQ_SRC_ALL = 0x3
};
-/**
+/*
* M0 Functions
*/
void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
diff --git a/drivers/crypto/ux500/cryp/cryp_irqp.h b/drivers/crypto/ux500/cryp/cryp_irqp.h
index 879ed68a12d7..4981a3f461e5 100644
--- a/drivers/crypto/ux500/cryp/cryp_irqp.h
+++ b/drivers/crypto/ux500/cryp/cryp_irqp.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
@@ -13,7 +13,7 @@
#include "cryp_irq.h"
-/**
+/*
*
* CRYP Registers - Offset mapping
* +-----------------+
diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h
index 0df84eaa8531..60b47fe4de35 100644
--- a/drivers/crypto/ux500/cryp/cryp_p.h
+++ b/drivers/crypto/ux500/cryp/cryp_p.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
@@ -17,7 +17,7 @@
#include "cryp.h"
#include "cryp_irqp.h"
-/**
+/*
* Generic Macros
*/
#define CRYP_SET_BITS(reg_name, mask) \
@@ -34,7 +34,7 @@
writel_relaxed(((readl_relaxed(reg) & ~(mask)) | \
(((u32)val << shift) & (mask))), reg)
-/**
+/*
* CRYP specific Macros
*/
#define CRYP_PERIPHERAL_ID0 0xE3
@@ -48,7 +48,7 @@
#define CRYP_PCELL_ID2 0x05
#define CRYP_PCELL_ID3 0xB1
-/**
+/*
* CRYP register default values
*/
#define MAX_DEVICE_SUPPORT 2
@@ -62,7 +62,7 @@
#define CRYP_KEY_DEFAULT 0x0
#define CRYP_INIT_VECT_DEFAULT 0x0
-/**
+/*
* CRYP Control register specific mask
*/
#define CRYP_CR_SECURE_MASK BIT(0)
@@ -81,7 +81,6 @@
CRYP_CR_PRLG_MASK |\
CRYP_CR_ALGODIR_MASK |\
CRYP_CR_ALGOMODE_MASK |\
- CRYP_CR_DATATYPE_MASK |\
CRYP_CR_KEYSIZE_MASK |\
CRYP_CR_KEYRDEN_MASK |\
CRYP_CR_DATATYPE_MASK)
@@ -91,7 +90,7 @@
#define CRYP_SR_IFEM_MASK BIT(0)
#define CRYP_SR_BUSY_MASK BIT(4)
-/**
+/*
* Bit position used while setting bits in register
*/
#define CRYP_CR_PRLG_POS 1
@@ -107,7 +106,7 @@
#define CRYP_SR_BUSY_POS 4
-/**
+/*
* CRYP PCRs------PC_NAND control register
* BIT_MASK
*/
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index da284b0ea1b2..ecb7412e84e3 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -190,7 +190,7 @@ static void hash_dma_done(struct hash_ctx *ctx)
chan = ctx->device->dma.chan_mem2hash;
dmaengine_terminate_all(chan);
dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
- ctx->device->dma.sg_len, DMA_TO_DEVICE);
+ ctx->device->dma.nents, DMA_TO_DEVICE);
}
static int hash_dma_write(struct hash_ctx *ctx,
@@ -356,7 +356,7 @@ out:
/**
* hash_get_device_data - Checks for an available hash device and return it.
- * @hash_ctx: Structure for the hash context.
+ * @ctx: Structure for the hash context.
* @device_data: Structure for the hash device.
*
* This function checks for an available hash device and returns it to
@@ -542,7 +542,7 @@ static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
}
/**
- * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
+ * ux500_hash_init - Common hash init function for SHA1/SHA2 (SHA256).
* @req: The hash request for the job.
*
* Initialize structures.
@@ -585,6 +585,7 @@ static int ux500_hash_init(struct ahash_request *req)
* @device_data: Structure for the hash device.
* @message: Block (512 bits) of message to be written to
* the HASH hardware.
+ * @length: Message length
*
*/
static void hash_processblock(struct hash_device_data *device_data,
@@ -1295,7 +1296,7 @@ void hash_get_digest(struct hash_device_data *device_data,
}
/**
- * hash_update - The hash update function for SHA1/SHA2 (SHA256).
+ * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
* @req: The hash request for the job.
*/
static int ahash_update(struct ahash_request *req)
@@ -1315,7 +1316,7 @@ static int ahash_update(struct ahash_request *req)
}
/**
- * hash_final - The hash final function for SHA1/SHA2 (SHA256).
+ * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
* @req: The hash request for the job.
*/
static int ahash_final(struct ahash_request *req)
@@ -1615,9 +1616,6 @@ static struct hash_algo_template hash_algs[] = {
}
};
-/**
- * hash_algs_register_all -
- */
static int ahash_algs_register_all(struct hash_device_data *device_data)
{
int ret;
@@ -1640,9 +1638,6 @@ unreg:
return ret;
}
-/**
- * hash_algs_unregister_all -
- */
static void ahash_algs_unregister_all(struct hash_device_data *device_data)
{
int i;
@@ -1681,7 +1676,6 @@ static int ux500_hash_probe(struct platform_device *pdev)
device_data->phybase = res->start;
device_data->base = devm_ioremap_resource(dev, res);
if (IS_ERR(device_data->base)) {
- dev_err(dev, "%s: ioremap() failed!\n", __func__);
ret = PTR_ERR(device_data->base);
goto out;
}
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index d05c02baebcf..ec06189fbf99 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES routines supporting VMX instructions on the Power 8
*
* Copyright (C) 2015 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index d88084447f1c..ed0debc7acb5 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES CBC routines supporting VMX instructions on the Power 8
*
* Copyright (C) 2015 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 79ba062ee1c1..9a3da8cd62f3 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES CTR routines supporting VMX instructions on the Power 8
*
* Copyright (C) 2015 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 9fee1b1532a4..dabbccb41550 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES XTS routines supporting VMX In-core instructions on Power 8
*
* Copyright (C) 2015 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 14807ac2e3b9..5bc5710a6de0 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* GHASH routines supporting VMX instructions on the Power 8
*
* Copyright (C) 2015, 2019 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
index a40d08e75fc0..7eb713cc87c8 100644
--- a/drivers/crypto/vmx/vmx.c
+++ b/drivers/crypto/vmx/vmx.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Routines supporting VMX instructions on the Power 8
*
* Copyright (C) 2015 International Business Machines Inc.
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 244cb7d89678..2acc6173da36 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -4,6 +4,7 @@
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/module.h>
+#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/cdev.h>
#include <linux/idr.h>
@@ -96,21 +97,18 @@ struct mbox_cmd {
* @dev: driver core device object
* @cdev: char dev core object for ioctl operations
* @cxlm: pointer to the parent device driver data
- * @ops_active: active user of @cxlm in ops handlers
- * @ops_dead: completion when all @cxlm ops users have exited
* @id: id number of this memdev instance.
*/
struct cxl_memdev {
struct device dev;
struct cdev cdev;
struct cxl_mem *cxlm;
- struct percpu_ref ops_active;
- struct completion ops_dead;
int id;
};
static int cxl_mem_major;
static DEFINE_IDA(cxl_memdev_ida);
+static DECLARE_RWSEM(cxl_memdev_rwsem);
static struct dentry *cxl_debugfs;
static bool cxl_raw_allow_all;
@@ -169,7 +167,7 @@ struct cxl_mem_command {
* table will be validated against the user's input. For example, if size_in is
* 0, and the user passed in 1, it is an error.
*/
-static struct cxl_mem_command mem_commands[] = {
+static struct cxl_mem_command mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
CXL_CMD(RAW, ~0, ~0, 0),
@@ -776,26 +774,43 @@ static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct cxl_memdev *cxlmd;
- struct inode *inode;
- int rc = -ENOTTY;
+ struct cxl_memdev *cxlmd = file->private_data;
+ int rc = -ENXIO;
- inode = file_inode(file);
- cxlmd = container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+ down_read(&cxl_memdev_rwsem);
+ if (cxlmd->cxlm)
+ rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
+ up_read(&cxl_memdev_rwsem);
- if (!percpu_ref_tryget_live(&cxlmd->ops_active))
- return -ENXIO;
+ return rc;
+}
- rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
+static int cxl_memdev_open(struct inode *inode, struct file *file)
+{
+ struct cxl_memdev *cxlmd =
+ container_of(inode->i_cdev, typeof(*cxlmd), cdev);
- percpu_ref_put(&cxlmd->ops_active);
+ get_device(&cxlmd->dev);
+ file->private_data = cxlmd;
- return rc;
+ return 0;
+}
+
+static int cxl_memdev_release_file(struct inode *inode, struct file *file)
+{
+ struct cxl_memdev *cxlmd =
+ container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+
+ put_device(&cxlmd->dev);
+
+ return 0;
}
static const struct file_operations cxl_memdev_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = cxl_memdev_ioctl,
+ .open = cxl_memdev_open,
+ .release = cxl_memdev_release_file,
.compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
@@ -984,7 +999,7 @@ static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev, u32 reg_lo,
return NULL;
}
- offset = ((u64)reg_hi << 32) | FIELD_GET(CXL_REGLOC_ADDR_MASK, reg_lo);
+ offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK);
bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);
/* Basic sanity check that BAR is big enough */
@@ -1049,7 +1064,6 @@ static void cxl_memdev_release(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- percpu_ref_exit(&cxlmd->ops_active);
ida_free(&cxl_memdev_ida, cxlmd->id);
kfree(cxlmd);
}
@@ -1066,7 +1080,7 @@ static ssize_t firmware_version_show(struct device *dev,
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_mem *cxlm = cxlmd->cxlm;
- return sprintf(buf, "%.16s\n", cxlm->firmware_version);
+ return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);
@@ -1076,7 +1090,7 @@ static ssize_t payload_max_show(struct device *dev,
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_mem *cxlm = cxlmd->cxlm;
- return sprintf(buf, "%zu\n", cxlm->payload_size);
+ return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
}
static DEVICE_ATTR_RO(payload_max);
@@ -1087,7 +1101,7 @@ static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
struct cxl_mem *cxlm = cxlmd->cxlm;
unsigned long long len = range_len(&cxlm->ram_range);
- return sprintf(buf, "%#llx\n", len);
+ return sysfs_emit(buf, "%#llx\n", len);
}
static struct device_attribute dev_attr_ram_size =
@@ -1100,7 +1114,7 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
struct cxl_mem *cxlm = cxlmd->cxlm;
unsigned long long len = range_len(&cxlm->pmem_range);
- return sprintf(buf, "%#llx\n", len);
+ return sysfs_emit(buf, "%#llx\n", len);
}
static struct device_attribute dev_attr_pmem_size =
@@ -1150,27 +1164,24 @@ static const struct device_type cxl_memdev_type = {
.groups = cxl_memdev_attribute_groups,
};
-static void cxlmdev_unregister(void *_cxlmd)
+static void cxl_memdev_shutdown(struct cxl_memdev *cxlmd)
{
- struct cxl_memdev *cxlmd = _cxlmd;
- struct device *dev = &cxlmd->dev;
-
- percpu_ref_kill(&cxlmd->ops_active);
- cdev_device_del(&cxlmd->cdev, dev);
- wait_for_completion(&cxlmd->ops_dead);
+ down_write(&cxl_memdev_rwsem);
cxlmd->cxlm = NULL;
- put_device(dev);
+ up_write(&cxl_memdev_rwsem);
}
-static void cxlmdev_ops_active_release(struct percpu_ref *ref)
+static void cxl_memdev_unregister(void *_cxlmd)
{
- struct cxl_memdev *cxlmd =
- container_of(ref, typeof(*cxlmd), ops_active);
+ struct cxl_memdev *cxlmd = _cxlmd;
+ struct device *dev = &cxlmd->dev;
- complete(&cxlmd->ops_dead);
+ cdev_device_del(&cxlmd->cdev, dev);
+ cxl_memdev_shutdown(cxlmd);
+ put_device(dev);
}
-static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
+static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm)
{
struct pci_dev *pdev = cxlm->pdev;
struct cxl_memdev *cxlmd;
@@ -1180,22 +1191,11 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
if (!cxlmd)
- return -ENOMEM;
- init_completion(&cxlmd->ops_dead);
-
- /*
- * @cxlm is deallocated when the driver unbinds so operations
- * that are using it need to hold a live reference.
- */
- cxlmd->cxlm = cxlm;
- rc = percpu_ref_init(&cxlmd->ops_active, cxlmdev_ops_active_release, 0,
- GFP_KERNEL);
- if (rc)
- goto err_ref;
+ return ERR_PTR(-ENOMEM);
rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
if (rc < 0)
- goto err_id;
+ goto err;
cxlmd->id = rc;
dev = &cxlmd->dev;
@@ -1204,30 +1204,54 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
dev->bus = &cxl_bus_type;
dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
dev->type = &cxl_memdev_type;
- dev_set_name(dev, "mem%d", cxlmd->id);
+ device_set_pm_not_required(dev);
cdev = &cxlmd->cdev;
cdev_init(cdev, &cxl_memdev_fops);
+ return cxlmd;
+
+err:
+ kfree(cxlmd);
+ return ERR_PTR(rc);
+}
+
+static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
+{
+ struct cxl_memdev *cxlmd;
+ struct device *dev;
+ struct cdev *cdev;
+ int rc;
+
+ cxlmd = cxl_memdev_alloc(cxlm);
+ if (IS_ERR(cxlmd))
+ return PTR_ERR(cxlmd);
+
+ dev = &cxlmd->dev;
+ rc = dev_set_name(dev, "mem%d", cxlmd->id);
+ if (rc)
+ goto err;
+
+ /*
+ * Activate ioctl operations; no cxl_memdev_rwsem manipulation is
+ * needed as this is ordered with cdev_add() publishing the device.
+ */
+ cxlmd->cxlm = cxlm;
+ cdev = &cxlmd->cdev;
rc = cdev_device_add(cdev, dev);
if (rc)
- goto err_add;
+ goto err;
- return devm_add_action_or_reset(dev->parent, cxlmdev_unregister, cxlmd);
+ return devm_add_action_or_reset(dev->parent, cxl_memdev_unregister,
+ cxlmd);
-err_add:
- ida_free(&cxl_memdev_ida, cxlmd->id);
-err_id:
+err:
/*
- * Theoretically userspace could have already entered the fops,
- * so flush ops_active.
+ * The cdev was briefly live; shut down any ioctl operations that
+ * saw that state.
*/
- percpu_ref_kill(&cxlmd->ops_active);
- wait_for_completion(&cxlmd->ops_dead);
- percpu_ref_exit(&cxlmd->ops_active);
-err_ref:
- kfree(cxlmd);
-
+ cxl_memdev_shutdown(cxlmd);
+ put_device(dev);
return rc;
}
@@ -1396,6 +1420,7 @@ out:
*/
static int cxl_mem_identify(struct cxl_mem *cxlm)
{
+ /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
struct cxl_mbox_identify {
char fw_revision[0x10];
__le64 total_capacity;
@@ -1424,10 +1449,11 @@ static int cxl_mem_identify(struct cxl_mem *cxlm)
* For now, only the capacity is exported in sysfs
*/
cxlm->ram_range.start = 0;
- cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) - 1;
+ cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) * SZ_256M - 1;
cxlm->pmem_range.start = 0;
- cxlm->pmem_range.end = le64_to_cpu(id.persistent_capacity) - 1;
+ cxlm->pmem_range.end =
+ le64_to_cpu(id.persistent_capacity) * SZ_256M - 1;
memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
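The cxl rework above replaces the percpu_ref/completion scheme with a plain rwsem: ioctl readers check cxlmd->cxlm under the lock, and unbind takes it for write to clear the pointer before the device data goes away. A condensed sketch of that synchronization shape, with hypothetical my_* names:

#include <linux/errno.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(my_rwsem);

struct my_memdev { void *hw; /* NULL once the driver unbinds */ };

/* ioctl path: readers check the pointer under the semaphore */
static long my_ioctl(struct my_memdev *d)
{
	long rc = -ENXIO;

	down_read(&my_rwsem);
	if (d->hw)
		rc = 0;		/* ... would call __do_ioctl(d) here ... */
	up_read(&my_rwsem);
	return rc;
}

/* unbind path: the writer clears the pointer; no ioctl can then race */
static void my_shutdown(struct my_memdev *d)
{
	down_write(&my_rwsem);
	d->hw = NULL;
	up_write(&my_rwsem);
}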
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 452e85ae87a8..5aee26e1bbd6 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -90,13 +90,11 @@ static ssize_t do_id_store(struct device_driver *drv, const char *buf,
list_add(&dax_id->list, &dax_drv->ids);
} else
rc = -ENOMEM;
- } else
- /* nothing to remove */;
+ }
} else if (action == ID_REMOVE) {
list_del(&dax_id->list);
kfree(dax_id);
- } else
- /* dax_id already added */;
+ }
mutex_unlock(&dax_bus_lock);
if (rc < 0)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index fe6a460c4373..af3ee288bc11 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1086,6 +1086,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
kfree(chan->dev);
err_free_local:
free_percpu(chan->local);
+ chan->local = NULL;
return rc;
}
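Clearing chan->local right after free_percpu() means a later cleanup pass (for example a retried registration) sees a NULL pointer instead of a stale one. A userspace analogue of the same defensive idiom:

#include <stdlib.h>

struct chan { int *local; };

static void chan_cleanup(struct chan *c)
{
	free(c->local);
	c->local = NULL;	/* free(NULL) is a no-op on the next pass */
}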
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index e5162690de8f..db25f9b7778c 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -10,6 +10,7 @@ config DW_DMAC_CORE
config DW_DMAC
tristate "Synopsys DesignWare AHB DMA platform driver"
+ depends on HAS_IOMEM
select DW_DMAC_CORE
help
Support the Synopsys DesignWare AHB DMA controller. This
@@ -18,6 +19,7 @@ config DW_DMAC
config DW_DMAC_PCI
tristate "Synopsys DesignWare AHB DMA PCI driver"
depends on PCI
+ depends on HAS_IOMEM
select DW_DMAC_CORE
help
Support the Synopsys DesignWare AHB DMA controller on the
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 84a6ea60ecf0..31c819544a22 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -282,6 +282,22 @@ void idxd_wq_drain(struct idxd_wq *wq)
idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}
+void idxd_wq_reset(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ u32 operand;
+
+ if (wq->state != IDXD_WQ_ENABLED) {
+ dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
+ return;
+ }
+
+ operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
+ idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
+ wq->state = IDXD_WQ_DISABLED;
+}
+
int idxd_wq_map_portal(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
@@ -363,8 +379,6 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq)
void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
- struct device *dev = &idxd->pdev->dev;
- int i, wq_offset;
lockdep_assert_held(&idxd->dev_lock);
memset(wq->wqcfg, 0, idxd->wqcfg_size);
@@ -376,14 +390,6 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
wq->ats_dis = 0;
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
memset(wq->name, 0, WQ_NAME_SIZE);
-
- for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
- wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
- iowrite32(0, idxd->reg_base + wq_offset);
- dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
- wq->id, i, wq_offset,
- ioread32(idxd->reg_base + wq_offset));
- }
}
/* Device control bits */
@@ -574,6 +580,36 @@ void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
}
/* Device configuration bits */
+void idxd_msix_perm_setup(struct idxd_device *idxd)
+{
+ union msix_perm mperm;
+ int i, msixcnt;
+
+ msixcnt = pci_msix_vec_count(idxd->pdev);
+ if (msixcnt < 0)
+ return;
+
+ mperm.bits = 0;
+ mperm.pasid = idxd->pasid;
+ mperm.pasid_en = device_pasid_enabled(idxd);
+ for (i = 1; i < msixcnt; i++)
+ iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
+}
+
+void idxd_msix_perm_clear(struct idxd_device *idxd)
+{
+ union msix_perm mperm;
+ int i, msixcnt;
+
+ msixcnt = pci_msix_vec_count(idxd->pdev);
+ if (msixcnt < 0)
+ return;
+
+ mperm.bits = 0;
+ for (i = 1; i < msixcnt; i++)
+ iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
+}
+
static void idxd_group_config_write(struct idxd_group *group)
{
struct idxd_device *idxd = group->idxd;
@@ -642,7 +678,14 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
if (!wq->group)
return 0;
- memset(wq->wqcfg, 0, idxd->wqcfg_size);
+ /*
+ * Instead of memsetting the entire shadow copy of WQCFG, copy it back from
+ * the hardware after the wq reset. This preserves the sticky values that are
+ * present on some devices.
+ */
+ for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
+ wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
+ wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
+ }
/* byte 0-3 */
wq->wqcfg->wq_size = wq->size;
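Rather than zeroing the WQCFG shadow, the hunk above re-reads it from the hardware after the reset so device-sticky bits survive the subsequent rewrite. A hedged sketch of that read-back-then-modify idea, with hypothetical names and a word-sized register stride assumed:

#include <linux/io.h>
#include <linux/types.h>

static void refresh_shadow(void __iomem *base, u32 *shadow, int nregs)
{
	int i;

	/* refresh the shadow from MMIO, preserving sticky bits */
	for (i = 0; i < nregs; i++)
		shadow[i] = ioread32(base + i * sizeof(u32));

	/* ... update only the software-owned fields in shadow[] and
	 * write the words back with iowrite32() ... */
}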
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 81a0e65fd316..76014c14f473 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -316,6 +316,8 @@ void idxd_unregister_driver(void);
struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
/* device interrupt control */
+void idxd_msix_perm_setup(struct idxd_device *idxd);
+void idxd_msix_perm_clear(struct idxd_device *idxd);
irqreturn_t idxd_irq_handler(int vec, void *data);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
@@ -341,6 +343,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
void idxd_wq_drain(struct idxd_wq *wq);
+void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 085a0c3b62c6..6584b0ec07d5 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -65,7 +65,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
struct idxd_irq_entry *irq_entry;
int i, msixcnt;
int rc = 0;
- union msix_perm mperm;
msixcnt = pci_msix_vec_count(pdev);
if (msixcnt < 0) {
@@ -144,14 +143,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
}
idxd_unmask_error_interrupts(idxd);
-
- /* Setup MSIX permission table */
- mperm.bits = 0;
- mperm.pasid = idxd->pasid;
- mperm.pasid_en = device_pasid_enabled(idxd);
- for (i = 1; i < msixcnt; i++)
- iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
-
+ idxd_msix_perm_setup(idxd);
return 0;
err_no_irq:
@@ -510,6 +502,7 @@ static void idxd_shutdown(struct pci_dev *pdev)
idxd_flush_work_list(irq_entry);
}
+ idxd_msix_perm_clear(idxd);
destroy_workqueue(idxd->wq);
}
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index a60ca11a5784..f1463fc58112 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -124,7 +124,9 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
for (i = 0; i < 4; i++)
idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
IDXD_SWERR_OFFSET + i * sizeof(u64));
- iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);
+
+ iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK,
+ idxd->reg_base + IDXD_SWERR_OFFSET);
if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
int id = idxd->sw_err.wq_idx;
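Acknowledging only the SWERR bits that were actually read closes a window in which an error latched between the read and a blanket ack write would be silently cleared. A sketch of the same read-then-masked-ack pattern, with a hypothetical ack mask:

#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/types.h>

#define MY_SWERR_ACK	0x3ULL	/* hypothetical write-1-to-clear bits */

/* Ack only what we observed: a fault that sets a bit after the read
 * keeps its bit set and raises a fresh interrupt. */
static u64 ack_observed_errors(void __iomem *swerr_reg)
{
	u64 seen = ioread64(swerr_reg);

	iowrite64(seen & MY_SWERR_ACK, swerr_reg);
	return seen;
}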
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 4dbb03c545e4..18bf4d148989 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -275,7 +275,6 @@ static void disable_wq(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
- int rc;
mutex_lock(&wq->wq_lock);
dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
@@ -296,17 +295,13 @@ static void disable_wq(struct idxd_wq *wq)
idxd_wq_unmap_portal(wq);
idxd_wq_drain(wq);
- rc = idxd_wq_disable(wq);
+ idxd_wq_reset(wq);
idxd_wq_free_resources(wq);
wq->client_count = 0;
mutex_unlock(&wq->wq_lock);
- if (rc < 0)
- dev_warn(dev, "Failed to disable %s: %d\n",
- dev_name(&wq->conf_dev), rc);
- else
- dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
+ dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}
static int idxd_config_bus_remove(struct device *dev)
@@ -989,7 +984,7 @@ static ssize_t wq_size_store(struct device *dev,
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;
- if (wq->state != IDXD_WQ_DISABLED)
+ if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
@@ -1449,8 +1444,14 @@ static ssize_t op_cap_show(struct device *dev,
{
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
+ int i, rc = 0;
+
+ for (i = 0; i < 4; i++)
+ rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);
- return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
+ rc--;
+ rc += sysfs_emit_at(buf, rc, "\n");
+ return rc;
}
static DEVICE_ATTR_RO(op_cap);
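op_cap_show() now emits all four operation-capability words with sysfs_emit_at(), which tracks the running offset and bounds the output to PAGE_SIZE. A minimal sketch of the same multi-value emission (show_words and its arguments are illustrative):

#include <linux/sysfs.h>
#include <linux/types.h>

static ssize_t show_words(char *buf, const u64 *words, int n)
{
	int i, rc = 0;

	for (i = 0; i < n; i++)
		rc += sysfs_emit_at(buf, rc, "%#llx ", words[i]);
	rc--;				/* drop the trailing space */
	rc += sysfs_emit_at(buf, rc, "\n");
	return rc;
}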
diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c
index f387c5bbc170..166934544161 100644
--- a/drivers/dma/plx_dma.c
+++ b/drivers/dma/plx_dma.c
@@ -507,10 +507,8 @@ static int plx_dma_create(struct pci_dev *pdev)
rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
KBUILD_MODNAME, plxdev);
- if (rc) {
- kfree(plxdev);
- return rc;
- }
+ if (rc)
+ goto free_plx;
spin_lock_init(&plxdev->ring_lock);
tasklet_setup(&plxdev->desc_task, plx_dma_desc_task);
@@ -540,14 +538,20 @@ static int plx_dma_create(struct pci_dev *pdev)
rc = dma_async_device_register(dma);
if (rc) {
pci_err(pdev, "Failed to register dma device: %d\n", rc);
- free_irq(pci_irq_vector(pdev, 0), plxdev);
- kfree(plxdev);
- return rc;
+ goto put_device;
}
pci_set_drvdata(pdev, plxdev);
return 0;
+
+put_device:
+ put_device(&pdev->dev);
+ free_irq(pci_irq_vector(pdev, 0), plxdev);
+free_plx:
+ kfree(plxdev);
+
+ return rc;
}
static int plx_dma_probe(struct pci_dev *pdev,
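plx_dma_create() now unwinds through ordered goto labels instead of duplicating kfree()/free_irq() calls at each failure site. The generic shape of that unwind, as a runnable userspace sketch with malloc() standing in for the kernel allocations:

#include <errno.h>
#include <stdlib.h>

static int create(int fail_late)
{
	void *irq, *dev;
	int rc;

	dev = malloc(16);		/* stands in for kzalloc(plxdev) */
	if (!dev)
		return -ENOMEM;

	irq = malloc(16);		/* stands in for request_irq() */
	if (!irq) {
		rc = -ENOMEM;
		goto free_dev;
	}

	if (fail_late) {		/* e.g. device registration fails */
		rc = -EIO;
		goto free_irq;
	}
	return 0;

free_irq:				/* release in reverse order */
	free(irq);
free_dev:
	free(dev);
	return rc;
}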
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 71827d9b0aa1..b7260749e8ee 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -723,7 +723,7 @@ static void tegra_dma_issue_pending(struct dma_chan *dc)
goto end;
}
if (!tdc->busy) {
- err = pm_runtime_get_sync(tdc->tdma->dev);
+ err = pm_runtime_resume_and_get(tdc->tdma->dev);
if (err < 0) {
dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
goto end;
@@ -818,7 +818,7 @@ static void tegra_dma_synchronize(struct dma_chan *dc)
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
int err;
- err = pm_runtime_get_sync(tdc->tdma->dev);
+ err = pm_runtime_resume_and_get(tdc->tdma->dev);
if (err < 0) {
dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
return;
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 55df63dead8d..70b29bd079c9 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -839,6 +839,7 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
struct xilinx_dpdma_tx_desc *desc;
struct virt_dma_desc *vdesc;
u32 reg, channels;
+ bool first_frame;
lockdep_assert_held(&chan->lock);
@@ -852,14 +853,6 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
chan->running = true;
}
- if (chan->video_group)
- channels = xilinx_dpdma_chan_video_group_ready(chan);
- else
- channels = BIT(chan->id);
-
- if (!channels)
- return;
-
vdesc = vchan_next_desc(&chan->vchan);
if (!vdesc)
return;
@@ -884,13 +877,26 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
upper_32_bits(sw_desc->dma_addr)));
- if (chan->first_frame)
+ first_frame = chan->first_frame;
+ chan->first_frame = false;
+
+ if (chan->video_group) {
+ channels = xilinx_dpdma_chan_video_group_ready(chan);
+ /*
+ * Trigger the transfer only when all channels in the group are
+ * ready.
+ */
+ if (!channels)
+ return;
+ } else {
+ channels = BIT(chan->id);
+ }
+
+ if (first_frame)
reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
else
reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);
- chan->first_frame = false;
-
dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
}
@@ -1042,13 +1048,14 @@ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
*/
static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
{
- struct xilinx_dpdma_tx_desc *active = chan->desc.active;
+ struct xilinx_dpdma_tx_desc *active;
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
xilinx_dpdma_debugfs_desc_done_irq(chan);
+ active = chan->desc.active;
if (active)
vchan_cyclic_callback(&active->vdesc);
else
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9811c40956e5..17c9d825188b 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2545,7 +2545,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
struct driver_data *driver_data = packet->driver_data;
int ret = -ENOENT;
- tasklet_disable(&ctx->tasklet);
+ tasklet_disable_in_atomic(&ctx->tasklet);
if (packet->ack != 0)
goto out;
@@ -3465,7 +3465,7 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
struct iso_context *ctx = container_of(base, struct iso_context, base);
int ret = 0;
- tasklet_disable(&ctx->context.tasklet);
+ tasklet_disable_in_atomic(&ctx->context.tasklet);
if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
context_tasklet((unsigned long)&ctx->context);
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index c23466e05e60..d0537573501e 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -13,7 +13,8 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \
-Wno-pointer-sign \
$(call cc-disable-warning, address-of-packed-member) \
$(call cc-disable-warning, gnu) \
- -fno-asynchronous-unwind-tables
+ -fno-asynchronous-unwind-tables \
+ $(CLANG_FLAGS)
# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
# disable the stackleak plugin
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index 50bb2a6d6ccf..62f0d1a5dd32 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -2,7 +2,7 @@
/*
* Turris Mox rWTM firmware driver
*
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
*/
#include <linux/armada-37xx-rwtm-mailbox.h>
@@ -547,4 +547,4 @@ module_platform_driver(turris_mox_rwtm_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Turris Mox rWTM firmware driver");
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
diff --git a/drivers/gpio/gpio-moxtet.c b/drivers/gpio/gpio-moxtet.c
index 8299909318f4..61f9efd6c64f 100644
--- a/drivers/gpio/gpio-moxtet.c
+++ b/drivers/gpio/gpio-moxtet.c
@@ -2,7 +2,7 @@
/*
* Turris Mox Moxtet GPIO expander
*
- * Copyright (C) 2018 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2018 Marek Behún <kabel@kernel.org>
*/
#include <linux/bitops.h>
@@ -174,6 +174,6 @@ static struct moxtet_driver moxtet_gpio_driver = {
};
module_moxtet_driver(moxtet_gpio_driver);
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
MODULE_DESCRIPTION("Turris Mox Moxtet GPIO expander");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 41952bb818ad..56152263ab38 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -29,6 +29,7 @@
#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
struct gpio_regs {
+ u32 sysconfig;
u32 irqenable1;
u32 irqenable2;
u32 wake_en;
@@ -1069,6 +1070,7 @@ static void omap_gpio_init_context(struct gpio_bank *p)
const struct omap_gpio_reg_offs *regs = p->regs;
void __iomem *base = p->base;
+ p->context.sysconfig = readl_relaxed(base + regs->sysconfig);
p->context.ctrl = readl_relaxed(base + regs->ctrl);
p->context.oe = readl_relaxed(base + regs->direction);
p->context.wake_en = readl_relaxed(base + regs->wkup_en);
@@ -1088,6 +1090,7 @@ static void omap_gpio_restore_context(struct gpio_bank *bank)
const struct omap_gpio_reg_offs *regs = bank->regs;
void __iomem *base = bank->base;
+ writel_relaxed(bank->context.sysconfig, base + regs->sysconfig);
writel_relaxed(bank->context.wake_en, base + regs->wkup_en);
writel_relaxed(bank->context.ctrl, base + regs->ctrl);
writel_relaxed(bank->context.leveldetect0, base + regs->leveldetect0);
@@ -1115,6 +1118,10 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
bank->saved_datain = readl_relaxed(base + bank->regs->datain);
+ /* Save sysconfig; its runtime value can differ from the init value */
+ if (bank->loses_context)
+ bank->context.sysconfig = readl_relaxed(base + bank->regs->sysconfig);
+
if (!bank->enabled_non_wakeup_gpios)
goto update_gpio_context_count;
@@ -1279,6 +1286,7 @@ out_unlock:
static const struct omap_gpio_reg_offs omap2_gpio_regs = {
.revision = OMAP24XX_GPIO_REVISION,
+ .sysconfig = OMAP24XX_GPIO_SYSCONFIG,
.direction = OMAP24XX_GPIO_OE,
.datain = OMAP24XX_GPIO_DATAIN,
.dataout = OMAP24XX_GPIO_DATAOUT,
@@ -1302,6 +1310,7 @@ static const struct omap_gpio_reg_offs omap2_gpio_regs = {
static const struct omap_gpio_reg_offs omap4_gpio_regs = {
.revision = OMAP4_GPIO_REVISION,
+ .sysconfig = OMAP4_GPIO_SYSCONFIG,
.direction = OMAP4_GPIO_OE,
.datain = OMAP4_GPIO_DATAIN,
.dataout = OMAP4_GPIO_DATAOUT,
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 26c5466b8179..ae49bb23c6ed 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -458,6 +458,8 @@ static ssize_t export_store(struct class *class,
long gpio;
struct gpio_desc *desc;
int status;
+ struct gpio_chip *gc;
+ int offset;
status = kstrtol(buf, 0, &gpio);
if (status < 0)
@@ -469,6 +471,12 @@ static ssize_t export_store(struct class *class,
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
return -EINVAL;
}
+ gc = desc->gdev->chip;
+ offset = gpio_chip_hwgpio(desc);
+ if (!gpiochip_line_is_valid(gc, offset)) {
+ pr_warn("%s: GPIO %ld masked\n", __func__, gpio);
+ return -EINVAL;
+ }
/* No extra locking here; FLAG_SYSFS just signifies that the
* request and export were done on behalf of userspace, so
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 9fd2157b133a..5efa331e3ee8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -906,7 +906,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
/* Allocate an SG array and squash pages into it */
r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
- ttm->num_pages << PAGE_SHIFT,
+ (u64)ttm->num_pages << PAGE_SHIFT,
GFP_KERNEL);
if (r)
goto release_sg;
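The (u64) cast above matters on configurations where ttm->num_pages is 32 bits wide: the shift is otherwise performed at 32-bit width and wraps for buffers of 4 GiB or more. A runnable demonstration of the wrap, assuming a 4 KiB page size:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t num_pages = 0x100000;	/* 1M pages = 4 GiB */

	/* the 32-bit shift wraps to 0 ... */
	uint64_t bad  = num_pages << PAGE_SHIFT;
	/* ... widening first keeps the full value */
	uint64_t good = (uint64_t)num_pages << PAGE_SHIFT;

	printf("bad=%" PRIu64 " good=%" PRIu64 "\n", bad, good);
	return 0;
}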
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 7d2c8b169827..326dae31b675 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -3300,7 +3300,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
struct amdgpu_bo *root;
uint64_t value, flags;
struct amdgpu_vm *vm;
- long r;
+ int r;
spin_lock(&adev->vm_manager.pasid_lock);
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
@@ -3349,6 +3349,12 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
value = 0;
}
+ r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
+ if (r) {
+ pr_debug("failed %d to reserve fence slot\n", r);
+ goto error_unlock;
+ }
+
r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
addr, flags, value, NULL, NULL,
NULL);
@@ -3360,7 +3366,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
error_unlock:
amdgpu_bo_unreserve(root);
if (r < 0)
- DRM_ERROR("Can't handle page fault (%ld)\n", r);
+ DRM_ERROR("Can't handle page fault (%d)\n", r);
error_unref:
amdgpu_bo_unref(&root);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 45d1172b7bff..63691deb7df3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3280,7 +3280,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_4[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000280, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07800000, 0x00800000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x00001d00, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x00001d00, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003c0000, 0x00280400),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 573cf17262da..d699a5cf6c11 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4072,13 +4072,6 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
return true;
/*
- * The arbitrary tiling support for multiplane formats has not been hooked
- * up.
- */
- if (info->num_planes > 1)
- return false;
-
- /*
* For D swizzle the canonical modifier depends on the bpp, so check
* it here.
*/
@@ -4096,6 +4089,10 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
/* Per radeonsi comments 16/64 bpp are more complicated. */
if (info->cpp[0] != 4)
return false;
+ /* We support multi-planar formats, but not when combined with
+ * additional DCC metadata planes. */
+ if (info->num_planes > 1)
+ return false;
}
return true;
@@ -4296,7 +4293,7 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
- AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
@@ -4308,7 +4305,7 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
- AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
index 705fbfc37502..8a32772d4e91 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
@@ -134,6 +134,7 @@
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index d0ec83881fc5..c0565a932a12 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -1224,7 +1224,8 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
(hwmgr->chip_id == CHIP_POLARIS10) ||
(hwmgr->chip_id == CHIP_POLARIS11) ||
(hwmgr->chip_id == CHIP_POLARIS12) ||
- (hwmgr->chip_id == CHIP_TONGA))
+ (hwmgr->chip_id == CHIP_TONGA) ||
+ (hwmgr->chip_id == CHIP_TOPAZ))
PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index e21fb14d5e07..833d0c1be4f1 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -84,13 +84,31 @@ static void intel_dsm_platform_mux_info(acpi_handle dhandle)
return;
}
+ if (!pkg->package.count) {
+ DRM_DEBUG_DRIVER("no connection in _DSM\n");
+ return;
+ }
+
connector_count = &pkg->package.elements[0];
DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
(unsigned long long)connector_count->integer.value);
for (i = 1; i < pkg->package.count; i++) {
union acpi_object *obj = &pkg->package.elements[i];
- union acpi_object *connector_id = &obj->package.elements[0];
- union acpi_object *info = &obj->package.elements[1];
+ union acpi_object *connector_id;
+ union acpi_object *info;
+
+ if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) {
+ DRM_DEBUG_DRIVER("Invalid object for MUX #%d\n", i);
+ continue;
+ }
+
+ connector_id = &obj->package.elements[0];
+ info = &obj->package.elements[1];
+ if (info->type != ACPI_TYPE_BUFFER || info->buffer.length < 4) {
+ DRM_DEBUG_DRIVER("Invalid info for MUX obj #%d\n", i);
+ continue;
+ }
+
DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
(unsigned long long)connector_id->integer.value);
DRM_DEBUG_DRIVER(" port id: %s\n",
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 651884390137..4f8337c7fd2e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -646,7 +646,6 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
break;
case INTEL_BACKLIGHT_DISPLAY_DDI:
try_intel_interface = true;
- try_vesa_interface = true;
break;
default:
return -ENODEV;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index be6ac0dd846e..2ed309534e97 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -848,7 +848,8 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
if (lttpr_count < 0)
- return;
+ /* Still continue with enabling the port and link training. */
+ lttpr_count = 0;
if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count))
intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index f94025ec603a..a9a8ba1d3aba 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -992,14 +992,14 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
* FIXME As we do with eDP, just make a note of the time here
* and perform the wait before the next panel power on.
*/
- intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
+ msleep(intel_dsi->panel_pwr_cycle_delay);
}
static void intel_dsi_shutdown(struct intel_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
+ msleep(intel_dsi->panel_pwr_cycle_delay);
}
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index fef1e857cefc..01c1d1b36acd 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -916,19 +916,26 @@ static int cmd_reg_handler(struct parser_exec_state *s,
if (!strncmp(cmd, "srm", 3) ||
!strncmp(cmd, "lrm", 3)) {
- if (offset != i915_mmio_reg_offset(GEN8_L3SQCREG4) &&
- offset != 0x21f0) {
+ if (offset == i915_mmio_reg_offset(GEN8_L3SQCREG4) ||
+ offset == 0x21f0 ||
+ (IS_BROADWELL(gvt->gt->i915) &&
+ offset == i915_mmio_reg_offset(INSTPM)))
+ return 0;
+ else {
gvt_vgpu_err("%s access to register (%x)\n",
cmd, offset);
return -EPERM;
- } else
- return 0;
+ }
}
if (!strncmp(cmd, "lrr-src", 7) ||
!strncmp(cmd, "lrr-dst", 7)) {
- gvt_vgpu_err("not allowed cmd %s\n", cmd);
- return -EPERM;
+ if (IS_BROADWELL(gvt->gt->i915) && offset == 0x215c)
+ return 0;
+ else {
+ gvt_vgpu_err("not allowed cmd %s reg (%x)\n", cmd, offset);
+ return -EPERM;
+ }
}
if (!strncmp(cmd, "pipe_ctrl", 9)) {
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index e622aee6e4be..440c35f1abc9 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -105,7 +105,7 @@ static inline bool tasklet_is_locked(const struct tasklet_struct *t)
static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
{
if (!atomic_fetch_inc(&t->count))
- tasklet_unlock_wait(t);
+ tasklet_unlock_spin_wait(t);
}
static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 97b57acc02e2..4b4d8d034782 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5471,12 +5471,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
int ret;
- memset(wm, 0, sizeof(*wm));
-
/* Watermarks calculated in master */
if (plane_state->planar_slave)
return 0;
+ memset(wm, 0, sizeof(*wm));
+
if (plane_state->planar_linked_plane) {
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 7e553d3efeb2..ce13d49e615b 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1386,8 +1386,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
- *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
- REG_A5XX_RBBM_PERFCTR_CP_0_HI);
+ *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
+ REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
return 0;
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 690409ca8a18..d553f62f4eeb 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -567,17 +567,17 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
} else {
/*
* a650 tier targets don't need whereami but still need to be
- * equal to or newer than 1.95 for other security fixes
+ * equal to or newer than 0.95 for other security fixes
*/
if (adreno_is_a650(adreno_gpu)) {
- if ((buf[0] & 0xfff) >= 0x195) {
+ if ((buf[0] & 0xfff) >= 0x095) {
ret = true;
goto out;
}
DRM_DEV_ERROR(&gpu->pdev->dev,
"a650 SQE ucode is too old. Have version %x need at least %x\n",
- buf[0] & 0xfff, 0x195);
+ buf[0] & 0xfff, 0x095);
}
/*
@@ -1228,8 +1228,8 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
/* Force the GPU power on so we can read this register */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
- *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
- REG_A6XX_RBBM_PERFCTR_CP_0_HI);
+ *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+ REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
mutex_unlock(&perfcounter_oob);
@@ -1406,7 +1406,13 @@ static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
int ret;
ret = nvmem_cell_read_u16(dev, "speed_bin", &speedbin);
- if (ret) {
+ /*
+ * -ENOENT means that the platform doesn't support speedbin which is
+ * fine
+ */
+ if (ret == -ENOENT) {
+ return 0;
+ } else if (ret) {
DRM_DEV_ERROR(dev,
"failed to read speed-bin (%d). Some OPPs may not be supported by hardware",
ret);
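
Mapping -ENOENT to success is a common "optional resource" idiom: absence of the nvmem cell means the platform simply has no speed bin, which is not an error. A generic sketch of the idiom (names and values hypothetical):

#include <errno.h>
#include <stdio.h>

/* Stand-in for an optional-resource read: returns 0 on success,
 * -ENOENT when the resource does not exist, other -errno on error. */
static int read_optional_u16(int present, unsigned short *out)
{
	if (!present)
		return -ENOENT;
	*out = 0x1234;
	return 0;
}

static int setup_speed_limits(int present)
{
	unsigned short bin;
	int ret = read_optional_u16(present, &bin);

	if (ret == -ENOENT)      /* not an error: feature simply absent */
		return 0;
	if (ret)                 /* any other failure is real */
		return ret;

	printf("speed bin %#x\n", bin);
	return 0;
}

int main(void)
{
	return setup_speed_limits(0) || setup_speed_limits(1);
}
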
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 8981cfa9dbc3..92e6f1b94738 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -496,7 +496,9 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
- DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, BIT(cfg->merge_3d - MERGE_3D_0));
+ if (cfg->merge_3d)
+ DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
+ BIT(cfg->merge_3d - MERGE_3D_0));
}
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index a5c6b8c23336..196907689c82 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -570,6 +570,7 @@ err_free_priv:
kfree(priv);
err_put_drm_dev:
drm_dev_put(ddev);
+ platform_set_drvdata(pdev, NULL);
return ret;
}
diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c
index af381d756ac1..5fbfb71ca3d9 100644
--- a/drivers/gpu/drm/panel/panel-dsi-cm.c
+++ b/drivers/gpu/drm/panel/panel-dsi-cm.c
@@ -37,6 +37,7 @@ struct dsic_panel_data {
u32 height_mm;
u32 max_hs_rate;
u32 max_lp_rate;
+ bool te_support;
};
struct panel_drv_data {
@@ -334,9 +335,11 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
if (r)
goto err;
- r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (r)
- goto err;
+ if (ddata->panel_data->te_support) {
+ r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (r)
+ goto err;
+ }
/* possible panel bug */
msleep(100);
@@ -619,6 +622,7 @@ static const struct dsic_panel_data taal_data = {
.height_mm = 0,
.max_hs_rate = 300000000,
.max_lp_rate = 10000000,
+ .te_support = true,
};
static const struct dsic_panel_data himalaya_data = {
@@ -629,6 +633,7 @@ static const struct dsic_panel_data himalaya_data = {
.height_mm = 88,
.max_hs_rate = 300000000,
.max_lp_rate = 10000000,
+ .te_support = false,
};
static const struct dsic_panel_data droid4_data = {
@@ -639,6 +644,7 @@ static const struct dsic_panel_data droid4_data = {
.height_mm = 89,
.max_hs_rate = 300000000,
.max_lp_rate = 10000000,
+ .te_support = false,
};
static const struct of_device_id dsicm_of_match[] = {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e8c66d10478f..78893bea85ae 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -364,7 +364,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *
if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
/* check that we only pin down anonymous memory
to prevent problems with writeback */
- unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
+ unsigned long end = gtt->userptr + (u64)ttm->num_pages * PAGE_SIZE;
struct vm_area_struct *vma;
vma = find_vma(gtt->usermm, gtt->userptr);
if (!vma || vma->vm_file || vma->vm_end < end)
@@ -386,7 +386,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *
} while (pinned < ttm->num_pages);
r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
- ttm->num_pages << PAGE_SHIFT,
+ (u64)ttm->num_pages << PAGE_SHIFT,
GFP_KERNEL);
if (r)
goto release_sg;
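
Both casts above address the same 32-bit truncation: on a 32-bit kernel the page count is a 32-bit quantity, so num_pages << PAGE_SHIFT is evaluated in 32 bits and wraps for mappings of 4 GiB and up. A minimal standalone sketch of the failure mode (hypothetical values, plain C rather than driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t num_pages = 1U << 20;   /* 1Mi pages x 4 KiB = 4 GiB */
	uint32_t page_shift = 12;

	/* Evaluated in 32 bits: 1 << 32 wraps to 0. */
	uint32_t wrapped = num_pages << page_shift;
	/* Widened first, as in the fix: yields the true byte count. */
	uint64_t correct = (uint64_t)num_pages << page_shift;

	printf("wrapped=%u correct=%llu\n",
	       wrapped, (unsigned long long)correct);
	return 0;
}
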
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 269390bc586e..76657dcdf9b0 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -210,6 +210,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
{
const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
+ struct vc4_dev *vc4 = to_vc4_dev(vc4_crtc->base.dev);
u32 fifo_len_bytes = pv_data->fifo_depth;
/*
@@ -238,6 +239,22 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
if (crtc_data->hvs_output == 5)
return 32;
+ /*
+ * It looks like in some situations, we will overflow
+ * the PixelValve FIFO (with the bit 10 of PV stat being
+ * set) and stall the HVS / PV, eventually resulting in
+ * a page flip timeout.
+ *
+ * Displaying the video overlay during playback with
+ * Kodi on an RPi3 is a reliable way to reproduce it,
+ * with a failure rate around 50%.
+ *
+ * Removing 1 from the FIFO full level however
+ * seems to completely remove that issue.
+ */
+ if (!vc4->hvs->hvs5)
+ return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1;
+
return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
}
}
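
With HVS_FIFO_LATENCY_PIX assumed to be 6 (the value defined alongside this code; treated here as an assumption) and a hypothetical fifo_depth of 64, the change lowers the returned full level from 46 to 45 on HVS4 hardware:

#include <stdio.h>

#define HVS_FIFO_LATENCY_PIX 6   /* assumed value */

int main(void)
{
	unsigned int fifo_depth = 64;   /* hypothetical pv_data->fifo_depth */
	unsigned int hvs4_level = fifo_depth - 3 * HVS_FIFO_LATENCY_PIX - 1;
	unsigned int hvs5_level = fifo_depth - 3 * HVS_FIFO_LATENCY_PIX;

	printf("hvs4=%u hvs5=%u\n", hvs4_level, hvs5_level);  /* 45 and 46 */
	return 0;
}
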
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 7322169c0682..1e9c84cf614a 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -1146,7 +1146,6 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
plane->state->src_y = state->src_y;
plane->state->src_w = state->src_w;
plane->state->src_h = state->src_h;
- plane->state->src_h = state->src_h;
plane->state->alpha = state->alpha;
plane->state->pixel_blend_mode = state->pixel_blend_mode;
plane->state->rotation = state->rotation;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index ba658fa9cf6c..183571c387b7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -481,11 +481,15 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
vmw_bo_unreference(&old_buf);
res->id = vcotbl->type;
+ /* Release the pin acquired in vmw_bo_init */
+ ttm_bo_unpin(bo);
+
return 0;
out_map_new:
ttm_bo_kunmap(&old_map);
out_wait:
+ ttm_bo_unpin(bo);
ttm_bo_unreserve(bo);
vmw_bo_unreference(&buf);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index dd69b51c40e4..6fa24645fbbf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -712,17 +712,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
dev_priv->last_read_seqno = (uint32_t) -100;
dev_priv->drm.dev_private = dev_priv;
- ret = vmw_setup_pci_resources(dev_priv, pci_id);
- if (ret)
- return ret;
- ret = vmw_detect_version(dev_priv);
- if (ret)
- goto out_no_pci_or_version;
-
mutex_init(&dev_priv->cmdbuf_mutex);
- mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
- mutex_init(&dev_priv->global_kms_state_mutex);
ttm_lock_init(&dev_priv->reservation_sem);
spin_lock_init(&dev_priv->resource_lock);
spin_lock_init(&dev_priv->hw_lock);
@@ -730,6 +721,14 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
spin_lock_init(&dev_priv->cap_lock);
spin_lock_init(&dev_priv->cursor_lock);
+ ret = vmw_setup_pci_resources(dev_priv, pci_id);
+ if (ret)
+ return ret;
+ ret = vmw_detect_version(dev_priv);
+ if (ret)
+ goto out_no_pci_or_version;
+
+
for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]);
INIT_LIST_HEAD(&dev_priv->res_lru[i]);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5fa5bcd20cc5..eb76a6b9ebca 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -529,7 +529,6 @@ struct vmw_private {
struct vmw_overlay *overlay_priv;
struct drm_property *hotplug_mode_update_property;
struct drm_property *implicit_placement_property;
- struct mutex global_kms_state_mutex;
spinlock_t cursor_lock;
struct drm_atomic_state *suspend_state;
@@ -592,7 +591,6 @@ struct vmw_private {
bool refuse_hibernation;
bool suspend_locked;
- struct mutex release_mutex;
atomic_t num_fifo_resources;
/*
@@ -1524,9 +1522,8 @@ static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
struct vmw_buffer_object *tmp_buf = *buf;
*buf = NULL;
- if (tmp_buf != NULL) {
+ if (tmp_buf != NULL)
ttm_bo_put(&tmp_buf->base);
- }
}
static inline struct vmw_buffer_object *
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index a372980fe6a5..f2d625415458 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -94,6 +94,16 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
struct vmw_piter data_iter,
unsigned long num_data_pages);
+
+static inline void vmw_bo_unpin_unlocked(struct ttm_buffer_object *bo)
+{
+ int ret = ttm_bo_reserve(bo, false, true, NULL);
+ BUG_ON(ret != 0);
+ ttm_bo_unpin(bo);
+ ttm_bo_unreserve(bo);
+}
+
+
/*
* vmw_setup_otable_base - Issue an object table base setup command to
* the device
@@ -277,6 +287,7 @@ out_no_setup:
&batch->otables[i]);
}
+ vmw_bo_unpin_unlocked(batch->otable_bo);
ttm_bo_put(batch->otable_bo);
batch->otable_bo = NULL;
return ret;
@@ -340,6 +351,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
BUG_ON(ret != 0);
vmw_bo_fence_single(bo, NULL);
+ ttm_bo_unpin(bo);
ttm_bo_unreserve(bo);
ttm_bo_put(batch->otable_bo);
@@ -528,6 +540,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
void vmw_mob_destroy(struct vmw_mob *mob)
{
if (mob->pt_bo) {
+ vmw_bo_unpin_unlocked(mob->pt_bo);
ttm_bo_put(mob->pt_bo);
mob->pt_bo = NULL;
}
@@ -643,6 +656,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
out_no_cmd_space:
vmw_fifo_resource_dec(dev_priv);
if (pt_set_up) {
+ vmw_bo_unpin_unlocked(mob->pt_bo);
ttm_bo_put(mob->pt_bo);
mob->pt_bo = NULL;
}
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 30d9adf31c84..9f14d99c763c 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -521,7 +521,7 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
if (IS_ERR(drm_dev)) {
ret = PTR_ERR(drm_dev);
- goto fail;
+ goto fail_dev;
}
drm_info->drm_dev = drm_dev;
@@ -551,8 +551,10 @@ fail_modeset:
drm_kms_helper_poll_fini(drm_dev);
drm_mode_config_cleanup(drm_dev);
drm_dev_put(drm_dev);
-fail:
+fail_dev:
kfree(drm_info);
+ front_info->drm_info = NULL;
+fail:
return ret;
}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.h b/drivers/gpu/drm/xen/xen_drm_front_conn.h
index 3adacba9a23b..e5f4314899ee 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_conn.h
+++ b/drivers/gpu/drm/xen/xen_drm_front_conn.h
@@ -16,7 +16,6 @@
struct drm_connector;
struct xen_drm_front_drm_info;
-struct xen_drm_front_drm_info;
int xen_drm_front_conn_init(struct xen_drm_front_drm_info *drm_info,
struct drm_connector *connector);
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index dbac16641662..ddecc84fd6f0 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/dmi.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
@@ -22,9 +23,13 @@
#define ACEL_EN BIT(0)
#define GYRO_EN BIT(1)
-#define MAGNO_EN BIT(2)
+#define MAGNO_EN BIT(2)
#define ALS_EN BIT(19)
+static int sensor_mask_override = -1;
+module_param_named(sensor_mask, sensor_mask_override, int, 0444);
+MODULE_PARM_DESC(sensor_mask, "override the detected sensors mask");
+
void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
{
union sfh_cmd_param cmd_param;
@@ -73,12 +78,41 @@ void amd_stop_all_sensors(struct amd_mp2_dev *privdata)
writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
}
+static const struct dmi_system_id dmi_sensor_mask_overrides[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 13-ag0xxx"),
+ },
+ .driver_data = (void *)(ACEL_EN | MAGNO_EN),
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 15-cp0xxx"),
+ },
+ .driver_data = (void *)(ACEL_EN | MAGNO_EN),
+ },
+ { }
+};
+
int amd_mp2_get_sensor_num(struct amd_mp2_dev *privdata, u8 *sensor_id)
{
int activestatus, num_of_sensors = 0;
+ const struct dmi_system_id *dmi_id;
+ u32 activecontrolstatus;
+
+ if (sensor_mask_override == -1) {
+ dmi_id = dmi_first_match(dmi_sensor_mask_overrides);
+ if (dmi_id)
+ sensor_mask_override = (long)dmi_id->driver_data;
+ }
+
+ if (sensor_mask_override >= 0) {
+ activestatus = sensor_mask_override;
+ } else {
+ activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3);
+ activestatus = activecontrolstatus >> 4;
+ }
- privdata->activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3);
- activestatus = privdata->activecontrolstatus >> 4;
if (ACEL_EN & activestatus)
sensor_id[num_of_sensors++] = accel_idx;
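
The effective precedence implemented above is: explicit sensor_mask module parameter, then DMI quirk table, then the mask derived from the hardware register. A compact sketch of that resolution order (all values hypothetical; the DMI lookup is folded into the override in the real code):

#include <stdio.h>

/* Resolve the active sensor mask:
 * module parameter > DMI quirk > hardware register. */
static int resolve_mask(int param_override, int dmi_override, unsigned int hw_reg)
{
	if (param_override >= 0)
		return param_override;
	if (dmi_override >= 0)
		return dmi_override;
	return hw_reg >> 4;      /* active status lives in the upper bits */
}

int main(void)
{
	printf("%#x\n", resolve_mask(-1, -1, 0x130));   /* hw fallback: 0x13 */
	printf("%#x\n", resolve_mask(-1, 0x5, 0x130));  /* DMI quirk: 0x5 */
	printf("%#x\n", resolve_mask(0x1, 0x5, 0x130)); /* module param: 0x1 */
	return 0;
}
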
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
index 8f8d19b2cfe5..489415f7c22c 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
@@ -61,7 +61,6 @@ struct amd_mp2_dev {
struct pci_dev *pdev;
struct amdtp_cl_data *cl_data;
void __iomem *mmio;
- u32 activecontrolstatus;
};
struct amd_mp2_sensor_info {
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index 3feaece13ade..6b665931147d 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -761,6 +761,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
if (input_register_device(data->input2)) {
input_free_device(input2);
+ ret = -ENOENT;
goto exit;
}
}
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 1dfe184ebf5a..2ab22b925941 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -1222,6 +1222,9 @@ static const struct hid_device_id asus_devices[] = {
USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD),
QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2),
+ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD),
QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 21e15627a461..477baa30889c 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -161,6 +161,7 @@ struct cp2112_device {
atomic_t read_avail;
atomic_t xfer_avail;
struct gpio_chip gc;
+ struct irq_chip irq;
u8 *in_out_buffer;
struct mutex lock;
@@ -1175,16 +1176,6 @@ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
return 0;
}
-static struct irq_chip cp2112_gpio_irqchip = {
- .name = "cp2112-gpio",
- .irq_startup = cp2112_gpio_irq_startup,
- .irq_shutdown = cp2112_gpio_irq_shutdown,
- .irq_ack = cp2112_gpio_irq_ack,
- .irq_mask = cp2112_gpio_irq_mask,
- .irq_unmask = cp2112_gpio_irq_unmask,
- .irq_set_type = cp2112_gpio_irq_type,
-};
-
static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
int pin)
{
@@ -1339,8 +1330,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev->gc.can_sleep = 1;
dev->gc.parent = &hdev->dev;
+ dev->irq.name = "cp2112-gpio";
+ dev->irq.irq_startup = cp2112_gpio_irq_startup;
+ dev->irq.irq_shutdown = cp2112_gpio_irq_shutdown;
+ dev->irq.irq_ack = cp2112_gpio_irq_ack;
+ dev->irq.irq_mask = cp2112_gpio_irq_mask;
+ dev->irq.irq_unmask = cp2112_gpio_irq_unmask;
+ dev->irq.irq_set_type = cp2112_gpio_irq_type;
+ dev->irq.flags = IRQCHIP_MASK_ON_SUSPEND;
+
girq = &dev->gc.irq;
- girq->chip = &cp2112_gpio_irqchip;
+ girq->chip = &dev->irq;
/* The event comes from the outside so no parent handler */
girq->parent_handler = NULL;
girq->num_parents = 0;
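
Moving the irq_chip from a file-scope static into struct cp2112_device gives every bound device its own mutable copy, so per-device settings such as the IRQCHIP_MASK_ON_SUSPEND flag set above cannot leak between instances. A small sketch of the copy-then-customize pattern (generic stand-in types, not the kernel irq_chip API):

#include <stdio.h>
#include <stdlib.h>

struct chip_like {
	const char *name;
	unsigned long flags;
};

struct device_ctx {
	struct chip_like irq;    /* per-device copy, safe to tailor */
};

/* One shared template; mutating it directly would affect every
 * device that registered it. */
static const struct chip_like template = { "demo-gpio", 0 };

static struct device_ctx *device_ctx_new(unsigned long extra_flags)
{
	struct device_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return NULL;
	ctx->irq = template;             /* copy ... */
	ctx->irq.flags |= extra_flags;   /* ... then customize this copy only */
	return ctx;
}

int main(void)
{
	struct device_ctx *a = device_ctx_new(0x1);
	struct device_ctx *b = device_ctx_new(0x0);

	if (!a || !b)
		return 1;
	printf("a=%#lx b=%#lx template=%#lx\n",
	       a->irq.flags, b->irq.flags, template.flags);
	free(a);
	free(b);
	return 0;
}
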
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index d9319622da44..e60c31dd05ff 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -574,6 +574,8 @@ static void hammer_remove(struct hid_device *hdev)
static const struct hid_device_id hammer_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index e42aaae3138f..67fd8a2f5aba 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -194,6 +194,7 @@
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2 0x1837
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD 0x1866
+#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2 0x19b6
#define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD 0x1869
#define USB_VENDOR_ID_ATEN 0x0557
@@ -493,6 +494,7 @@
#define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c
#define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
#define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
+#define USB_DEVICE_ID_GOOGLE_DON 0x5050
#define USB_VENDOR_ID_GOTOP 0x08f2
#define USB_DEVICE_ID_SUPER_Q2 0x007f
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 44d715c12f6a..2d70dc4bea65 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2533,7 +2533,7 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
!wacom_wac->shared->is_touch_on) {
if (!wacom_wac->shared->touch_down)
return;
- prox = 0;
+ prox = false;
}
wacom_wac->hid_data.num_received++;
@@ -3574,8 +3574,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
{
struct wacom_features *features = &wacom_wac->features;
- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-
if (!(features->device_type & WACOM_DEVICETYPE_PEN))
return -ENODEV;
@@ -3590,6 +3588,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
return 0;
}
+ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(ABS_MISC, input_dev->absbit);
@@ -3742,8 +3741,6 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
{
struct wacom_features *features = &wacom_wac->features;
- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-
if (!(features->device_type & WACOM_DEVICETYPE_TOUCH))
return -ENODEV;
@@ -3756,6 +3753,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
/* setup has already been done */
return 0;
+ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
__set_bit(BTN_TOUCH, input_dev->keybit);
if (features->touch_max == 1) {
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 79e5356a737a..66c794d92391 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -23,6 +23,7 @@ config HYPERV_UTILS
config HYPERV_BALLOON
tristate "Microsoft Hyper-V Balloon driver"
depends on HYPERV
+ select PAGE_REPORTING
help
Select this option to enable Hyper-V Balloon driver.
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 0bd202de7960..c2635e913a92 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -209,31 +209,96 @@ int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
}
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
+static int send_modifychannel_without_ack(struct vmbus_channel *channel, u32 target_vp)
+{
+ struct vmbus_channel_modifychannel msg;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.header.msgtype = CHANNELMSG_MODIFYCHANNEL;
+ msg.child_relid = channel->offermsg.child_relid;
+ msg.target_vp = target_vp;
+
+ ret = vmbus_post_msg(&msg, sizeof(msg), true);
+ trace_vmbus_send_modifychannel(&msg, ret);
+
+ return ret;
+}
+
+static int send_modifychannel_with_ack(struct vmbus_channel *channel, u32 target_vp)
+{
+ struct vmbus_channel_modifychannel *msg;
+ struct vmbus_channel_msginfo *info;
+ unsigned long flags;
+ int ret;
+
+ info = kzalloc(sizeof(struct vmbus_channel_msginfo) +
+ sizeof(struct vmbus_channel_modifychannel),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ init_completion(&info->waitevent);
+ info->waiting_channel = channel;
+
+ msg = (struct vmbus_channel_modifychannel *)info->msg;
+ msg->header.msgtype = CHANNELMSG_MODIFYCHANNEL;
+ msg->child_relid = channel->offermsg.child_relid;
+ msg->target_vp = target_vp;
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_add_tail(&info->msglistentry, &vmbus_connection.chn_msg_list);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ ret = vmbus_post_msg(msg, sizeof(*msg), true);
+ trace_vmbus_send_modifychannel(msg, ret);
+ if (ret != 0) {
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ goto free_info;
+ }
+
+ /*
+ * Release channel_mutex; otherwise, vmbus_onoffer_rescind() could block on
+ * the mutex and be unable to signal the completion.
+ *
+ * See the caller target_cpu_store() for information about the usage of the
+ * mutex.
+ */
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ wait_for_completion(&info->waitevent);
+ mutex_lock(&vmbus_connection.channel_mutex);
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ if (info->response.modify_response.status)
+ ret = -EAGAIN;
+
+free_info:
+ kfree(info);
+ return ret;
+}
+
/*
* Set/change the vCPU (@target_vp) the channel (@child_relid) will interrupt.
*
- * CHANNELMSG_MODIFYCHANNEL messages are aynchronous. Also, Hyper-V does not
- * ACK such messages. IOW we can't know when the host will stop interrupting
- * the "old" vCPU and start interrupting the "new" vCPU for the given channel.
+ * CHANNELMSG_MODIFYCHANNEL messages are asynchronous. When VMbus version 5.3

+ * or later is negotiated, Hyper-V always sends an ACK in response to such a
+ * message. For VMbus version 5.2 and earlier, it never sends an ACK. Without
+ * an ACK, we cannot know when the host will stop interrupting the "old"
+ * vCPU and start interrupting the "new" vCPU for the given channel.
*
* The CHANNELMSG_MODIFYCHANNEL message type is supported since VMBus version
* VERSION_WIN10_V4_1.
*/
-int vmbus_send_modifychannel(u32 child_relid, u32 target_vp)
+int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp)
{
- struct vmbus_channel_modifychannel conn_msg;
- int ret;
-
- memset(&conn_msg, 0, sizeof(conn_msg));
- conn_msg.header.msgtype = CHANNELMSG_MODIFYCHANNEL;
- conn_msg.child_relid = child_relid;
- conn_msg.target_vp = target_vp;
-
- ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
-
- trace_vmbus_send_modifychannel(&conn_msg, ret);
-
- return ret;
+ if (vmbus_proto_version >= VERSION_WIN10_V5_3)
+ return send_modifychannel_with_ack(channel, target_vp);
+ return send_modifychannel_without_ack(channel, target_vp);
}
EXPORT_SYMBOL_GPL(vmbus_send_modifychannel);
@@ -385,7 +450,7 @@ nomem:
* @kbuffer: from kmalloc or vmalloc
* @size: page-size multiple
* @send_offset: the offset (in bytes) where the send ring buffer starts,
- * should be 0 for BUFFER type gpadl
+ * should be 0 for BUFFER type gpadl
* @gpadl_handle: some funky thing
*/
static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
@@ -653,7 +718,7 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
if (newchannel->rescind) {
err = -ENODEV;
- goto error_free_info;
+ goto error_clean_msglist;
}
err = vmbus_post_msg(open_msg,
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index f0ed730e2e4e..caf6d0c4bc1b 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -333,7 +333,6 @@ fw_error:
negop->icversion_data[1].minor = icmsg_minor;
return found_match;
}
-
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
/*
@@ -593,10 +592,10 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
* CPUS_READ_UNLOCK CPUS_WRITE_UNLOCK
*
* Forbids: CPU1's LOAD from *not* seing CPU2's STORE &&
- * CPU2's SEARCH from *not* seeing CPU1's INSERT
+ * CPU2's SEARCH from *not* seeing CPU1's INSERT
*
* Forbids: CPU2's SEARCH from seeing CPU1's INSERT &&
- * CPU2's LOAD from *not* seing CPU1's STORE
+ * CPU2's LOAD from *not* seeing CPU1's STORE
*/
cpus_read_lock();
@@ -756,6 +755,12 @@ static void init_vp_index(struct vmbus_channel *channel)
free_cpumask_var(available_mask);
}
+#define UNLOAD_DELAY_UNIT_MS 10 /* 10 milliseconds */
+#define UNLOAD_WAIT_MS (100*1000) /* 100 seconds */
+#define UNLOAD_WAIT_LOOPS (UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS)
+#define UNLOAD_MSG_MS (5*1000) /* Every 5 seconds */
+#define UNLOAD_MSG_LOOPS (UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS)
+
static void vmbus_wait_for_unload(void)
{
int cpu;
@@ -773,12 +778,17 @@ static void vmbus_wait_for_unload(void)
* vmbus_connection.unload_event. If not, the last thing we can do is
* read message pages for all CPUs directly.
*
- * Wait no more than 10 seconds so that the panic path can't get
- * hung forever in case the response message isn't seen.
+ * Wait up to 100 seconds since an Azure host must writeback any dirty
+ * data in its disk cache before the VMbus UNLOAD request will
+ * complete. This flushing has been empirically observed to take up
+ * to 50 seconds in cases with a lot of dirty data, so allow additional
+ * leeway to cover inaccuracies in mdelay(). But eventually time out so
+ * that the panic path can't get hung forever in case the response
+ * message isn't seen.
*/
- for (i = 0; i < 1000; i++) {
+ for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {
if (completion_done(&vmbus_connection.unload_event))
- break;
+ goto completed;
for_each_online_cpu(cpu) {
struct hv_per_cpu_context *hv_cpu
@@ -801,9 +811,18 @@ static void vmbus_wait_for_unload(void)
vmbus_signal_eom(msg, message_type);
}
- mdelay(10);
+ /*
+ * Give a notice periodically so someone watching the
+ * serial output won't think it is completely hung.
+ */
+ if (!(i % UNLOAD_MSG_LOOPS))
+ pr_notice("Waiting for VMBus UNLOAD to complete\n");
+
+ mdelay(UNLOAD_DELAY_UNIT_MS);
}
+ pr_err("Continuing even though VMBus UNLOAD did not complete\n");
+completed:
/*
* We're crashing and already got the UNLOAD_RESPONSE, cleanup all
* maybe-pending messages on all CPUs to be able to receive new
@@ -827,6 +846,11 @@ static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
/*
* This is a global event; just wakeup the waiting thread.
* Once we successfully unload, we can cleanup the monitor state.
+ *
+ * NB. A malicious or compromised Hyper-V could send a spurious
+ * message of type CHANNELMSG_UNLOAD_RESPONSE, and trigger a call
+ * of the complete() below. Make sure that unload_event has been
+ * initialized by the time this complete() is executed.
*/
complete(&vmbus_connection.unload_event);
}
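
The rewritten wait loop above is a bounded-poll pattern: sleep in small fixed units, emit a progress notice every UNLOAD_MSG_LOOPS iterations, and fall through with an error after UNLOAD_WAIT_LOOPS rather than hanging the panic path. A userspace sketch of the same shape (the done() predicate is a hypothetical stand-in for completion_done()):

#include <stdio.h>

#define DELAY_UNIT_MS 10
#define WAIT_LOOPS    ((100 * 1000) / DELAY_UNIT_MS)   /* 100 s ceiling */
#define MSG_LOOPS     ((5 * 1000) / DELAY_UNIT_MS)     /* notice every 5 s */

/* Hypothetical stand-in for completion_done(). */
static int done(int i)
{
	return i > 1234;
}

int main(void)
{
	int i;

	for (i = 1; i <= WAIT_LOOPS; i++) {
		if (done(i)) {
			printf("completed after %d ms\n", i * DELAY_UNIT_MS);
			return 0;
		}
		if (!(i % MSG_LOOPS))
			printf("still waiting (%d ms)\n", i * DELAY_UNIT_MS);
		/* mdelay(DELAY_UNIT_MS) in the kernel; omitted here */
	}
	printf("continuing even though the wait did not complete\n");
	return 1;
}
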
@@ -842,7 +866,7 @@ void vmbus_initiate_unload(bool crash)
if (vmbus_proto_version < VERSION_WIN8_1)
return;
- init_completion(&vmbus_connection.unload_event);
+ reinit_completion(&vmbus_connection.unload_event);
memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
hdr.msgtype = CHANNELMSG_UNLOAD;
vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
@@ -980,7 +1004,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
* UNLOCK channel_mutex
*
* Forbids: r1 == valid_relid &&
- * channels[valid_relid] == channel
+ * channels[valid_relid] == channel
*
* Note. r1 can be INVALID_RELID only for an hv_sock channel.
* None of the hv_sock channels which were present before the
@@ -1313,6 +1337,46 @@ static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
}
/*
+ * vmbus_onmodifychannel_response - Modify Channel response handler.
+ *
+ * This is invoked when we received a response to our channel modify request.
+ * Find the matching request, copy the response and signal the requesting thread.
+ */
+static void vmbus_onmodifychannel_response(struct vmbus_channel_message_header *hdr)
+{
+ struct vmbus_channel_modifychannel_response *response;
+ struct vmbus_channel_msginfo *msginfo;
+ unsigned long flags;
+
+ response = (struct vmbus_channel_modifychannel_response *)hdr;
+
+ trace_vmbus_onmodifychannel_response(response);
+
+ /*
+ * Find the modify msg, copy the response and signal/unblock the wait event.
+ */
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+
+ list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list, msglistentry) {
+ struct vmbus_channel_message_header *responseheader =
+ (struct vmbus_channel_message_header *)msginfo->msg;
+
+ if (responseheader->msgtype == CHANNELMSG_MODIFYCHANNEL) {
+ struct vmbus_channel_modifychannel *modifymsg;
+
+ modifymsg = (struct vmbus_channel_modifychannel *)msginfo->msg;
+ if (modifymsg->child_relid == response->child_relid) {
+ memcpy(&msginfo->response.modify_response, response,
+ sizeof(*response));
+ complete(&msginfo->waitevent);
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+}
+
+/*
* vmbus_ongpadl_torndown - GPADL torndown handler.
*
* This is invoked when we received a response to our gpadl teardown request.
@@ -1429,6 +1493,8 @@ channel_message_table[CHANNELMSG_COUNT] = {
{ CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL, 0},
{ CHANNELMSG_MODIFYCHANNEL, 0, NULL, 0},
{ CHANNELMSG_TL_CONNECT_RESULT, 0, NULL, 0},
+ { CHANNELMSG_MODIFYCHANNEL_RESPONSE, 1, vmbus_onmodifychannel_response,
+ sizeof(struct vmbus_channel_modifychannel_response)},
};
/*
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index c83612cddb99..311cd005b3be 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -26,9 +26,11 @@
struct vmbus_connection vmbus_connection = {
.conn_state = DISCONNECTED,
+ .unload_event = COMPLETION_INITIALIZER(
+ vmbus_connection.unload_event),
.next_gpadl_handle = ATOMIC_INIT(0xE1E10),
- .ready_for_suspend_event= COMPLETION_INITIALIZER(
+ .ready_for_suspend_event = COMPLETION_INITIALIZER(
vmbus_connection.ready_for_suspend_event),
.ready_for_resume_event = COMPLETION_INITIALIZER(
vmbus_connection.ready_for_resume_event),
@@ -45,6 +47,7 @@ EXPORT_SYMBOL_GPL(vmbus_proto_version);
* Table of VMBus versions listed from newest to oldest.
*/
static __u32 vmbus_versions[] = {
+ VERSION_WIN10_V5_3,
VERSION_WIN10_V5_2,
VERSION_WIN10_V5_1,
VERSION_WIN10_V5,
@@ -60,7 +63,7 @@ static __u32 vmbus_versions[] = {
* Maximal VMBus protocol version guests can negotiate. Useful to cap the
* VMBus version for testing and debugging purpose.
*/
-static uint max_version = VERSION_WIN10_V5_2;
+static uint max_version = VERSION_WIN10_V5_3;
module_param(max_version, uint, S_IRUGO);
MODULE_PARM_DESC(max_version,
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index f202ac7f4b3d..e83507f49676 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -13,9 +13,10 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
-#include <linux/version.h>
#include <linux/random.h>
#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
@@ -37,6 +38,42 @@ int hv_init(void)
}
/*
+ * Functions for allocating and freeing memory with size and
+ * alignment HV_HYP_PAGE_SIZE. These functions are needed because
+ * the guest page size may not be the same as the Hyper-V page
+ * size. We depend upon kmalloc() aligning power-of-two size
+ * allocations to the allocation size boundary, so that the
+ * allocated memory appears to Hyper-V as a page of the size
+ * it expects.
+ */
+
+void *hv_alloc_hyperv_page(void)
+{
+ BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE);
+
+ if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
+ return (void *)__get_free_page(GFP_KERNEL);
+ else
+ return kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
+}
+
+void *hv_alloc_hyperv_zeroed_page(void)
+{
+ if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
+ return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ else
+ return kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
+}
+
+void hv_free_hyperv_page(unsigned long addr)
+{
+ if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
+ free_page(addr);
+ else
+ kfree((void *)addr);
+}
+
+/*
* hv_post_message - Post a message using the hypervisor message IPC.
*
* This involves a hypercall.
@@ -68,7 +105,7 @@ int hv_post_message(union hv_connection_id connection_id,
*/
put_cpu_ptr(hv_cpu);
- return status & 0xFFFF;
+ return hv_result(status);
}
int hv_synic_alloc(void)
@@ -162,34 +199,48 @@ void hv_synic_enable_regs(unsigned int cpu)
union hv_synic_scontrol sctrl;
/* Setup the Synic's message page */
- hv_get_simp(simp.as_uint64);
+ simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
simp.simp_enabled = 1;
simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
>> HV_HYP_PAGE_SHIFT;
- hv_set_simp(simp.as_uint64);
+ hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
/* Setup the Synic's event page */
- hv_get_siefp(siefp.as_uint64);
+ siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 1;
siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
>> HV_HYP_PAGE_SHIFT;
- hv_set_siefp(siefp.as_uint64);
+ hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
/* Setup the shared SINT. */
- hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+ if (vmbus_irq != -1)
+ enable_percpu_irq(vmbus_irq, 0);
+ shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
+ VMBUS_MESSAGE_SINT);
- shared_sint.vector = hv_get_vector();
+ shared_sint.vector = vmbus_interrupt;
shared_sint.masked = false;
- shared_sint.auto_eoi = hv_recommend_using_aeoi();
- hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+
+ /*
+ * On architectures where Hyper-V doesn't support AEOI (e.g., ARM64),
+ * it doesn't provide a recommendation flag and AEOI must be disabled.
+ */
+#ifdef HV_DEPRECATING_AEOI_RECOMMENDED
+ shared_sint.auto_eoi =
+ !(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED);
+#else
+ shared_sint.auto_eoi = 0;
+#endif
+ hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
+ shared_sint.as_uint64);
/* Enable the global synic bit */
- hv_get_synic_state(sctrl.as_uint64);
+ sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
sctrl.enable = 1;
- hv_set_synic_state(sctrl.as_uint64);
+ hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
}
int hv_synic_init(unsigned int cpu)
@@ -211,30 +262,71 @@ void hv_synic_disable_regs(unsigned int cpu)
union hv_synic_siefp siefp;
union hv_synic_scontrol sctrl;
- hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+ shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
+ VMBUS_MESSAGE_SINT);
shared_sint.masked = 1;
/* Need to correctly cleanup in the case of SMP!!! */
/* Disable the interrupt */
- hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+ hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
+ shared_sint.as_uint64);
- hv_get_simp(simp.as_uint64);
+ simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
simp.simp_enabled = 0;
simp.base_simp_gpa = 0;
- hv_set_simp(simp.as_uint64);
+ hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
- hv_get_siefp(siefp.as_uint64);
+ siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 0;
siefp.base_siefp_gpa = 0;
- hv_set_siefp(siefp.as_uint64);
+ hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
/* Disable the global synic bit */
- hv_get_synic_state(sctrl.as_uint64);
+ sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
sctrl.enable = 0;
- hv_set_synic_state(sctrl.as_uint64);
+ hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
+
+ if (vmbus_irq != -1)
+ disable_percpu_irq(vmbus_irq);
+}
+
+#define HV_MAX_TRIES 3
+/*
+ * Scan the event flags page of 'this' CPU looking for any bit that is set. If we find one
+ * bit set, then wait for a few milliseconds. Repeat these steps a maximum of 3 times.
+ * Return 'true' if any bit is still set after this operation; 'false' otherwise.
+ *
+ * If a bit is set, that means there is a pending channel interrupt. The expectation is
+ * that the normal interrupt handling mechanism will find and process the channel interrupt
+ * "very soon", and in the process clear the bit.
+ */
+static bool hv_synic_event_pending(void)
+{
+ struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context);
+ union hv_synic_event_flags *event =
+ (union hv_synic_event_flags *)hv_cpu->synic_event_page + VMBUS_MESSAGE_SINT;
+ unsigned long *recv_int_page = event->flags; /* assumes VMBus version >= VERSION_WIN8 */
+ bool pending;
+ u32 relid;
+ int tries = 0;
+
+retry:
+ pending = false;
+ for_each_set_bit(relid, recv_int_page, HV_EVENT_FLAGS_COUNT) {
+ /* Special case - VMBus channel protocol messages */
+ if (relid == 0)
+ continue;
+ pending = true;
+ break;
+ }
+ if (pending && tries++ < HV_MAX_TRIES) {
+ usleep_range(10000, 20000);
+ goto retry;
+ }
+ return pending;
}
int hv_synic_cleanup(unsigned int cpu)
@@ -242,6 +334,9 @@ int hv_synic_cleanup(unsigned int cpu)
struct vmbus_channel *channel, *sc;
bool channel_found = false;
+ if (vmbus_connection.conn_state != CONNECTED)
+ goto always_cleanup;
+
/*
* Hyper-V does not provide a way to change the connect CPU once
* it is set; we must prevent the connect CPU from going offline
@@ -249,8 +344,7 @@ int hv_synic_cleanup(unsigned int cpu)
* path where the vmbus is already disconnected, the CPU must be
* allowed to shut down.
*/
- if (cpu == VMBUS_CONNECT_CPU &&
- vmbus_connection.conn_state == CONNECTED)
+ if (cpu == VMBUS_CONNECT_CPU)
return -EBUSY;
/*
@@ -277,9 +371,21 @@ int hv_synic_cleanup(unsigned int cpu)
}
mutex_unlock(&vmbus_connection.channel_mutex);
- if (channel_found && vmbus_connection.conn_state == CONNECTED)
+ if (channel_found)
+ return -EBUSY;
+
+ /*
+ * channel_found == false means that any channels that were previously
+ * assigned to the CPU have been reassigned elsewhere with a call of
+ * vmbus_send_modifychannel(). Scan the event flags page looking for
+ * bits that are set and waiting with a timeout for vmbus_chan_sched()
+ * to process such bits. If bits are still set after this operation
+ * and VMBus is connected, fail the CPU offlining operation.
+ */
+ if (vmbus_proto_version >= VERSION_WIN10_V4_1 && hv_synic_event_pending())
return -EBUSY;
+always_cleanup:
hv_stimer_legacy_cleanup(cpu);
hv_synic_disable_regs(cpu);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 2f776d78e3c1..58af84e30144 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -21,6 +21,7 @@
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>
+#include <linux/page_reporting.h>
#include <linux/hyperv.h>
#include <asm/hyperv-tlfs.h>
@@ -563,6 +564,8 @@ struct hv_dynmem_device {
* The negotiated version agreed by host.
*/
__u32 version;
+
+ struct page_reporting_dev_info pr_dev_info;
};
static struct hv_dynmem_device dm_device;
@@ -1568,6 +1571,89 @@ static void balloon_onchannelcallback(void *context)
}
+/* Hyper-V only supports reporting 2MB pages or higher */
+#define HV_MIN_PAGE_REPORTING_ORDER 9
+#define HV_MIN_PAGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << HV_MIN_PAGE_REPORTING_ORDER)
+static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
+ struct scatterlist *sgl, unsigned int nents)
+{
+ unsigned long flags;
+ struct hv_memory_hint *hint;
+ int i;
+ u64 status;
+ struct scatterlist *sg;
+
+ WARN_ON_ONCE(nents > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES);
+ WARN_ON_ONCE(sgl->length < HV_MIN_PAGE_REPORTING_LEN);
+ local_irq_save(flags);
+ hint = *(struct hv_memory_hint **)this_cpu_ptr(hyperv_pcpu_input_arg);
+ if (!hint) {
+ local_irq_restore(flags);
+ return -ENOSPC;
+ }
+
+ hint->type = HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD;
+ hint->reserved = 0;
+ for_each_sg(sgl, sg, nents, i) {
+ union hv_gpa_page_range *range;
+
+ range = &hint->ranges[i];
+ range->address_space = 0;
+ /* page reporting only reports 2MB pages or higher */
+ range->page.largepage = 1;
+ range->page.additional_pages =
+ (sg->length / HV_MIN_PAGE_REPORTING_LEN) - 1;
+ range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB;
+ range->base_large_pfn =
+ page_to_hvpfn(sg_page(sg)) >> HV_MIN_PAGE_REPORTING_ORDER;
+ }
+
+ status = hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, nents, 0,
+ hint, NULL);
+ local_irq_restore(flags);
+ if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) {
+ pr_err("Cold memory discard hypercall failed with status %llx\n",
+ status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void enable_page_reporting(void)
+{
+ int ret;
+
+ /* Essentially, validating 'PAGE_REPORTING_MIN_ORDER' is big enough. */
+ if (pageblock_order < HV_MIN_PAGE_REPORTING_ORDER) {
+ pr_debug("Cold memory discard is only supported on 2MB pages and above\n");
+ return;
+ }
+
+ if (!hv_query_ext_cap(HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT)) {
+ pr_debug("Cold memory discard hint not supported by Hyper-V\n");
+ return;
+ }
+
+ BUILD_BUG_ON(PAGE_REPORTING_CAPACITY > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES);
+ dm_device.pr_dev_info.report = hv_free_page_report;
+ ret = page_reporting_register(&dm_device.pr_dev_info);
+ if (ret < 0) {
+ dm_device.pr_dev_info.report = NULL;
+ pr_err("Failed to enable cold memory discard: %d\n", ret);
+ } else {
+ pr_info("Cold memory discard hint enabled\n");
+ }
+}
+
+static void disable_page_reporting(void)
+{
+ if (dm_device.pr_dev_info.report) {
+ page_reporting_unregister(&dm_device.pr_dev_info);
+ dm_device.pr_dev_info.report = NULL;
+ }
+}
+
static int balloon_connect_vsp(struct hv_device *dev)
{
struct dm_version_request version_req;
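
Each scatterlist entry above is encoded as one 2 MiB-aligned base large-page PFN plus a count of additional 2 MiB pages, hence the length / HV_MIN_PAGE_REPORTING_LEN - 1 term. A quick arithmetic check with hypothetical sizes:

#include <stdio.h>

#define HYP_PAGE_SIZE        4096UL
#define MIN_REPORTING_ORDER  9                                   /* 2 MiB */
#define MIN_REPORTING_LEN    (HYP_PAGE_SIZE << MIN_REPORTING_ORDER)

int main(void)
{
	unsigned long sg_length = 8UL << 20;   /* one 8 MiB free range */

	/* 8 MiB / 2 MiB = 4 large pages: base page + 3 additional. */
	unsigned long additional = sg_length / MIN_REPORTING_LEN - 1;

	printf("additional_pages=%lu\n", additional);   /* prints 3 */
	return 0;
}
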
@@ -1713,6 +1799,7 @@ static int balloon_probe(struct hv_device *dev,
if (ret != 0)
return ret;
+ enable_page_reporting();
dm_device.state = DM_INITIALIZED;
dm_device.thread =
@@ -1727,6 +1814,7 @@ static int balloon_probe(struct hv_device *dev,
probe_error:
dm_device.state = DM_INIT_ERROR;
dm_device.thread = NULL;
+ disable_page_reporting();
vmbus_close(dev->channel);
#ifdef CONFIG_MEMORY_HOTPLUG
unregister_memory_notifier(&hv_memory_nb);
@@ -1749,6 +1837,7 @@ static int balloon_remove(struct hv_device *dev)
cancel_work_sync(&dm->ha_wrk.wrk);
kthread_stop(dm->thread);
+ disable_page_reporting();
vmbus_close(dev->channel);
#ifdef CONFIG_MEMORY_HOTPLUG
unregister_memory_notifier(&hv_memory_nb);
diff --git a/drivers/hv/hv_trace.h b/drivers/hv/hv_trace.h
index 6063bb21bb13..c02a1719e92f 100644
--- a/drivers/hv/hv_trace.h
+++ b/drivers/hv/hv_trace.h
@@ -103,6 +103,21 @@ TRACE_EVENT(vmbus_ongpadl_created,
)
);
+TRACE_EVENT(vmbus_onmodifychannel_response,
+ TP_PROTO(const struct vmbus_channel_modifychannel_response *response),
+ TP_ARGS(response),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, status)
+ ),
+ TP_fast_assign(__entry->child_relid = response->child_relid;
+ __entry->status = response->status;
+ ),
+ TP_printk("child_relid 0x%x, status %d",
+ __entry->child_relid, __entry->status
+ )
+ );
+
TRACE_EVENT(vmbus_ongpadl_torndown,
TP_PROTO(const struct vmbus_channel_gpadl_torndown *gpadltorndown),
TP_ARGS(gpadltorndown),
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 35833d4d1a1d..374f8afbf8a5 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -84,15 +84,6 @@ hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
ring_info->ring_buffer->write_index = next_write_location;
}
-/* Set the next read location for the specified ring buffer. */
-static inline void
-hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
- u32 next_read_location)
-{
- ring_info->ring_buffer->read_index = next_read_location;
- ring_info->priv_read_index = next_read_location;
-}
-
/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
@@ -313,7 +304,6 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
rqst_id = vmbus_next_request_id(&channel->requestor, requestid);
if (rqst_id == VMBUS_RQST_ERROR) {
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
- pr_err("No request id available\n");
return -EAGAIN;
}
}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 10dce9f91216..b12d6827b222 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -48,8 +48,10 @@ static int hyperv_cpuhp_online;
static void *hv_panic_page;
+static long __percpu *vmbus_evt;
+
/* Values parsed from ACPI DSDT */
-static int vmbus_irq;
+int vmbus_irq;
int vmbus_interrupt;
/*
@@ -1381,7 +1383,13 @@ static void vmbus_isr(void)
tasklet_schedule(&hv_cpu->msg_dpc);
}
- add_interrupt_randomness(hv_get_vector(), 0);
+ add_interrupt_randomness(vmbus_interrupt, 0);
+}
+
+static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
+{
+ vmbus_isr();
+ return IRQ_HANDLED;
}
/*
@@ -1392,22 +1400,36 @@ static void hv_kmsg_dump(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason)
{
size_t bytes_written;
- phys_addr_t panic_pa;
/* We are only interested in panics. */
if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
return;
- panic_pa = virt_to_phys(hv_panic_page);
-
/*
* Write dump contents to the page. No need to synchronize; panic should
* be single-threaded.
*/
kmsg_dump_get_buffer(dumper, false, hv_panic_page, HV_HYP_PAGE_SIZE,
&bytes_written);
- if (bytes_written)
- hyperv_report_panic_msg(panic_pa, bytes_written);
+ if (!bytes_written)
+ return;
+ /*
+ * Set P3 to the physical address of the panic page and P4 to
+ * the size of the panic data in that page. The remaining
+ * registers are no-ops when the NOTIFY_MSG flag is set.
+ */
+ hv_set_register(HV_REGISTER_CRASH_P0, 0);
+ hv_set_register(HV_REGISTER_CRASH_P1, 0);
+ hv_set_register(HV_REGISTER_CRASH_P2, 0);
+ hv_set_register(HV_REGISTER_CRASH_P3, virt_to_phys(hv_panic_page));
+ hv_set_register(HV_REGISTER_CRASH_P4, bytes_written);
+
+ /*
+ * Let Hyper-V know there is crash data available along with
+ * the panic message.
+ */
+ hv_set_register(HV_REGISTER_CRASH_CTL,
+ (HV_CRASH_CTL_CRASH_NOTIFY | HV_CRASH_CTL_CRASH_NOTIFY_MSG));
}
static struct kmsg_dumper hv_kmsg_dumper = {
@@ -1482,9 +1504,28 @@ static int vmbus_bus_init(void)
if (ret)
return ret;
- ret = hv_setup_vmbus_irq(vmbus_irq, vmbus_isr);
- if (ret)
- goto err_setup;
+ /*
+ * VMbus interrupts are best modeled as per-cpu interrupts. If
+ * on an architecture with support for per-cpu IRQs (e.g. ARM64),
+ * allocate a per-cpu IRQ using standard Linux kernel functionality.
+ * If not on such an architecture (e.g., x86/x64), then rely on
+ * code in the arch-specific portion of the code tree to connect
+ * the VMbus interrupt handler.
+ */
+
+ if (vmbus_irq == -1) {
+ hv_setup_vmbus_handler(vmbus_isr);
+ } else {
+ vmbus_evt = alloc_percpu(long);
+ ret = request_percpu_irq(vmbus_irq, vmbus_percpu_isr,
+ "Hyper-V VMbus", vmbus_evt);
+ if (ret) {
+ pr_err("Can't request Hyper-V VMbus IRQ %d, Err %d",
+ vmbus_irq, ret);
+ free_percpu(vmbus_evt);
+ goto err_setup;
+ }
+ }
ret = hv_synic_alloc();
if (ret)
@@ -1521,7 +1562,7 @@ static int vmbus_bus_init(void)
* Register for panic kmsg callback only if the right
* capability is supported by the hypervisor.
*/
- hv_get_crash_ctl(hyperv_crash_ctl);
+ hyperv_crash_ctl = hv_get_register(HV_REGISTER_CRASH_CTL);
if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
hv_kmsg_dump_register();
@@ -1545,7 +1586,12 @@ err_connect:
err_cpuhp:
hv_synic_free();
err_alloc:
- hv_remove_vmbus_irq();
+ if (vmbus_irq == -1) {
+ hv_remove_vmbus_handler();
+ } else {
+ free_percpu_irq(vmbus_irq, vmbus_evt);
+ free_percpu(vmbus_evt);
+ }
err_setup:
bus_unregister(&hv_bus);
unregister_sysctl_table(hv_ctl_table_hdr);
@@ -1802,13 +1848,15 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
if (target_cpu == origin_cpu)
goto cpu_store_unlock;
- if (vmbus_send_modifychannel(channel->offermsg.child_relid,
+ if (vmbus_send_modifychannel(channel,
hv_cpu_number_to_vp_number(target_cpu))) {
ret = -EIO;
goto cpu_store_unlock;
}
/*
+ * For version before VERSION_WIN10_V5_3, the following warning holds:
+ *
* Warning. At this point, there is *no* guarantee that the host will
* have successfully processed the vmbus_send_modifychannel() request.
* See the header comment of vmbus_send_modifychannel() for more info.
@@ -2663,6 +2711,18 @@ static int __init hv_acpi_init(void)
ret = -ETIMEDOUT;
goto cleanup;
}
+
+ /*
+ * If we're on an architecture with a hardcoded hypervisor
+ * vector (i.e. x86/x64), override the VMbus interrupt found
+ * in the ACPI tables. Ensure vmbus_irq is not set since the
+ * normal Linux IRQ mechanism is not used in this case.
+ */
+#ifdef HYPERVISOR_CALLBACK_VECTOR
+ vmbus_interrupt = HYPERVISOR_CALLBACK_VECTOR;
+ vmbus_irq = -1;
+#endif
+
hv_debug_init();
ret = vmbus_bus_init();
@@ -2693,7 +2753,12 @@ static void __exit vmbus_exit(void)
vmbus_connection.conn_state = DISCONNECTED;
hv_stimer_global_cleanup();
vmbus_disconnect();
- hv_remove_vmbus_irq();
+ if (vmbus_irq == -1) {
+ hv_remove_vmbus_handler();
+ } else {
+ free_percpu_irq(vmbus_irq, vmbus_evt);
+ free_percpu(vmbus_evt);
+ }
for_each_online_cpu(cpu) {
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
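The request_percpu_irq()/free_percpu_irq() pairing in the hunks above is generic kernel API rather than anything Hyper-V specific. A minimal sketch of the pattern, assuming a hypothetical driver (my_irq, my_isr and the "my-dev" name are illustrative, not from the patch):

	#include <linux/interrupt.h>
	#include <linux/percpu.h>

	static long __percpu *my_evt;	/* per-cpu cookie, one slot per CPU */

	static irqreturn_t my_isr(int irq, void *dev_id)
	{
		/* dev_id points at this CPU's slot of my_evt */
		return IRQ_HANDLED;
	}

	static int my_setup(int my_irq)
	{
		int ret;

		my_evt = alloc_percpu(long);
		if (!my_evt)
			return -ENOMEM;

		ret = request_percpu_irq(my_irq, my_isr, "my-dev", my_evt);
		if (ret)
			free_percpu(my_evt);	/* mirror the error path above */
		return ret;
	}

	static void my_teardown(int my_irq)
	{
		free_percpu_irq(my_irq, my_evt);
		free_percpu(my_evt);
	}

Note that, unlike request_irq(), a per-cpu IRQ must still be enabled on each CPU via enable_percpu_irq() before it can fire there, which is what makes this model a fit for the per-vCPU VMbus interrupt.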
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index dd27b9dbe931..873ef38eb1c8 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -129,6 +129,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
dev_err(dev->dev, "High Speed not supported!\n");
+ t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
dev->master_cfg |= DW_IC_CON_SPEED_FAST;
dev->hs_hcnt = 0;
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index 5ac30d95650c..97d4f3ac0abd 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* i2c-exynos5.c - Samsung Exynos5 I2C Controller Driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index c45f226c2b85..aa00ba8bcb70 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2014 Linaro Ltd.
- * Copyright (c) 2014 Hisilicon Limited.
+ * Copyright (c) 2014 HiSilicon Limited.
*
* Now only support 7 bit address.
*/
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 8509c5f11356..55177eb21d7b 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -525,8 +525,8 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id)
i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA);
data = *i2c->wbuf;
data &= ~JZ4780_I2C_DC_READ;
- if ((!i2c->stop_hold) && (i2c->cdata->version >=
- ID_X1000))
+ if ((i2c->wt_len == 1) && (!i2c->stop_hold) &&
+ (i2c->cdata->version >= ID_X1000))
data |= X1000_I2C_DC_STOP;
jz4780_i2c_writew(i2c, JZ4780_I2C_DC, data);
i2c->wbuf++;
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index c590d36b5fd1..5c8e94b6cdb5 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -221,6 +221,10 @@ mv64xxx_i2c_hw_init(struct mv64xxx_i2c_data *drv_data)
writel(0, drv_data->reg_base + drv_data->reg_offsets.ext_addr);
writel(MV64XXX_I2C_REG_CONTROL_TWSIEN | MV64XXX_I2C_REG_CONTROL_STOP,
drv_data->reg_base + drv_data->reg_offsets.control);
+
+ if (drv_data->errata_delay)
+ udelay(5);
+
drv_data->state = MV64XXX_I2C_STATE_IDLE;
}
diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c
index 937c2c8fd349..4933fc8ce3fd 100644
--- a/drivers/i2c/busses/i2c-stm32f4.c
+++ b/drivers/i2c/busses/i2c-stm32f4.c
@@ -534,7 +534,7 @@ static void stm32f4_i2c_handle_rx_addr(struct stm32f4_i2c_dev *i2c_dev)
default:
/*
* N-byte reception:
- * Enable ACK, reset POS (ACK postion) and clear ADDR flag.
+ * Enable ACK, reset POS (ACK position) and clear ADDR flag.
* In that way, ACK will be sent as soon as the current byte
* will be received in the shift register
*/
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 63ebf722a424..f21362355973 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -378,7 +378,7 @@ static int i2c_gpio_init_recovery(struct i2c_adapter *adap)
static int i2c_init_recovery(struct i2c_adapter *adap)
{
struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
- char *err_str;
+ char *err_str, *err_level = KERN_ERR;
if (!bri)
return 0;
@@ -387,7 +387,8 @@ static int i2c_init_recovery(struct i2c_adapter *adap)
return -EPROBE_DEFER;
if (!bri->recover_bus) {
- err_str = "no recover_bus() found";
+ err_str = "no suitable method provided";
+ err_level = KERN_DEBUG;
goto err;
}
@@ -414,7 +415,7 @@ static int i2c_init_recovery(struct i2c_adapter *adap)
return 0;
err:
- dev_err(&adap->dev, "Not using recovery: %s\n", err_str);
+ dev_printk(err_level, &adap->dev, "Not using recovery: %s\n", err_str);
adap->bus_recovery_info = NULL;
return -EINVAL;
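The move to dev_printk() works because a kernel log level is just a string prefix (KERN_ERR, KERN_DEBUG, ...) that can be picked at run time; dev_err() and dev_dbg() are effectively fixed-level variants of the same call. A minimal sketch of the idiom, with a hypothetical quiet flag:

	const char *level = quiet ? KERN_DEBUG : KERN_ERR;

	dev_printk(level, &adap->dev, "Not using recovery: %s\n", err_str);

This keeps a single print site instead of duplicating the message across two branches, which is exactly what the hunk above does for the "no recovery method" case.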
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 0abce004a959..65e3e7df8a4b 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -76,7 +76,9 @@ static struct workqueue_struct *addr_wq;
static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
[LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
- .len = sizeof(struct rdma_nla_ls_gid)},
+ .len = sizeof(struct rdma_nla_ls_gid),
+ .validation_type = NLA_VALIDATE_MIN,
+ .min = sizeof(struct rdma_nla_ls_gid)},
};
static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
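For NLA_BINARY attributes, .len alone only enforces a maximum payload size; userspace could send fewer than sizeof(struct rdma_nla_ls_gid) bytes and the resolver would still consume a full GID's worth. With .validation_type = NLA_VALIDATE_MIN and .min set to the same size, the attribute is now pinned to exactly GID length.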
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 81903749d241..e42c812e74c3 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3616,7 +3616,8 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
c4iw_init_wr_wait(ep->com.wr_waitp);
err = cxgb4_remove_server(
ep->com.dev->rdev.lldi.ports[0], ep->stid,
- ep->com.dev->rdev.lldi.rxq_ids[0], true);
+ ep->com.dev->rdev.lldi.rxq_ids[0],
+ ep->com.local_addr.ss_family == AF_INET6);
if (err)
goto done;
err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 2a91b8d95e12..04b1e8f021f6 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -632,22 +632,11 @@ static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
*/
int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{
- int node = pcibus_to_node(dd->pcidev->bus);
struct hfi1_affinity_node *entry;
const struct cpumask *local_mask;
int curr_cpu, possible, i, ret;
bool new_entry = false;
- /*
- * If the BIOS does not have the NUMA node information set, select
- * NUMA 0 so we get consistent performance.
- */
- if (node < 0) {
- dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
- node = 0;
- }
- dd->node = node;
-
local_mask = cpumask_of_node(dd->node);
if (cpumask_first(local_mask) >= nr_cpu_ids)
local_mask = topology_core_cpumask(0);
@@ -660,7 +649,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
* create an entry in the global affinity structure and initialize it.
*/
if (!entry) {
- entry = node_affinity_allocate(node);
+ entry = node_affinity_allocate(dd->node);
if (!entry) {
dd_dev_err(dd,
"Unable to allocate global affinity node\n");
@@ -751,6 +740,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
if (new_entry)
node_affinity_add_tail(entry);
+ dd->affinity_entry = entry;
mutex_unlock(&node_affinity.lock);
return 0;
@@ -766,10 +756,9 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
{
struct hfi1_affinity_node *entry;
- if (dd->node < 0)
- return;
-
mutex_lock(&node_affinity.lock);
+ if (!dd->affinity_entry)
+ goto unlock;
entry = node_affinity_lookup(dd->node);
if (!entry)
goto unlock;
@@ -780,8 +769,8 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
*/
_dev_comp_vect_cpu_mask_clean_up(dd, entry);
unlock:
+ dd->affinity_entry = NULL;
mutex_unlock(&node_affinity.lock);
- dd->node = NUMA_NO_NODE;
}
/*
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index e09e8244a94c..2a9a040569eb 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1409,6 +1409,7 @@ struct hfi1_devdata {
spinlock_t irq_src_lock;
int vnic_num_vports;
struct net_device *dummy_netdev;
+ struct hfi1_affinity_node *affinity_entry;
/* Keeps track of IPoIB RSM rule users */
atomic_t ipoib_rsm_usr_num;
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index cb7ad1288821..786c6316273f 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1277,7 +1277,6 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
dd->pport = (struct hfi1_pportdata *)(dd + 1);
dd->pcidev = pdev;
pci_set_drvdata(pdev, dd);
- dd->node = NUMA_NO_NODE;
ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
GFP_KERNEL);
@@ -1287,6 +1286,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
goto bail;
}
rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
+ /*
+ * If the BIOS does not have the NUMA node information set, select
+ * NUMA 0 so we get consistent performance.
+ */
+ dd->node = pcibus_to_node(pdev->bus);
+ if (dd->node == NUMA_NO_NODE) {
+ dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
+ dd->node = 0;
+ }
/*
* Initialize all locks for the device. This needs to be as early as
diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c
index 1fb6e1a0e4e1..1bcab992ac26 100644
--- a/drivers/infiniband/hw/hfi1/netdev_rx.c
+++ b/drivers/infiniband/hw/hfi1/netdev_rx.c
@@ -173,8 +173,7 @@ u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
return 0;
}
- cpumask_and(node_cpu_mask, cpu_mask,
- cpumask_of_node(pcibus_to_node(dd->pcidev->bus)));
+ cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node));
available_cpus = cpumask_weight(node_cpu_mask);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 0eb6a7a618e0..9ea542270ed4 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1244,7 +1244,8 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
* TGT QP isn't associated with RQ/SQ
*/
if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
- (attrs->qp_type != IB_QPT_XRC_TGT)) {
+ (attrs->qp_type != IB_QPT_XRC_TGT) &&
+ (attrs->qp_type != IB_QPT_XRC_INI)) {
struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 0a08b4b742a3..6734329cca33 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -2720,8 +2720,8 @@ void rtrs_clt_close(struct rtrs_clt *clt)
/* Now it is safe to iterate over all paths without locks */
list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
- rtrs_clt_destroy_sess_files(sess, NULL);
rtrs_clt_close_conns(sess, true);
+ rtrs_clt_destroy_sess_files(sess, NULL);
kobject_put(&sess->kobj);
}
free_clt(clt);
diff --git a/drivers/input/joystick/n64joy.c b/drivers/input/joystick/n64joy.c
index 8bcc529942bc..9dbca366613e 100644
--- a/drivers/input/joystick/n64joy.c
+++ b/drivers/input/joystick/n64joy.c
@@ -252,8 +252,8 @@ static int __init n64joy_probe(struct platform_device *pdev)
mutex_init(&priv->n64joy_mutex);
priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (!priv->reg_base) {
- err = -EINVAL;
+ if (IS_ERR(priv->reg_base)) {
+ err = PTR_ERR(priv->reg_base);
goto fail;
}
diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c
index 63d5e488137d..e9fa1423f136 100644
--- a/drivers/input/keyboard/nspire-keypad.c
+++ b/drivers/input/keyboard/nspire-keypad.c
@@ -93,9 +93,15 @@ static irqreturn_t nspire_keypad_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
+static int nspire_keypad_open(struct input_dev *input)
{
+ struct nspire_keypad *keypad = input_get_drvdata(input);
unsigned long val = 0, cycles_per_us, delay_cycles, row_delay_cycles;
+ int error;
+
+ error = clk_prepare_enable(keypad->clk);
+ if (error)
+ return error;
cycles_per_us = (clk_get_rate(keypad->clk) / 1000000);
if (cycles_per_us == 0)
@@ -121,30 +127,6 @@ static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
keypad->int_mask = 1 << 1;
writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK);
- /* Disable GPIO interrupts to prevent hanging on touchpad */
- /* Possibly used to detect touchpad events */
- writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
- /* Acknowledge existing interrupts */
- writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
-
- return 0;
-}
-
-static int nspire_keypad_open(struct input_dev *input)
-{
- struct nspire_keypad *keypad = input_get_drvdata(input);
- int error;
-
- error = clk_prepare_enable(keypad->clk);
- if (error)
- return error;
-
- error = nspire_keypad_chip_init(keypad);
- if (error) {
- clk_disable_unprepare(keypad->clk);
- return error;
- }
-
return 0;
}
@@ -152,6 +134,11 @@ static void nspire_keypad_close(struct input_dev *input)
{
struct nspire_keypad *keypad = input_get_drvdata(input);
+ /* Disable interrupts */
+ writel(0, keypad->reg_base + KEYPAD_INTMSK);
+ /* Acknowledge existing interrupts */
+ writel(~0, keypad->reg_base + KEYPAD_INT);
+
clk_disable_unprepare(keypad->clk);
}
@@ -210,6 +197,25 @@ static int nspire_keypad_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ error = clk_prepare_enable(keypad->clk);
+ if (error) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ return error;
+ }
+
+ /* Disable interrupts */
+ writel(0, keypad->reg_base + KEYPAD_INTMSK);
+ /* Acknowledge existing interrupts */
+ writel(~0, keypad->reg_base + KEYPAD_INT);
+
+ /* Disable GPIO interrupts to prevent hanging on touchpad */
+ /* Possibly used to detect touchpad events */
+ writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
+ /* Acknowledge existing GPIO interrupts */
+ writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
+
+ clk_disable_unprepare(keypad->clk);
+
input_set_drvdata(input, keypad);
input->id.bustype = BUS_HOST;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 9119e12a5778..a5a003553646 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -588,6 +588,7 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
},
+ }, {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 4c2b579f6c8b..5f7706febcb0 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -1441,7 +1441,7 @@ static int elants_i2c_probe(struct i2c_client *client,
touchscreen_parse_properties(ts->input, true, &ts->prop);
- if (ts->chip_id == EKTF3624) {
+ if (ts->chip_id == EKTF3624 && ts->phy_x && ts->phy_y) {
/* calculate resolution from size */
ts->x_res = DIV_ROUND_CLOSEST(ts->prop.max_x, ts->phy_x);
ts->y_res = DIV_ROUND_CLOSEST(ts->prop.max_y, ts->phy_y);
@@ -1449,8 +1449,7 @@ static int elants_i2c_probe(struct i2c_client *client,
input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res);
input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
- if (ts->major_res > 0)
- input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, ts->major_res);
+ input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, ts->major_res);
error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
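The added ts->phy_x && ts->phy_y guard prevents a divide-by-zero when the firmware reports no physical panel size; the computed values are logical units per millimetre. As a worked example with assumed numbers: for prop.max_x = 2240 and phy_x = 70 (mm), DIV_ROUND_CLOSEST(2240, 70) = 32 units/mm.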
diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c
index b63d7fdf0cd2..85a1f465c097 100644
--- a/drivers/input/touchscreen/s6sy761.c
+++ b/drivers/input/touchscreen/s6sy761.c
@@ -145,8 +145,8 @@ static void s6sy761_report_coordinates(struct s6sy761_data *sdata,
u8 major = event[4];
u8 minor = event[5];
u8 z = event[6] & S6SY761_MASK_Z;
- u16 x = (event[1] << 3) | ((event[3] & S6SY761_MASK_X) >> 4);
- u16 y = (event[2] << 3) | (event[3] & S6SY761_MASK_Y);
+ u16 x = (event[1] << 4) | ((event[3] & S6SY761_MASK_X) >> 4);
+ u16 y = (event[2] << 4) | (event[3] & S6SY761_MASK_Y);
input_mt_slot(sdata->input, tid);
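The shift change above follows from the packing this controller appears to use: each 12-bit coordinate is 8 high bits (event[1] or event[2]) plus a 4-bit low nibble in event[3], so shifting by 3 both truncated the range and misaligned the nibble. A decode sketch under that layout assumption, with made-up bytes:

	/* 12-bit unpack implied by the fix: 8 high bits + 4 low bits */
	u16 x = (event[1] << 4) | (event[3] >> 4);
	u16 y = (event[2] << 4) | (event[3] & 0xf);

	/*
	 * e.g. event[1] = 0x7f, event[3] = 0xa5: x = 0x7fa = 2042;
	 * the old << 3 yields 0x3fa = 1018, squeezing every touch
	 * into half of the axis.
	 */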
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 15536e321df5..c8f57e3e058d 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -279,8 +279,13 @@ config XTENSA_MX
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config XILINX_INTC
- bool
+ bool "Xilinx Interrupt Controller IP"
+ depends on MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP
select IRQ_DOMAIN
+ help
+ Support for the Xilinx Interrupt Controller IP core.
+ This is used as a primary controller with MicroBlaze and can also
+ be used as a secondary chained controller on other platforms.
config IRQ_CROSSBAR
bool
@@ -577,4 +582,15 @@ config MST_IRQ
help
Support MStar Interrupt Controller.
+config WPCM450_AIC
+ bool "Nuvoton WPCM450 Advanced Interrupt Controller"
+ depends on ARCH_WPCM450
+ help
+ Support for the interrupt controller in the Nuvoton WPCM450 BMC SoC.
+
+config IRQ_IDT3243X
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index c59b95a0532c..18573602a939 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -113,3 +113,5 @@ obj-$(CONFIG_LOONGSON_PCH_MSI) += irq-loongson-pch-msi.o
obj-$(CONFIG_MST_IRQ) += irq-mst-intc.o
obj-$(CONFIG_SL28CPLD_INTC) += irq-sl28cpld.o
obj-$(CONFIG_MACH_REALTEK_RTL) += irq-realtek-rtl.o
+obj-$(CONFIG_WPCM450_AIC) += irq-wpcm450-aic.o
+obj-$(CONFIG_IRQ_IDT3243X) += irq-idt3243x.o
diff --git a/drivers/irqchip/irq-aspeed-vic.c b/drivers/irqchip/irq-aspeed-vic.c
index 6567ed782f82..58717cd44f99 100644
--- a/drivers/irqchip/irq-aspeed-vic.c
+++ b/drivers/irqchip/irq-aspeed-vic.c
@@ -71,7 +71,7 @@ static void vic_init_hw(struct aspeed_vic *vic)
writel(0, vic->base + AVIC_INT_SELECT);
writel(0, vic->base + AVIC_INT_SELECT + 4);
- /* Some interrupts have a programable high/low level trigger
+ /* Some interrupts have a programmable high/low level trigger
* (4 GPIO direct inputs), for now we assume this was configured
* by firmware. We read which ones are edge now.
*/
@@ -203,7 +203,7 @@ static int __init avic_of_init(struct device_node *node,
}
vic->base = regs;
- /* Initialize soures, all masked */
+ /* Initialize sources, all masked */
vic_init_hw(vic);
/* Ready to receive interrupts */
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index c7c9e976acbb..ad59656ccc28 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -309,7 +309,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
if (data->can_wake) {
/* This IRQ chip can wake the system, set all
- * relevant child interupts in wake_enabled mask
+ * relevant child interrupts in wake_enabled mask
*/
gc->wake_enabled = 0xffffffff;
gc->wake_enabled &= ~gc->unused;
diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c
index 5a2ec43b7ddd..ab91afa86755 100644
--- a/drivers/irqchip/irq-csky-apb-intc.c
+++ b/drivers/irqchip/irq-csky-apb-intc.c
@@ -176,7 +176,7 @@ gx_intc_init(struct device_node *node, struct device_node *parent)
writel(0x0, reg_base + GX_INTC_NEN63_32);
/*
- * Initial mask reg with all unmasked, because we only use enalbe reg
+ * Initial mask reg with all unmasked, because we only use enable reg
*/
writel(0x0, reg_base + GX_INTC_NMASK31_00);
writel(0x0, reg_base + GX_INTC_NMASK63_32);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index fbec07d634ad..4116b48e60af 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -371,7 +371,7 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
* the MSI data is the absolute value within the range from
* spi_start to (spi_start + num_spis).
*
- * Broadom NS2 GICv2m implementation has an erratum where the MSI data
+ * Broadcom NS2 GICv2m implementation has an erratum where the MSI data
* is 'spi_number - 32'
*
* Reading that register fails on the Graviton implementation
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index ed46e6057e33..c3485b230d70 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1492,7 +1492,7 @@ static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
*
* Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
* value or to 1023, depending on the enable bit. But that
- * would be issueing a mapping for an /existing/ DevID+EventID
+ * would be issuing a mapping for an /existing/ DevID+EventID
* pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
* to the /same/ vPE, using this opportunity to adjust the
* doorbell. Mouahahahaha. We loves it, Precious.
@@ -3122,7 +3122,7 @@ static void its_cpu_init_lpis(void)
/*
* It's possible for CPU to receive VLPIs before it is
- * sheduled as a vPE, especially for the first CPU, and the
+ * scheduled as a vPE, especially for the first CPU, and the
* VLPI with INTID larger than 2^(IDbits+1) will be considered
* as out of range and dropped by GIC.
* So we initialize IDbits to known value to avoid VLPI drop.
@@ -3616,7 +3616,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
/*
* If all interrupts have been freed, start mopping the
- * floor. This is conditionned on the device not being shared.
+ * floor. This is conditioned on the device not being shared.
*/
if (!its_dev->shared &&
bitmap_empty(its_dev->event_map.lpi_map,
@@ -4194,7 +4194,7 @@ static int its_sgi_set_affinity(struct irq_data *d,
{
/*
* There is no notion of affinity for virtual SGIs, at least
- * not on the host (since they can only be targetting a vPE).
+ * not on the host (since they can only be targeting a vPE).
* Tell the kernel we've done whatever it asked for.
*/
irq_data_update_effective_affinity(d, mask_val);
@@ -4239,7 +4239,7 @@ static int its_sgi_get_irqchip_state(struct irq_data *d,
/*
* Locking galore! We can race against two different events:
*
- * - Concurent vPE affinity change: we must make sure it cannot
+ * - Concurrent vPE affinity change: we must make sure it cannot
* happen, or we'll talk to the wrong redistributor. This is
* identical to what happens with vLPIs.
*
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index 563a9b366294..e81e89a81cb5 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -303,7 +303,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
reg = of_get_property(np, "mbi-alias", NULL);
if (reg) {
mbi_phys_base = of_translate_address(np, reg);
- if (mbi_phys_base == OF_BAD_ADDR) {
+ if (mbi_phys_base == (phys_addr_t)OF_BAD_ADDR) {
ret = -ENXIO;
goto err_free_mbi;
}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index eb0ee356a629..37a23aa6de37 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -648,6 +648,10 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
irqnr = gic_read_iar();
+ /* Check for special IDs first */
+ if ((irqnr >= 1020 && irqnr <= 1023))
+ return;
+
if (gic_supports_nmi() &&
unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
gic_handle_nmi(irqnr, regs);
@@ -659,10 +663,6 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
gic_arch_enable_irqs();
}
- /* Check for special IDs first */
- if ((irqnr >= 1020 && irqnr <= 1023))
- return;
-
if (static_branch_likely(&supports_deactivate_key))
gic_write_eoir(irqnr);
else
@@ -1379,7 +1379,7 @@ static int gic_irq_domain_translate(struct irq_domain *d,
/*
* Make it clear that broken DTs are... broken.
- * Partitionned PPIs are an unfortunate exception.
+ * Partitioned PPIs are an unfortunate exception.
*/
WARN_ON(*type == IRQ_TYPE_NONE &&
fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index 5d1dc9915272..4ea71b28f9f5 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -87,17 +87,40 @@ static struct irq_domain *gic_domain;
static const struct irq_domain_ops *vpe_domain_ops;
static const struct irq_domain_ops *sgi_domain_ops;
+#ifdef CONFIG_ARM64
+#include <asm/cpufeature.h>
+
+bool gic_cpuif_has_vsgi(void)
+{
+ unsigned long fld, reg = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+ fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_GIC_SHIFT);
+
+ return fld >= 0x3;
+}
+#else
+bool gic_cpuif_has_vsgi(void)
+{
+ return false;
+}
+#endif
+
static bool has_v4_1(void)
{
return !!sgi_domain_ops;
}
+static bool has_v4_1_sgi(void)
+{
+ return has_v4_1() && gic_cpuif_has_vsgi();
+}
+
static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx)
{
char *name;
int sgi_base;
- if (!has_v4_1())
+ if (!has_v4_1_sgi())
return 0;
name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current));
@@ -182,7 +205,7 @@ static void its_free_sgi_irqs(struct its_vm *vm)
{
int i;
- if (!has_v4_1())
+ if (!has_v4_1_sgi())
return;
for (i = 0; i < vm->nr_vpes; i++) {
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index a6ed877d9dd3..058ebaebe2c4 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Hisilicon HiP04 INTC
+ * HiSilicon HiP04 INTC
*
* Copyright (C) 2002-2014 ARM Limited.
- * Copyright (c) 2013-2014 Hisilicon Ltd.
+ * Copyright (c) 2013-2014 HiSilicon Ltd.
* Copyright (c) 2013-2014 Linaro Ltd.
*
* Interrupt architecture for the HIP04 INTC:
diff --git a/drivers/irqchip/irq-idt3243x.c b/drivers/irqchip/irq-idt3243x.c
new file mode 100644
index 000000000000..f0996820077a
--- /dev/null
+++ b/drivers/irqchip/irq-idt3243x.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for IDT/Renesas 79RC3243x Interrupt Controller.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define IDT_PIC_NR_IRQS 32
+
+#define IDT_PIC_IRQ_PEND 0x00
+#define IDT_PIC_IRQ_MASK 0x08
+
+struct idt_pic_data {
+ void __iomem *base;
+ struct irq_domain *irq_domain;
+ struct irq_chip_generic *gc;
+};
+
+static void idt_irq_dispatch(struct irq_desc *desc)
+{
+ struct idt_pic_data *idtpic = irq_desc_get_handler_data(desc);
+ struct irq_chip *host_chip = irq_desc_get_chip(desc);
+ u32 pending, hwirq, virq;
+
+ chained_irq_enter(host_chip, desc);
+
+ pending = irq_reg_readl(idtpic->gc, IDT_PIC_IRQ_PEND);
+ pending &= ~idtpic->gc->mask_cache;
+ while (pending) {
+ hwirq = __fls(pending);
+ virq = irq_linear_revmap(idtpic->irq_domain, hwirq);
+ if (virq)
+ generic_handle_irq(virq);
+ pending &= ~(1 << hwirq);
+ }
+
+ chained_irq_exit(host_chip, desc);
+}
+
+static int idt_pic_init(struct device_node *of_node, struct device_node *parent)
+{
+ struct irq_domain *domain;
+ struct idt_pic_data *idtpic;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ unsigned int parent_irq;
+ int ret = 0;
+
+ idtpic = kzalloc(sizeof(*idtpic), GFP_KERNEL);
+ if (!idtpic) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ parent_irq = irq_of_parse_and_map(of_node, 0);
+ if (!parent_irq) {
+ pr_err("Failed to map parent IRQ!\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ idtpic->base = of_iomap(of_node, 0);
+ if (!idtpic->base) {
+ pr_err("Failed to map base address!\n");
+ ret = -ENOMEM;
+ goto out_unmap_irq;
+ }
+
+ domain = irq_domain_add_linear(of_node, IDT_PIC_NR_IRQS,
+ &irq_generic_chip_ops, NULL);
+ if (!domain) {
+ pr_err("Failed to add irqdomain!\n");
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+ idtpic->irq_domain = domain;
+
+ ret = irq_alloc_domain_generic_chips(domain, 32, 1, "IDTPIC",
+ handle_level_irq, 0,
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+ if (ret)
+ goto out_domain_remove;
+
+ gc = irq_get_domain_generic_chip(domain, 0);
+ gc->reg_base = idtpic->base;
+ gc->private = idtpic;
+
+ ct = gc->chip_types;
+ ct->regs.mask = IDT_PIC_IRQ_MASK;
+ ct->chip.irq_mask = irq_gc_mask_set_bit;
+ ct->chip.irq_unmask = irq_gc_mask_clr_bit;
+ idtpic->gc = gc;
+
+ /* Mask interrupts. */
+ writel(0xffffffff, idtpic->base + IDT_PIC_IRQ_MASK);
+ gc->mask_cache = 0xffffffff;
+
+ irq_set_chained_handler_and_data(parent_irq,
+ idt_irq_dispatch, idtpic);
+
+ return 0;
+
+out_domain_remove:
+ irq_domain_remove(domain);
+out_iounmap:
+ iounmap(idtpic->base);
+out_unmap_irq:
+ irq_dispose_mapping(parent_irq);
+out_free:
+ kfree(idtpic);
+out_err:
+ pr_err("Failed to initialize! (errno = %d)\n", ret);
+ return ret;
+}
+
+IRQCHIP_DECLARE(idt_pic, "idt,32434-pic", idt_pic_init);
diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
index 033bccb41455..5f47d8ee4ae3 100644
--- a/drivers/irqchip/irq-jcore-aic.c
+++ b/drivers/irqchip/irq-jcore-aic.c
@@ -100,11 +100,11 @@ static int __init aic_irq_of_init(struct device_node *node,
jcore_aic.irq_unmask = noop;
jcore_aic.name = "AIC";
- domain = irq_domain_add_linear(node, dom_sz, &jcore_aic_irqdomain_ops,
+ domain = irq_domain_add_legacy(node, dom_sz - min_irq, min_irq, min_irq,
+ &jcore_aic_irqdomain_ops,
&jcore_aic);
if (!domain)
return -ENOMEM;
- irq_create_strict_mappings(domain, min_irq, min_irq, dom_sz - min_irq);
return 0;
}
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index 9bf6b9a5f734..f790ca6d78aa 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -163,7 +163,7 @@ static void pch_pic_reset(struct pch_pic *priv)
int i;
for (i = 0; i < PIC_COUNT; i++) {
- /* Write vectore ID */
+ /* Write vectored ID */
writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i));
/* Hardcode route to HT0 Lo */
writeb(1, priv->base + PCH_INT_ROUTE(i));
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index ff7627b57772..2cb45c6b8501 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2015 Hisilicon Limited, All Rights Reserved.
+ * Copyright (C) 2015 HiSilicon Limited, All Rights Reserved.
* Author: Jun Ma <majun258@huawei.com>
* Author: Yun Wu <wuyun.wu@huawei.com>
*/
@@ -390,4 +390,4 @@ module_platform_driver(mbigen_platform_driver);
MODULE_AUTHOR("Jun Ma <majun258@huawei.com>");
MODULE_AUTHOR("Yun Wu <wuyun.wu@huawei.com>");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Hisilicon MBI Generator driver");
+MODULE_DESCRIPTION("HiSilicon MBI Generator driver");
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index bc7aebcc96e9..e50676ce2ec8 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -227,7 +227,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
/*
* Get the hwirq number assigned to this channel through
- * a pointer the channel_irq table. The added benifit of this
+ * a pointer to the channel_irq table. The added benefit of this
* method is that we can also retrieve the channel index with
* it, using the table base.
*/
diff --git a/drivers/irqchip/irq-mst-intc.c b/drivers/irqchip/irq-mst-intc.c
index 143657b0cf28..f6133ae28155 100644
--- a/drivers/irqchip/irq-mst-intc.c
+++ b/drivers/irqchip/irq-mst-intc.c
@@ -13,15 +13,27 @@
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
-#define INTC_MASK 0x0
-#define INTC_EOI 0x20
+#define MST_INTC_MAX_IRQS 64
+
+#define INTC_MASK 0x0
+#define INTC_REV_POLARITY 0x10
+#define INTC_EOI 0x20
+
+#ifdef CONFIG_PM_SLEEP
+static LIST_HEAD(mst_intc_list);
+#endif
struct mst_intc_chip_data {
raw_spinlock_t lock;
unsigned int irq_start, nr_irqs;
void __iomem *base;
bool no_eoi;
+#ifdef CONFIG_PM_SLEEP
+ struct list_head entry;
+ u16 saved_polarity_conf[DIV_ROUND_UP(MST_INTC_MAX_IRQS, 16)];
+#endif
};
static void mst_set_irq(struct irq_data *d, u32 offset)
@@ -78,6 +90,24 @@ static void mst_intc_eoi_irq(struct irq_data *d)
irq_chip_eoi_parent(d);
}
+static int mst_irq_chip_set_type(struct irq_data *data, unsigned int type)
+{
+ switch (type) {
+ case IRQ_TYPE_LEVEL_LOW:
+ case IRQ_TYPE_EDGE_FALLING:
+ mst_set_irq(data, INTC_REV_POLARITY);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ case IRQ_TYPE_EDGE_RISING:
+ mst_clear_irq(data, INTC_REV_POLARITY);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return irq_chip_set_type_parent(data, IRQ_TYPE_LEVEL_HIGH);
+}
+
static struct irq_chip mst_intc_chip = {
.name = "mst-intc",
.irq_mask = mst_intc_mask_irq,
@@ -87,13 +117,62 @@ static struct irq_chip mst_intc_chip = {
.irq_set_irqchip_state = irq_chip_set_parent_state,
.irq_set_affinity = irq_chip_set_affinity_parent,
.irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent,
- .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_type = mst_irq_chip_set_type,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.flags = IRQCHIP_SET_TYPE_MASKED |
IRQCHIP_SKIP_SET_WAKE |
IRQCHIP_MASK_ON_SUSPEND,
};
+#ifdef CONFIG_PM_SLEEP
+static void mst_intc_polarity_save(struct mst_intc_chip_data *cd)
+{
+ int i;
+ void __iomem *addr = cd->base + INTC_REV_POLARITY;
+
+ for (i = 0; i < DIV_ROUND_UP(cd->nr_irqs, 16); i++)
+ cd->saved_polarity_conf[i] = readw_relaxed(addr + i * 4);
+}
+
+static void mst_intc_polarity_restore(struct mst_intc_chip_data *cd)
+{
+ int i;
+ void __iomem *addr = cd->base + INTC_REV_POLARITY;
+
+ for (i = 0; i < DIV_ROUND_UP(cd->nr_irqs, 16); i++)
+ writew_relaxed(cd->saved_polarity_conf[i], addr + i * 4);
+}
+
+static void mst_irq_resume(void)
+{
+ struct mst_intc_chip_data *cd;
+
+ list_for_each_entry(cd, &mst_intc_list, entry)
+ mst_intc_polarity_restore(cd);
+}
+
+static int mst_irq_suspend(void)
+{
+ struct mst_intc_chip_data *cd;
+
+ list_for_each_entry(cd, &mst_intc_list, entry)
+ mst_intc_polarity_save(cd);
+ return 0;
+}
+
+static struct syscore_ops mst_irq_syscore_ops = {
+ .suspend = mst_irq_suspend,
+ .resume = mst_irq_resume,
+};
+
+static int __init mst_irq_pm_init(void)
+{
+ register_syscore_ops(&mst_irq_syscore_ops);
+ return 0;
+}
+late_initcall(mst_irq_pm_init);
+#endif
+
static int mst_intc_domain_translate(struct irq_domain *d,
struct irq_fwspec *fwspec,
unsigned long *hwirq,
@@ -145,6 +224,15 @@ static int mst_intc_domain_alloc(struct irq_domain *domain, unsigned int virq,
parent_fwspec = *fwspec;
parent_fwspec.fwnode = domain->parent->fwnode;
parent_fwspec.param[1] = cd->irq_start + hwirq;
+
+ /*
+ * mst-intc latch the interrupt request if it's edge triggered,
+ * so the output signal to parent GIC is always level sensitive.
+ * And if the irq signal is active low, configure it to active high
+ * to meet GIC SPI spec in mst_irq_chip_set_type via REV_POLARITY bit.
+ */
+ parent_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
+
return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_fwspec);
}
@@ -193,6 +281,10 @@ static int __init mst_intc_of_init(struct device_node *dn,
return -ENOMEM;
}
+#ifdef CONFIG_PM_SLEEP
+ INIT_LIST_HEAD(&cd->entry);
+ list_add_tail(&cd->entry, &mst_intc_list);
+#endif
return 0;
}
diff --git a/drivers/irqchip/irq-mtk-cirq.c b/drivers/irqchip/irq-mtk-cirq.c
index 69ba8ce3c178..9bca0918078e 100644
--- a/drivers/irqchip/irq-mtk-cirq.c
+++ b/drivers/irqchip/irq-mtk-cirq.c
@@ -217,7 +217,7 @@ static void mtk_cirq_resume(void)
{
u32 value;
- /* flush recored interrupts, will send signals to parent controller */
+ /* flush recorded interrupts, will send signals to parent controller */
value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
writel_relaxed(value | CIRQ_FLUSH, cirq_data->base + CIRQ_CONTROL);
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index a671938fd97f..d1f5740cd575 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -58,7 +58,7 @@ struct icoll_priv {
static struct icoll_priv icoll_priv;
static struct irq_domain *icoll_domain;
-/* calculate bit offset depending on number of intterupt per register */
+/* calculate bit offset depending on number of interrupts per register */
static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit)
{
/*
@@ -68,7 +68,7 @@ static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit)
return bit << ((d->hwirq & 3) << 3);
}
-/* calculate mem offset depending on number of intterupt per register */
+/* calculate mem offset depending on number of interrupts per register */
static void __iomem *icoll_intr_reg(struct irq_data *d)
{
/* offset = hwirq / intr_per_reg * 0x10 */
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 6f432d2a5ceb..97d4d04b0a80 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -77,8 +77,8 @@ struct plic_handler {
void __iomem *enable_base;
struct plic_priv *priv;
};
-static int plic_parent_irq;
-static bool plic_cpuhp_setup_done;
+static int plic_parent_irq __ro_after_init;
+static bool plic_cpuhp_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
static inline void plic_toggle(struct plic_handler *handler,
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 8662d7b7b262..b9db90c4aa56 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -193,7 +193,14 @@ static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
{ .exti = 23, .irq_parent = 72, .chip = &stm32_exti_h_chip_direct },
{ .exti = 24, .irq_parent = 95, .chip = &stm32_exti_h_chip_direct },
{ .exti = 25, .irq_parent = 107, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 26, .irq_parent = 37, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 27, .irq_parent = 38, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 28, .irq_parent = 39, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 29, .irq_parent = 71, .chip = &stm32_exti_h_chip_direct },
{ .exti = 30, .irq_parent = 52, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 31, .irq_parent = 53, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 32, .irq_parent = 82, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 33, .irq_parent = 83, .chip = &stm32_exti_h_chip_direct },
{ .exti = 47, .irq_parent = 93, .chip = &stm32_exti_h_chip_direct },
{ .exti = 48, .irq_parent = 138, .chip = &stm32_exti_h_chip_direct },
{ .exti = 50, .irq_parent = 139, .chip = &stm32_exti_h_chip_direct },
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index fb78d6623556..9ea94456b178 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -189,7 +189,7 @@ static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
* 3) spurious irq
* So if we immediately get a reading of 0, check the irq-pending reg
* to differentiate between 2 and 3. We only do this once to avoid
- * the extra check in the common case of 1 hapening after having
+ * the extra check in the common case of 1 happening after having
* read the vector-reg once.
*/
hwirq = readl(irq_ic_data->irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c
index 9e456497c1c4..9a63b02b8176 100644
--- a/drivers/irqchip/irq-tb10x.c
+++ b/drivers/irqchip/irq-tb10x.c
@@ -60,6 +60,7 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
break;
case IRQ_TYPE_NONE:
flow_type = IRQ_TYPE_LEVEL_LOW;
+ fallthrough;
case IRQ_TYPE_LEVEL_LOW:
mod ^= im;
pol ^= im;
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index 532d0ae172d9..ca1f593f4d13 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -78,7 +78,7 @@ struct ti_sci_inta_vint_desc {
* struct ti_sci_inta_irq_domain - Structure representing a TISCI based
* Interrupt Aggregator IRQ domain.
* @sci: Pointer to TISCI handle
- * @vint: TISCI resource pointer representing IA inerrupts.
+ * @vint: TISCI resource pointer representing IA interrupts.
* @global_event: TISCI resource pointer representing global events.
* @vint_list: List of the vints active in the system
* @vint_mutex: Mutex to protect vint_list
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index e46036374227..62f3d29f9042 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -163,7 +163,7 @@ static struct syscore_ops vic_syscore_ops = {
};
/**
- * vic_pm_init - initicall to register VIC pm
+ * vic_pm_init - initcall to register VIC pm
*
* This is called via late_initcall() to register
* the resources for the VICs due to the early
@@ -397,7 +397,7 @@ static void __init vic_clear_interrupts(void __iomem *base)
/*
* The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
* The original cell has 32 interrupts, while the modified one has 64,
- * replocating two blocks 0x00..0x1f in 0x20..0x3f. In that case
+ * replicating two blocks 0x00..0x1f in 0x20..0x3f. In that case
* the probe function is called twice, with base set to offset 000
* and 020 within the page. We call this "second block".
*/
diff --git a/drivers/irqchip/irq-wpcm450-aic.c b/drivers/irqchip/irq-wpcm450-aic.c
new file mode 100644
index 000000000000..f3ac392d5bc8
--- /dev/null
+++ b/drivers/irqchip/irq-wpcm450-aic.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2021 Jonathan Neuschäfer
+
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/printk.h>
+
+#include <asm/exception.h>
+
+#define AIC_SCR(x) ((x)*4) /* Source control registers */
+#define AIC_GEN 0x84 /* Interrupt group enable control register */
+#define AIC_GRSR 0x88 /* Interrupt group raw status register */
+#define AIC_IRSR 0x100 /* Interrupt raw status register */
+#define AIC_IASR 0x104 /* Interrupt active status register */
+#define AIC_ISR 0x108 /* Interrupt status register */
+#define AIC_IPER 0x10c /* Interrupt priority encoding register */
+#define AIC_ISNR 0x110 /* Interrupt source number register */
+#define AIC_IMR 0x114 /* Interrupt mask register */
+#define AIC_OISR 0x118 /* Output interrupt status register */
+#define AIC_MECR 0x120 /* Mask enable command register */
+#define AIC_MDCR 0x124 /* Mask disable command register */
+#define AIC_SSCR 0x128 /* Source set command register */
+#define AIC_SCCR 0x12c /* Source clear command register */
+#define AIC_EOSCR 0x130 /* End of service command register */
+
+#define AIC_SCR_SRCTYPE_LOW_LEVEL (0 << 6)
+#define AIC_SCR_SRCTYPE_HIGH_LEVEL (1 << 6)
+#define AIC_SCR_SRCTYPE_NEG_EDGE (2 << 6)
+#define AIC_SCR_SRCTYPE_POS_EDGE (3 << 6)
+#define AIC_SCR_PRIORITY(x) (x)
+#define AIC_SCR_PRIORITY_MASK 0x7
+
+#define AIC_NUM_IRQS 32
+
+struct wpcm450_aic {
+ void __iomem *regs;
+ struct irq_domain *domain;
+};
+
+static struct wpcm450_aic *aic;
+
+static void wpcm450_aic_init_hw(void)
+{
+ int i;
+
+ /* Disable (mask) all interrupts */
+ writel(0xffffffff, aic->regs + AIC_MDCR);
+
+ /*
+ * Make sure the interrupt controller is ready to serve new interrupts.
+ * Reading from IPER indicates that the nIRQ signal may be deasserted,
+ * and writing to EOSCR indicates that interrupt handling has finished.
+ */
+ readl(aic->regs + AIC_IPER);
+ writel(0, aic->regs + AIC_EOSCR);
+
+ /* Initialize trigger mode and priority of each interrupt source */
+ for (i = 0; i < AIC_NUM_IRQS; i++)
+ writel(AIC_SCR_SRCTYPE_HIGH_LEVEL | AIC_SCR_PRIORITY(7),
+ aic->regs + AIC_SCR(i));
+}
+
+static void __exception_irq_entry wpcm450_aic_handle_irq(struct pt_regs *regs)
+{
+ int hwirq;
+
+ /* Determine the interrupt source */
+ /* Read IPER to signal that nIRQ can be de-asserted */
+ hwirq = readl(aic->regs + AIC_IPER) / 4;
+
+ handle_domain_irq(aic->domain, hwirq, regs);
+}
+
+static void wpcm450_aic_eoi(struct irq_data *d)
+{
+ /* Signal end-of-service */
+ writel(0, aic->regs + AIC_EOSCR);
+}
+
+static void wpcm450_aic_mask(struct irq_data *d)
+{
+ unsigned int mask = BIT(d->hwirq);
+
+ /* Disable (mask) the interrupt */
+ writel(mask, aic->regs + AIC_MDCR);
+}
+
+static void wpcm450_aic_unmask(struct irq_data *d)
+{
+ unsigned int mask = BIT(d->hwirq);
+
+ /* Enable (unmask) the interrupt */
+ writel(mask, aic->regs + AIC_MECR);
+}
+
+static int wpcm450_aic_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ /*
+ * The hardware supports high/low level, as well as rising/falling edge
+ * modes, and the DT binding accommodates for that, but as long as
+ * other modes than high level mode are not used and can't be tested,
+ * they are rejected in this driver.
+ */
+ if ((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct irq_chip wpcm450_aic_chip = {
+ .name = "wpcm450-aic",
+ .irq_eoi = wpcm450_aic_eoi,
+ .irq_mask = wpcm450_aic_mask,
+ .irq_unmask = wpcm450_aic_unmask,
+ .irq_set_type = wpcm450_aic_set_type,
+};
+
+static int wpcm450_aic_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq)
+{
+ if (hwirq >= AIC_NUM_IRQS)
+ return -EPERM;
+
+ irq_set_chip_and_handler(irq, &wpcm450_aic_chip, handle_fasteoi_irq);
+ irq_set_chip_data(irq, aic);
+ irq_set_probe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops wpcm450_aic_ops = {
+ .map = wpcm450_aic_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int __init wpcm450_aic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ if (parent)
+ return -EINVAL;
+
+ aic = kzalloc(sizeof(*aic), GFP_KERNEL);
+ if (!aic)
+ return -ENOMEM;
+
+ aic->regs = of_iomap(node, 0);
+ if (!aic->regs) {
+ pr_err("Failed to map WPCM450 AIC registers\n");
+ return -ENOMEM;
+ }
+
+ wpcm450_aic_init_hw();
+
+ set_handle_irq(wpcm450_aic_handle_irq);
+
+ aic->domain = irq_domain_add_linear(node, AIC_NUM_IRQS, &wpcm450_aic_ops, aic);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(wpcm450_aic, "nuvoton,wpcm450-aic", wpcm450_aic_of_init);
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
index 1d3d273309bd..8cd1bfc73057 100644
--- a/drivers/irqchip/irq-xilinx-intc.c
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -210,7 +210,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
/*
* Disable all external interrupts until they are
- * explicity requested.
+ * explicitly requested.
*/
xintc_write(irqc, IER, 0);
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index 7b2f4d0ae3fe..2f9a289ab245 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -2,7 +2,7 @@
/*
* CZ.NIC's Turris Omnia LEDs driver
*
- * 2020 by Marek Behun <marek.behun@nic.cz>
+ * 2020 by Marek Behún <kabel@kernel.org>
*/
#include <linux/i2c.h>
@@ -287,6 +287,6 @@ static struct i2c_driver omnia_leds_driver = {
module_i2c_driver(omnia_leds_driver);
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
MODULE_DESCRIPTION("CZ.NIC's Turris Omnia LEDs");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/armada-37xx-rwtm-mailbox.c b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
index 9f2ce7f03c67..456a117a65fd 100644
--- a/drivers/mailbox/armada-37xx-rwtm-mailbox.c
+++ b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
@@ -2,7 +2,7 @@
/*
* rWTM BIU Mailbox driver for Armada 37xx
*
- * Author: Marek Behun <marek.behun@nic.cz>
+ * Author: Marek Behún <kabel@kernel.org>
*/
#include <linux/device.h>
@@ -203,4 +203,4 @@ module_platform_driver(armada_37xx_mbox_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("rWTM BIU Mailbox driver for Armada 37xx");
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 66f4c6398f67..cea2b3789736 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -65,7 +65,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
u8 *res;
position = (index + rsb) * v->fec->roots;
- block = div64_u64_rem(position, v->fec->roots << SECTOR_SHIFT, &rem);
+ block = div64_u64_rem(position, v->fec->io_size, &rem);
*offset = (unsigned)rem;
res = dm_bufio_read(v->fec->bufio, block, buf);
@@ -154,7 +154,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
/* read the next block when we run out of parity bytes */
offset += v->fec->roots;
- if (offset >= v->fec->roots << SECTOR_SHIFT) {
+ if (offset >= v->fec->io_size) {
dm_bufio_release(buf);
par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
@@ -742,8 +742,13 @@ int verity_fec_ctr(struct dm_verity *v)
return -E2BIG;
}
+ if ((f->roots << SECTOR_SHIFT) & ((1 << v->data_dev_block_bits) - 1))
+ f->io_size = 1 << v->data_dev_block_bits;
+ else
+ f->io_size = v->fec->roots << SECTOR_SHIFT;
+
f->bufio = dm_bufio_client_create(f->dev->bdev,
- f->roots << SECTOR_SHIFT,
+ f->io_size,
1, 0, NULL, NULL);
if (IS_ERR(f->bufio)) {
ti->error = "Cannot initialize FEC bufio client";
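The io_size computation above keeps every dm-bufio read block-aligned. Worked through with an assumed 4096-byte data block (data_dev_block_bits = 12): roots = 8 gives a parity span of 8 << 9 = 4096 bytes and 4096 & 4095 == 0, so io_size stays at the span; roots = 2 gives 1024 bytes and 1024 & 4095 != 0, so io_size falls back to the full 4096-byte block, with fec_read_parity() locating the parity inside it via the division remainder.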
diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
index 42fbd3a7fc9f..3c46c8d61883 100644
--- a/drivers/md/dm-verity-fec.h
+++ b/drivers/md/dm-verity-fec.h
@@ -36,6 +36,7 @@ struct dm_verity_fec {
struct dm_dev *dev; /* parity data device */
struct dm_bufio_client *data_bufio; /* for data dev access */
struct dm_bufio_client *bufio; /* for parity data access */
+ size_t io_size; /* IO size for roots */
sector_t start; /* parity data start in blocks */
sector_t blocks; /* number of blocks covered */
sector_t rounds; /* number of interleaving rounds */
diff --git a/drivers/mfd/intel_pmt.c b/drivers/mfd/intel_pmt.c
index 744b230cdcca..dd7eb614c28e 100644
--- a/drivers/mfd/intel_pmt.c
+++ b/drivers/mfd/intel_pmt.c
@@ -49,10 +49,14 @@ enum pmt_quirks {
/* Use shift instead of mask to read discovery table offset */
PMT_QUIRK_TABLE_SHIFT = BIT(2),
+
+ /* DVSEC not present (provided in driver data) */
+ PMT_QUIRK_NO_DVSEC = BIT(3),
};
struct pmt_platform_info {
unsigned long quirks;
+ struct intel_dvsec_header **capabilities;
};
static const struct pmt_platform_info tgl_info = {
@@ -60,6 +64,26 @@ static const struct pmt_platform_info tgl_info = {
PMT_QUIRK_TABLE_SHIFT,
};
+/* DG1 Platform with DVSEC quirk */
+static struct intel_dvsec_header dg1_telemetry = {
+ .length = 0x10,
+ .id = 2,
+ .num_entries = 1,
+ .entry_size = 3,
+ .tbir = 0,
+ .offset = 0x466000,
+};
+
+static struct intel_dvsec_header *dg1_capabilities[] = {
+ &dg1_telemetry,
+ NULL
+};
+
+static const struct pmt_platform_info dg1_info = {
+ .quirks = PMT_QUIRK_NO_DVSEC,
+ .capabilities = dg1_capabilities,
+};
+
static int pmt_add_dev(struct pci_dev *pdev, struct intel_dvsec_header *header,
unsigned long quirks)
{
@@ -79,19 +103,18 @@ static int pmt_add_dev(struct pci_dev *pdev, struct intel_dvsec_header *header,
case DVSEC_INTEL_ID_WATCHER:
if (quirks & PMT_QUIRK_NO_WATCHER) {
dev_info(dev, "Watcher not supported\n");
- return 0;
+ return -EINVAL;
}
name = "pmt_watcher";
break;
case DVSEC_INTEL_ID_CRASHLOG:
if (quirks & PMT_QUIRK_NO_CRASHLOG) {
dev_info(dev, "Crashlog not supported\n");
- return 0;
+ return -EINVAL;
}
name = "pmt_crashlog";
break;
default:
- dev_err(dev, "Unrecognized PMT capability: %d\n", id);
return -EINVAL;
}
@@ -148,41 +171,54 @@ static int pmt_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (info)
quirks = info->quirks;
- do {
- struct intel_dvsec_header header;
- u32 table;
- u16 vid;
-
- pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_DVSEC);
- if (!pos)
- break;
-
- pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vid);
- if (vid != PCI_VENDOR_ID_INTEL)
- continue;
-
- pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2,
- &header.id);
- pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES,
- &header.num_entries);
- pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE,
- &header.entry_size);
- pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE,
- &table);
-
- header.tbir = INTEL_DVSEC_TABLE_BAR(table);
- header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
-
- ret = pmt_add_dev(pdev, &header, quirks);
- if (ret) {
- dev_warn(&pdev->dev,
- "Failed to add device for DVSEC id %d\n",
- header.id);
- continue;
- }
+ if (info && (info->quirks & PMT_QUIRK_NO_DVSEC)) {
+ struct intel_dvsec_header **header;
+
+ header = info->capabilities;
+ while (*header) {
+ ret = pmt_add_dev(pdev, *header, quirks);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "Failed to add device for DVSEC id %d\n",
+ (*header)->id);
+ else
+ found_devices = true;
- found_devices = true;
- } while (true);
+ ++header;
+ }
+ } else {
+ do {
+ struct intel_dvsec_header header;
+ u32 table;
+ u16 vid;
+
+ pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_DVSEC);
+ if (!pos)
+ break;
+
+ pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vid);
+ if (vid != PCI_VENDOR_ID_INTEL)
+ continue;
+
+ pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2,
+ &header.id);
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES,
+ &header.num_entries);
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE,
+ &header.entry_size);
+ pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE,
+ &table);
+
+ header.tbir = INTEL_DVSEC_TABLE_BAR(table);
+ header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
+
+ ret = pmt_add_dev(pdev, &header, quirks);
+ if (ret)
+ continue;
+
+ found_devices = true;
+ } while (true);
+ }
if (!found_devices)
return -ENODEV;
@@ -200,10 +236,12 @@ static void pmt_pci_remove(struct pci_dev *pdev)
}
#define PCI_DEVICE_ID_INTEL_PMT_ADL 0x467d
+#define PCI_DEVICE_ID_INTEL_PMT_DG1 0x490e
#define PCI_DEVICE_ID_INTEL_PMT_OOBMSM 0x09a7
#define PCI_DEVICE_ID_INTEL_PMT_TGL 0x9a0d
static const struct pci_device_id pmt_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, PMT_ADL, &tgl_info) },
+ { PCI_DEVICE_DATA(INTEL, PMT_DG1, &dg1_info) },
{ PCI_DEVICE_DATA(INTEL, PMT_OOBMSM, NULL) },
{ PCI_DEVICE_DATA(INTEL, PMT_TGL, &tgl_info) },
{ }
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 110f5a8538e9..0e8254d0cf0b 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -134,6 +134,23 @@ noinline void lkdtm_CORRUPT_STACK_STRONG(void)
__lkdtm_CORRUPT_STACK((void *)&data);
}
+static pid_t stack_pid;
+static unsigned long stack_addr;
+
+void lkdtm_REPORT_STACK(void)
+{
+ volatile uintptr_t magic;
+ pid_t pid = task_pid_nr(current);
+
+ if (pid != stack_pid) {
+ pr_info("Starting stack offset tracking for pid %d\n", pid);
+ stack_pid = pid;
+ stack_addr = (uintptr_t)&magic;
+ }
+
+ pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic));
+}
+
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index b2aff4d87c01..8024b6a5cc7f 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -110,6 +110,7 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(EXHAUST_STACK),
CRASHTYPE(CORRUPT_STACK),
CRASHTYPE(CORRUPT_STACK_STRONG),
+ CRASHTYPE(REPORT_STACK),
CRASHTYPE(CORRUPT_LIST_ADD),
CRASHTYPE(CORRUPT_LIST_DEL),
CRASHTYPE(STACK_GUARD_PAGE_LEADING),
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 5ae48c64df24..99f90d3e5e9c 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -17,6 +17,7 @@ void lkdtm_LOOP(void);
void lkdtm_EXHAUST_STACK(void);
void lkdtm_CORRUPT_STACK(void);
void lkdtm_CORRUPT_STACK_STRONG(void);
+void lkdtm_REPORT_STACK(void);
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
void lkdtm_SOFTLOCKUP(void);
void lkdtm_HARDLOCKUP(void);
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index eb6c02bc4a02..b8b771b643cc 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -247,8 +247,9 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
*/
for_each_sg(data->sg, sg, data->sg_len, i) {
if (sg->length % data->blksz) {
- WARN_ONCE(1, "unaligned sg len %u blksize %u\n",
- sg->length, data->blksz);
+ dev_warn_once(mmc_dev(mmc),
+ "unaligned sg len %u blksize %u, disabling descriptor DMA for transfer\n",
+ sg->length, data->blksz);
return;
}
}
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 57f1f1708994..5c5c92132287 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -488,8 +488,8 @@ static int mtk_nfc_exec_instr(struct nand_chip *chip,
return 0;
case NAND_OP_WAITRDY_INSTR:
return readl_poll_timeout(nfc->regs + NFI_STA, status,
- status & STA_BUSY, 20,
- instr->ctx.waitrdy.timeout_ms);
+ !(status & STA_BUSY), 20,
+ instr->ctx.waitrdy.timeout_ms * 1000);
default:
break;
}
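Both halves of this fix follow from the contract of readl_poll_timeout() (linux/iopoll.h): it re-reads the register until the condition becomes true, and its last two arguments are in microseconds. A minimal sketch with hypothetical names (regs, MY_STATUS, MY_BUSY, timeout_ms):

	#include <linux/iopoll.h>

	u32 status;
	int ret;

	/*
	 * Poll every 20 us until the busy bit clears; give up after
	 * timeout_ms milliseconds (scaled to us) with -ETIMEDOUT.
	 */
	ret = readl_poll_timeout(regs + MY_STATUS, status,
				 !(status & MY_BUSY),
				 20, timeout_ms * 1000);
	if (ret)
		return ret;

The original condition was true while the controller was still busy, so the wait returned immediately, and the millisecond budget was passed where microseconds are expected, timing out a thousand times too early.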
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index f69fb4238a65..a57da43680d8 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -314,6 +314,18 @@ static int mcp251x_spi_trans(struct spi_device *spi, int len)
return ret;
}
+static int mcp251x_spi_write(struct spi_device *spi, int len)
+{
+ struct mcp251x_priv *priv = spi_get_drvdata(spi);
+ int ret;
+
+ ret = spi_write(spi, priv->spi_tx_buf, len);
+ if (ret)
+ dev_err(&spi->dev, "spi write failed: ret = %d\n", ret);
+
+ return ret;
+}
+
static u8 mcp251x_read_reg(struct spi_device *spi, u8 reg)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -361,7 +373,7 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val)
priv->spi_tx_buf[1] = reg;
priv->spi_tx_buf[2] = val;
- mcp251x_spi_trans(spi, 3);
+ mcp251x_spi_write(spi, 3);
}
static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
@@ -373,7 +385,7 @@ static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
priv->spi_tx_buf[2] = v1;
priv->spi_tx_buf[3] = v2;
- mcp251x_spi_trans(spi, 4);
+ mcp251x_spi_write(spi, 4);
}
static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
@@ -386,7 +398,7 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
priv->spi_tx_buf[2] = mask;
priv->spi_tx_buf[3] = val;
- mcp251x_spi_trans(spi, 4);
+ mcp251x_spi_write(spi, 4);
}
static u8 mcp251x_read_stat(struct spi_device *spi)
@@ -618,7 +630,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
buf[i]);
} else {
memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
- mcp251x_spi_trans(spi, TXBDAT_OFF + len);
+ mcp251x_spi_write(spi, TXBDAT_OFF + len);
}
}
@@ -650,7 +662,7 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
/* use INSTRUCTION_RTS, to avoid "repeated frame problem" */
priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx);
- mcp251x_spi_trans(priv->spi, 1);
+ mcp251x_spi_write(priv->spi, 1);
}
static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
@@ -888,7 +900,7 @@ static int mcp251x_hw_reset(struct spi_device *spi)
mdelay(MCP251X_OST_DELAY_MS);
priv->spi_tx_buf[0] = INSTRUCTION_RESET;
- ret = mcp251x_spi_trans(spi, 1);
+ ret = mcp251x_spi_write(spi, 1);
if (ret)
return ret;
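Context for the helper introduced above: mcp251x_spi_trans() builds a full-duplex spi_transfer with both tx_buf and rx_buf set, which host controllers that can only do half duplex refuse, while spi_write() queues a TX-only transfer that they accept. A rough sketch of the two call shapes (the demo_* names are illustrative; only the standard <linux/spi/spi.h> API is assumed):

#include <linux/spi/spi.h>

/* Full duplex: clocks data out and in at the same time; half-duplex-only
 * controllers reject a transfer that has both buffers set. */
static int demo_full_duplex(struct spi_device *spi, void *tx, void *rx,
                            unsigned int len)
{
        struct spi_transfer t = {
                .tx_buf = tx,
                .rx_buf = rx,
                .len    = len,
        };

        return spi_sync_transfer(spi, &t, 1);
}

/* TX only: no rx_buf, so half-duplex controllers accept it; this is
 * essentially what spi_write() does internally. */
static int demo_tx_only(struct spi_device *spi, const void *tx,
                        unsigned int len)
{
        return spi_write(spi, tx, len);
}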
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 573b11559d73..28e916a04047 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -857,7 +857,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
if (dev->adapter->dev_set_bus) {
err = dev->adapter->dev_set_bus(dev, 0);
if (err)
- goto lbl_unregister_candev;
+ goto adap_dev_free;
}
/* get device number early */
@@ -869,6 +869,10 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
return 0;
+adap_dev_free:
+ if (dev->adapter->dev_free)
+ dev->adapter->dev_free(dev);
+
lbl_unregister_candev:
unregister_candev(netdev);
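The reordering above follows the usual kernel unwind idiom: each failing step jumps to a label that undoes only the steps already completed, in reverse order, so handling dev_set_bus()'s failure required a new label stacked above the existing unregister path. A generic sketch of the pattern (all names hypothetical):

/* Hypothetical helpers standing in for the driver's real setup steps. */
int setup_a(void);
int setup_b(void);
int setup_c(void);
void undo_a(void);
void undo_b(void);

static int demo_create(void)
{
        int err;

        err = setup_a();
        if (err)
                return err;

        err = setup_b();
        if (err)
                goto err_undo_a;

        err = setup_c();        /* the newly handled failure point */
        if (err)
                goto err_undo_b;

        return 0;

err_undo_b:
        undo_b();
err_undo_a:
        undo_a();
        return err;
}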
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 52e865a3912c..bf5c62e5c0b0 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -93,8 +93,12 @@
/* GSWIP MII Registers */
#define GSWIP_MII_CFGp(p) (0x2 * (p))
+#define GSWIP_MII_CFG_RESET BIT(15)
#define GSWIP_MII_CFG_EN BIT(14)
+#define GSWIP_MII_CFG_ISOLATE BIT(13)
#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
+#define GSWIP_MII_CFG_RGMII_IBS BIT(8)
+#define GSWIP_MII_CFG_RMII_CLK BIT(7)
#define GSWIP_MII_CFG_MODE_MIIP 0x0
#define GSWIP_MII_CFG_MODE_MIIM 0x1
#define GSWIP_MII_CFG_MODE_RMIIP 0x2
@@ -190,6 +194,23 @@
#define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
#define GSWIP_MAC_FLEN 0x8C5
+#define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_0_PADEN BIT(8)
+#define GSWIP_MAC_CTRL_0_FCS_EN BIT(7)
+#define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
+#define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_FCON_RX 0x0010
+#define GSWIP_MAC_CTRL_0_FCON_TX 0x0020
+#define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
+#define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
+#define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
+#define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004
+#define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C
+#define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
+#define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
+#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */
@@ -653,16 +674,13 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
GSWIP_SDMA_PCTRLp(port));
if (!dsa_is_cpu_port(ds, port)) {
- u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO |
- GSWIP_MDIO_PHY_SPEED_AUTO |
- GSWIP_MDIO_PHY_FDUP_AUTO |
- GSWIP_MDIO_PHY_FCONTX_AUTO |
- GSWIP_MDIO_PHY_FCONRX_AUTO |
- (phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK);
-
- gswip_mdio_w(priv, macconf, GSWIP_MDIO_PHYp(port));
- /* Activate MDIO auto polling */
- gswip_mdio_mask(priv, 0, BIT(port), GSWIP_MDIO_MDC_CFG0);
+ u32 mdio_phy = 0;
+
+ if (phydev)
+ mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
+
+ gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
+ GSWIP_MDIO_PHYp(port));
}
return 0;
@@ -675,14 +693,6 @@ static void gswip_port_disable(struct dsa_switch *ds, int port)
if (!dsa_is_user_port(ds, port))
return;
- if (!dsa_is_cpu_port(ds, port)) {
- gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN,
- GSWIP_MDIO_PHY_LINK_MASK,
- GSWIP_MDIO_PHYp(port));
- /* Deactivate MDIO auto polling */
- gswip_mdio_mask(priv, BIT(port), 0, GSWIP_MDIO_MDC_CFG0);
- }
-
gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
GSWIP_FDMA_PCTRLp(port));
gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
@@ -794,14 +804,32 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
- /* disable PHY auto polling */
+ /* Deactivate MDIO PHY auto polling. Some PHYs, such as the AR8030,
+ * have an interoperability problem with this auto polling mechanism
+ * because their status registers think that the link is in a
+ * different state than it actually is. The AR8030 has the
+ * BMSR_ESTATEN bit set as well as ESTATUS_1000_TFULL and
+ * ESTATUS_1000_XFULL. This makes the auto polling state machine
+ * treat the link as negotiated at 1 Gbit/s. Since the PHY itself is
+ * a Fast Ethernet RMII PHY, this leaves the switch port completely
+ * dead (neither RX nor TX works).
+ * With various other PHY / port combinations (PHY11G GPHY, PHY22F
+ * GPHY, external RGMII PEF7071/7072) traffic would also stop:
+ * sometimes it worked fine for a few minutes to hours and then
+ * stopped; on other devices no traffic could be sent or received
+ * at all. Testing shows that when PHY auto polling is disabled
+ * these problems go away.
+ */
gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
+
/* Configure the MDIO Clock 2.5 MHz */
gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
- /* Disable the xMII link */
+ /* Disable the xMII interface and clear its isolation bit */
for (i = 0; i < priv->hw_info->max_ports; i++)
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
+ gswip_mii_mask_cfg(priv,
+ GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
+ 0, i);
/* enable special tag insertion on cpu port */
gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
@@ -1450,6 +1478,112 @@ unsupported:
return;
}
+static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
+{
+ u32 mdio_phy;
+
+ if (link)
+ mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
+ else
+ mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
+
+ gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
+ GSWIP_MDIO_PHYp(port));
+}
+
+static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
+ phy_interface_t interface)
+{
+ u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
+
+ switch (speed) {
+ case SPEED_10:
+ mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
+
+ if (interface == PHY_INTERFACE_MODE_RMII)
+ mii_cfg = GSWIP_MII_CFG_RATE_M50;
+ else
+ mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
+
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+ break;
+
+ case SPEED_100:
+ mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
+
+ if (interface == PHY_INTERFACE_MODE_RMII)
+ mii_cfg = GSWIP_MII_CFG_RATE_M50;
+ else
+ mii_cfg = GSWIP_MII_CFG_RATE_M25;
+
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+ break;
+
+ case SPEED_1000:
+ mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
+
+ mii_cfg = GSWIP_MII_CFG_RATE_M125;
+
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
+ break;
+ }
+
+ gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
+ GSWIP_MDIO_PHYp(port));
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
+ gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
+ GSWIP_MAC_CTRL_0p(port));
+}
+
+static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
+{
+ u32 mac_ctrl_0, mdio_phy;
+
+ if (duplex == DUPLEX_FULL) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
+ mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
+ } else {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
+ mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
+ }
+
+ gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
+ GSWIP_MAC_CTRL_0p(port));
+ gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
+ GSWIP_MDIO_PHYp(port));
+}
+
+static void gswip_port_set_pause(struct gswip_priv *priv, int port,
+ bool tx_pause, bool rx_pause)
+{
+ u32 mac_ctrl_0, mdio_phy;
+
+ if (tx_pause && rx_pause) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+ GSWIP_MDIO_PHY_FCONRX_EN;
+ } else if (tx_pause) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+ GSWIP_MDIO_PHY_FCONRX_DIS;
+ } else if (rx_pause) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+ GSWIP_MDIO_PHY_FCONRX_EN;
+ } else {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+ GSWIP_MDIO_PHY_FCONRX_DIS;
+ }
+
+ gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
+ mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
+ gswip_mdio_mask(priv,
+ GSWIP_MDIO_PHY_FCONTX_MASK |
+ GSWIP_MDIO_PHY_FCONRX_MASK,
+ mdio_phy, GSWIP_MDIO_PHYp(port));
+}
+
static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
@@ -1469,6 +1603,9 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
break;
case PHY_INTERFACE_MODE_RMII:
miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
+
+ /* Configure the RMII clock as output: */
+ miicfg |= GSWIP_MII_CFG_RMII_CLK;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -1481,7 +1618,11 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
"Unsupported interface: %d\n", state->interface);
return;
}
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_MODE_MASK, miicfg, port);
+
+ gswip_mii_mask_cfg(priv,
+ GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
+ GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
+ miicfg, port);
switch (state->interface) {
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -1506,6 +1647,9 @@ static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
struct gswip_priv *priv = ds->priv;
gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
+
+ if (!dsa_is_cpu_port(ds, port))
+ gswip_port_set_link(priv, port, false);
}
static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -1517,6 +1661,13 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
{
struct gswip_priv *priv = ds->priv;
+ if (!dsa_is_cpu_port(ds, port)) {
+ gswip_port_set_link(priv, port, true);
+ gswip_port_set_speed(priv, port, speed, interface);
+ gswip_port_set_duplex(priv, port, duplex);
+ gswip_port_set_pause(priv, port, tx_pause, rx_pause);
+ }
+
gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
}
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 903d619e08ed..e08bf9377140 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3026,10 +3026,17 @@ out_resources:
return err;
}
+/* prod_id for switch families which do not have a PHY model number */
+static const u16 family_prod_id_table[] = {
+ [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
+ [MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
+};
+
static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
struct mv88e6xxx_chip *chip = mdio_bus->chip;
+ u16 prod_id;
u16 val;
int err;
@@ -3040,23 +3047,12 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
mv88e6xxx_reg_unlock(chip);
- if (reg == MII_PHYSID2) {
- /* Some internal PHYs don't have a model number. */
- if (chip->info->family != MV88E6XXX_FAMILY_6165)
- /* Then there is the 6165 family. It gets is
- * PHYs correct. But it can also have two
- * SERDES interfaces in the PHY address
- * space. And these don't have a model
- * number. But they are not PHYs, so we don't
- * want to give them something a PHY driver
- * will recognise.
- *
- * Use the mv88e6390 family model number
- * instead, for anything which really could be
- * a PHY,
- */
- if (!(val & 0x3f0))
- val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
+ /* Some internal PHYs don't have a model number. */
+ if (reg == MII_PHYSID2 && !(val & 0x3f0) &&
+ chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
+ prod_id = family_prod_id_table[chip->info->family];
+ if (prod_id)
+ val |= prod_id >> 4;
}
return err ? err : val;
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 187b0b9a6e1d..f78daba60b35 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1534,8 +1534,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- ioaddr = pci_resource_start(pdev, 0);
- if (!ioaddr) {
+ if (!pci_resource_len(pdev, 0)) {
if (pcnet32_debug & NETIF_MSG_PROBE)
pr_err("card has no PCI IO resources, aborting\n");
err = -ENODEV;
@@ -1548,6 +1547,8 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
pr_err("architecture does not support 32bit PCI busmaster DMA\n");
goto err_disable_dev;
}
+
+ ioaddr = pci_resource_start(pdev, 0);
if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
if (pcnet32_debug & NETIF_MSG_PROBE)
pr_err("io address range already allocated\n");
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index ba8321ec1ee7..3305979a9f7c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -180,9 +180,9 @@
#define XGBE_DMA_SYS_AWCR 0x30303030
/* DMA cache settings - PCI device */
-#define XGBE_DMA_PCI_ARCR 0x00000003
-#define XGBE_DMA_PCI_AWCR 0x13131313
-#define XGBE_DMA_PCI_AWARCR 0x00000313
+#define XGBE_DMA_PCI_ARCR 0x000f0f0f
+#define XGBE_DMA_PCI_AWCR 0x0f0f0f0f
+#define XGBE_DMA_PCI_AWARCR 0x00000f0f
/* DMA channel interrupt modes */
#define XGBE_IRQ_MODE_EDGE 0
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 98cf82dea3e4..65981931a798 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -172,6 +172,7 @@ static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
err_free_buf_descs:
dma_free_coherent(dev, size, ring->cpu_addr, ring->dma_addr);
+ ring->cpu_addr = NULL;
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 15362d016a87..0f6a6cb7e98d 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3239,6 +3239,9 @@ static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
bool cmp_b = false;
bool cmp_c = false;
+ if (!macb_is_gem(bp))
+ return;
+
tp4sp_v = &(fs->h_u.tcp_ip4_spec);
tp4sp_m = &(fs->m_u.tcp_ip4_spec);
@@ -3607,6 +3610,7 @@ static void macb_restore_features(struct macb *bp)
{
struct net_device *netdev = bp->dev;
netdev_features_t features = netdev->features;
+ struct ethtool_rx_fs_item *item;
/* TX checksum offload */
macb_set_txcsum_feature(bp, features);
@@ -3615,6 +3619,9 @@ static void macb_restore_features(struct macb *bp)
macb_set_rxcsum_feature(bp, features);
/* RX Flow Filters */
+ list_for_each_entry(item, &bp->rx_fs_list.list, list)
+ gem_prog_cmp_regs(bp, &item->fs);
+
macb_set_rxflow_feature(bp, features);
}
@@ -3911,6 +3918,7 @@ static int macb_init(struct platform_device *pdev)
reg = gem_readl(bp, DCFG8);
bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
GEM_BFEXT(T2SCR, reg));
+ INIT_LIST_HEAD(&bp->rx_fs_list.list);
if (bp->max_tuples > 0) {
/* also needs one ethtype match to check IPv4 */
if (GEM_BFEXT(SCR2ETH, reg) > 0) {
@@ -3921,7 +3929,6 @@ static int macb_init(struct platform_device *pdev)
/* Filtering is supported in hw but don't enable it in kernel now */
dev->hw_features |= NETIF_F_NTUPLE;
/* init Rx flow definitions */
- INIT_LIST_HEAD(&bp->rx_fs_list.list);
bp->rx_fs_list.count = 0;
spin_lock_init(&bp->rx_fs_lock);
} else
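Two related changes here: INIT_LIST_HEAD() moves before the max_tuples check so rx_fs_list.list is always a valid (possibly empty) list, and macb_restore_features() now replays every saved flow-steering rule into the comparators after a reset or resume, since the hardware copy does not survive. The underlying idiom — software list as the source of truth, hardware reprogrammed from it — looks roughly like this (names are illustrative):

#include <linux/list.h>

struct demo_filter {
        struct list_head list;
        /* saved match specification ... */
};

static LIST_HEAD(demo_filters);

/* Hypothetical register writer standing in for gem_prog_cmp_regs(). */
void demo_prog_hw(struct demo_filter *f);

/* After reset/resume the comparators are blank; walk the software
 * list and reprogram each saved entry. */
static void demo_restore_filters(void)
{
        struct demo_filter *f;

        list_for_each_entry(f, &demo_filters, list)
                demo_prog_hw(f);
}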
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
index b248966837b4..7aad40b2aa73 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
@@ -412,7 +412,7 @@
| CN6XXX_INTR_M0UNWI_ERR \
| CN6XXX_INTR_M1UPB0_ERR \
| CN6XXX_INTR_M1UPWI_ERR \
- | CN6XXX_INTR_M1UPB0_ERR \
+ | CN6XXX_INTR_M1UNB0_ERR \
| CN6XXX_INTR_M1UNWI_ERR \
| CN6XXX_INTR_INSTR_DB_OF_ERR \
| CN6XXX_INTR_SLIST_DB_OF_ERR \
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 6c85a10f465c..23a2ebdfd503 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -1794,11 +1794,25 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
struct cudbg_buffer temp_buff = { 0 };
struct sge_qbase_reg_field *sge_qbase;
struct ireg_buf *ch_sge_dbg;
+ u8 padap_running = 0;
int i, rc;
+ u32 size;
- rc = cudbg_get_buff(pdbg_init, dbg_buff,
- sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase),
- &temp_buff);
+ /* Accessing SGE_QBASE_MAP[0-3] and SGE_QBASE_INDEX regs can
+ * lead to SGE missing doorbells under heavy traffic. So, only
+ * collect them when adapter is idle.
+ */
+ for_each_port(padap, i) {
+ padap_running = netif_running(padap->port[i]);
+ if (padap_running)
+ break;
+ }
+
+ size = sizeof(*ch_sge_dbg) * 2;
+ if (!padap_running)
+ size += sizeof(*sge_qbase);
+
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1820,7 +1834,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
ch_sge_dbg++;
}
- if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5 &&
+ !padap_running) {
sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
/* 1 addr reg SGE_QBASE_INDEX and 4 data reg
* SGE_QBASE_MAP[0-3]
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 98829e482bfa..80882cfc370f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2090,7 +2090,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x1190, 0x1194,
0x11a0, 0x11a4,
0x11b0, 0x11b4,
- 0x11fc, 0x1274,
+ 0x11fc, 0x123c,
+ 0x1254, 0x1274,
0x1280, 0x133c,
0x1800, 0x18fc,
0x3000, 0x302c,
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 1115b8f9ea4e..a3f5b80888e5 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -350,18 +350,6 @@ static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word,
}
/*
- * chcr_ktls_mark_tcb_close: mark tcb state to CLOSE
- * @tx_info - driver specific tls info.
- * return: NET_TX_OK/NET_XMIT_DROP.
- */
-static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
-{
- return chcr_set_tcb_field(tx_info, TCB_T_STATE_W,
- TCB_T_STATE_V(TCB_T_STATE_M),
- CHCR_TCB_STATE_CLOSED, 1);
-}
-
-/*
 * chcr_ktls_dev_del: callback for tls_dev_del.
 * Remove the tid and l2t entry and close the connection.
 * It is done on a per-connection basis.
@@ -395,8 +383,6 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
/* clear tid */
if (tx_info->tid != -1) {
- /* clear tcb state and then release tid */
- chcr_ktls_mark_tcb_close(tx_info);
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tx_info->tid, tx_info->ip_family);
}
@@ -574,7 +560,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
return 0;
free_tid:
- chcr_ktls_mark_tcb_close(tx_info);
#if IS_ENABLED(CONFIG_IPV6)
/* clear clip entry */
if (tx_info->ip_family == AF_INET6)
@@ -672,10 +657,6 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
if (tx_info->pending_close) {
spin_unlock(&tx_info->lock);
if (!status) {
- /* it's a late success, tcb status is established,
- * mark it close.
- */
- chcr_ktls_mark_tcb_close(tx_info);
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tid, tx_info->ip_family);
}
@@ -1664,54 +1645,6 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
}
/*
- * chcr_ktls_update_snd_una: Reset the SEND_UNA. It will be done to avoid
- * sending the same segment again. It will discard the segment which is before
- * the current tx max.
- * @tx_info - driver specific tls info.
- * @q - TX queue.
- * return: NET_TX_OK/NET_XMIT_DROP.
- */
-static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
- struct sge_eth_txq *q)
-{
- struct fw_ulptx_wr *wr;
- unsigned int ndesc;
- int credits;
- void *pos;
- u32 len;
-
- len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
- ndesc = DIV_ROUND_UP(len, 64);
-
- credits = chcr_txq_avail(&q->q) - ndesc;
- if (unlikely(credits < 0)) {
- chcr_eth_txq_stop(q);
- return NETDEV_TX_BUSY;
- }
-
- pos = &q->q.desc[q->q.pidx];
-
- wr = pos;
- /* ULPTX wr */
- wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
- wr->cookie = 0;
- /* fill len in wr field */
- wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
-
- pos += sizeof(*wr);
-
- pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
- TCB_SND_UNA_RAW_W,
- TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
- TCB_SND_UNA_RAW_V(0), 0);
-
- chcr_txq_advance(&q->q, ndesc);
- cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
-
- return 0;
-}
-
-/*
* chcr_end_part_handler: This handler will handle the record which
 * is complete or whose end part has been received. The T6 adapter has an
 * issue: it can't send out a TAG with a partial record, so if it's an end part then we have
@@ -1735,7 +1668,9 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
struct sge_eth_txq *q, u32 skb_offset,
u32 tls_end_offset, bool last_wr)
{
+ bool free_skb_if_tx_fails = false;
struct sk_buff *nskb = NULL;
+
/* check if it is a complete record */
if (tls_end_offset == record->len) {
nskb = skb;
@@ -1758,6 +1693,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
if (last_wr)
dev_kfree_skb_any(skb);
+ else
+ free_skb_if_tx_fails = true;
last_wr = true;
@@ -1769,6 +1706,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
record->num_frags,
(last_wr && tcp_push_no_fin),
mss)) {
+ if (free_skb_if_tx_fails)
+ dev_kfree_skb_any(skb);
goto out;
}
tx_info->prev_seq = record->end_seq;
@@ -1905,11 +1844,6 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
/* reset tcp_seq as per the prior_data_required len */
tcp_seq -= prior_data_len;
}
- /* reset snd una, so the middle record won't send the already
- * sent part.
- */
- if (chcr_ktls_update_snd_una(tx_info, q))
- goto out;
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
} else {
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
@@ -2010,12 +1944,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
* we will send the complete record again.
*/
+ spin_lock_irqsave(&tx_ctx->base.lock, flags);
+
do {
- int i;
cxgb4_reclaim_completed_tx(adap, &q->q, true);
- /* lock taken */
- spin_lock_irqsave(&tx_ctx->base.lock, flags);
/* fetch the tls record */
record = tls_get_record(&tx_ctx->base, tcp_seq,
&tx_info->record_no);
@@ -2074,11 +2007,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
tls_end_offset, skb_offset,
0);
- spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
if (ret) {
/* free the refcount taken earlier */
if (tls_end_offset < data_len)
dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
goto out;
}
@@ -2088,16 +2021,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
continue;
}
- /* increase page reference count of the record, so that there
- * won't be any chance of page free in middle if in case stack
- * receives ACK and try to delete the record.
- */
- for (i = 0; i < record->num_frags; i++)
- __skb_frag_ref(&record->frags[i]);
- /* lock cleared */
- spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
-
-
/* if a tls record is finishing in this SKB */
if (tls_end_offset <= data_len) {
ret = chcr_end_part_handler(tx_info, skb, record,
@@ -2122,13 +2045,9 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
data_len = 0;
}
- /* clear the frag ref count which increased locally before */
- for (i = 0; i < record->num_frags; i++) {
- /* clear the frag ref count */
- __skb_frag_unref(&record->frags[i]);
- }
/* if any failure, come out from the loop. */
if (ret) {
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
if (th->fin)
dev_kfree_skb_any(skb);
@@ -2143,6 +2062,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
} while (data_len > 0);
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 252adfa5d837..8a9096aa85cd 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1471,8 +1471,10 @@ dm9000_probe(struct platform_device *pdev)
/* Init network device */
ndev = alloc_etherdev(sizeof(struct board_info));
- if (!ndev)
- return -ENOMEM;
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto out_regulator_disable;
+ }
SET_NETDEV_DEV(ndev, &pdev->dev);
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index e3a8858915b3..df0eab479d51 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -963,7 +963,7 @@ static void tx_timeout(struct net_device *dev, unsigned int txqueue)
unsigned long flag;
netif_stop_queue(dev);
- tasklet_disable(&np->tx_tasklet);
+ tasklet_disable_in_atomic(&np->tx_tasklet);
iowrite16(0, ioaddr + IntrEnable);
printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
"TxFrameId %2.2x,"
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 1cf8ef717453..3ec4d9fddd52 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -363,7 +363,11 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
- eth_mac_addr(dev, p);
+ int ret;
+
+ ret = eth_mac_addr(dev, p);
+ if (ret)
+ return ret;
gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index e3f81c7e0ce7..b0dbe6dcaa7b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3966,7 +3966,6 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
* normalcy is to reset.
* 2. A new reset request from the stack due to timeout
*
- * For the first case,error event might not have ae handle available.
* check if this is a new reset request and we are not here just because
* last reset attempt did not succeed and watchdog hit us again. We will
* know this if last reset request did not occur very recently (watchdog
@@ -3976,14 +3975,14 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
* want to make sure we throttle the reset request. Therefore, we will
* not allow it again before 3*HZ times.
*/
- if (!handle)
- handle = &hdev->vport[0].nic;
if (time_before(jiffies, (hdev->last_reset_time +
HCLGE_RESET_INTERVAL))) {
mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
return;
- } else if (hdev->default_reset_request) {
+ }
+
+ if (hdev->default_reset_request) {
hdev->reset_level =
hclge_get_reset_level(ae_dev,
&hdev->default_reset_request);
@@ -11211,7 +11210,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
if (ret)
return ret;
- /* RSS indirection table has been configuared by user */
+ /* RSS indirection table has been configured by user */
if (rxfh_configured)
goto out;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 700e068764c8..e295d359e912 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2193,7 +2193,7 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
&hdev->reset_state)) {
- /* PF has initmated that it is about to reset the hardware.
+ /* PF has intimated that it is about to reset the hardware.
* We now have to poll & check if hardware has actually
* completed the reset sequence. On hardware reset completion,
* VF needs to reset the client and ae device.
@@ -2624,14 +2624,14 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
hclgevf_reset_tqp_stats(handle);
hclgevf_request_link_info(hdev);
hclgevf_update_link_mode(hdev);
- clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
-
return 0;
}
@@ -3497,7 +3497,7 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
if (ret)
return ret;
- /* RSS indirection table has been configuared by user */
+ /* RSS indirection table has been configured by user */
if (rxfh_configured)
goto out;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 9c6438d3b3a5..ffb2a91750c7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1149,19 +1149,13 @@ static int __ibmvnic_open(struct net_device *netdev)
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
if (rc) {
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_disable(&adapter->napi[i]);
+ ibmvnic_napi_disable(adapter);
release_resources(adapter);
return rc;
}
netif_tx_start_all_queues(netdev);
- if (prev_state == VNIC_CLOSED) {
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_schedule(&adapter->napi[i]);
- }
-
adapter->state = VNIC_OPEN;
return rc;
}
@@ -1922,7 +1916,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
u64 old_num_rx_queues, old_num_tx_queues;
u64 old_num_rx_slots, old_num_tx_slots;
struct net_device *netdev = adapter->netdev;
- int i, rc;
+ int rc;
netdev_dbg(adapter->netdev,
"[S:%d FOP:%d] Reset reason %d, reset_state %d\n",
@@ -2111,10 +2105,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
/* refresh device's multicast list */
ibmvnic_set_multi(netdev);
- /* kick napi */
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_schedule(&adapter->napi[i]);
-
if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
adapter->reset_reason == VNIC_RESET_MOBILITY)
__netdev_notify_peers(netdev);
@@ -3204,9 +3194,6 @@ restart_loop:
next = ibmvnic_next_scrq(adapter, scrq);
for (i = 0; i < next->tx_comp.num_comps; i++) {
- if (next->tx_comp.rcs[i])
- dev_err(dev, "tx error %x\n",
- next->tx_comp.rcs[i]);
index = be32_to_cpu(next->tx_comp.correlators[i]);
if (index & IBMVNIC_TSO_POOL_MASK) {
tx_pool = &adapter->tso_pool[pool];
@@ -3220,7 +3207,13 @@ restart_loop:
num_entries += txbuff->num_entries;
if (txbuff->skb) {
total_bytes += txbuff->skb->len;
- dev_consume_skb_irq(txbuff->skb);
+ if (next->tx_comp.rcs[i]) {
+ dev_err(dev, "tx error %x\n",
+ next->tx_comp.rcs[i]);
+ dev_kfree_skb_irq(txbuff->skb);
+ } else {
+ dev_consume_skb_irq(txbuff->skb);
+ }
txbuff->skb = NULL;
} else {
netdev_warn(adapter->netdev,
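The split above is deliberate: dev_consume_skb_irq() is for skbs that completed successfully and skips the kfree_skb drop tracepoint (so drop-monitoring tools stay quiet), while dev_kfree_skb_irq() records a genuine drop; both are safe to call from hardirq context. The general shape of such a completion handler, as a sketch:

#include <linux/netdevice.h>

/* Called from TX completion (hardirq-safe variants used). */
static void demo_tx_complete(struct sk_buff *skb, int rc)
{
        if (rc) {
                pr_err("tx error %x\n", rc);
                dev_kfree_skb_irq(skb);         /* counted as a drop */
        } else {
                dev_consume_skb_irq(skb);       /* normal completion */
        }
}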
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index cd53981fa5e0..15f93b355099 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -142,6 +142,7 @@ enum i40e_state_t {
__I40E_VIRTCHNL_OP_PENDING,
__I40E_RECOVERY_MODE,
__I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */
+ __I40E_VFS_RELEASING,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index d7c13ca9be7d..d627b59ad446 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -578,6 +578,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
case RING_TYPE_XDP:
ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL);
break;
+ default:
+ ring = NULL;
+ break;
}
if (!ring)
return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index c70dec65a572..0e92668012e3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -232,6 +232,8 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
I40E_STAT(struct i40e_vsi, _name, _stat)
#define I40E_VEB_STAT(_name, _stat) \
I40E_STAT(struct i40e_veb, _name, _stat)
+#define I40E_VEB_TC_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat)
#define I40E_PFC_STAT(_name, _stat) \
I40E_STAT(struct i40e_pfc_stats, _name, _stat)
#define I40E_QUEUE_STAT(_name, _stat) \
@@ -266,11 +268,18 @@ static const struct i40e_stats i40e_gstrings_veb_stats[] = {
I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol),
};
+struct i40e_cp_veb_tc_stats {
+ u64 tc_rx_packets;
+ u64 tc_rx_bytes;
+ u64 tc_tx_packets;
+ u64 tc_tx_bytes;
+};
+
static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = {
- I40E_VEB_STAT("veb.tc_%u_tx_packets", tc_stats.tc_tx_packets),
- I40E_VEB_STAT("veb.tc_%u_tx_bytes", tc_stats.tc_tx_bytes),
- I40E_VEB_STAT("veb.tc_%u_rx_packets", tc_stats.tc_rx_packets),
- I40E_VEB_STAT("veb.tc_%u_rx_bytes", tc_stats.tc_rx_bytes),
+ I40E_VEB_TC_STAT("veb.tc_%u_tx_packets", tc_tx_packets),
+ I40E_VEB_TC_STAT("veb.tc_%u_tx_bytes", tc_tx_bytes),
+ I40E_VEB_TC_STAT("veb.tc_%u_rx_packets", tc_rx_packets),
+ I40E_VEB_TC_STAT("veb.tc_%u_rx_bytes", tc_rx_bytes),
};
static const struct i40e_stats i40e_gstrings_misc_stats[] = {
@@ -1101,6 +1110,7 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
/* Set flow control settings */
ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);
switch (hw->fc.requested_mode) {
case I40E_FC_FULL:
@@ -2217,6 +2227,29 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
}
/**
+ * i40e_get_veb_tc_stats - copy VEB TC statistics to formatted structure
+ * @tc: the TC statistics in VEB structure (veb->tc_stats)
+ * @i: the index of traffic class in (veb->tc_stats) structure to copy
+ *
+ * Copy VEB TC statistics from structure of arrays (veb->tc_stats) to
+ * one dimensional structure i40e_cp_veb_tc_stats.
+ * Produce formatted i40e_cp_veb_tc_stats structure of the VEB TC
+ * statistics for the given TC.
+ **/
+static struct i40e_cp_veb_tc_stats
+i40e_get_veb_tc_stats(struct i40e_veb_tc_stats *tc, unsigned int i)
+{
+ struct i40e_cp_veb_tc_stats veb_tc = {
+ .tc_rx_packets = tc->tc_rx_packets[i],
+ .tc_rx_bytes = tc->tc_rx_bytes[i],
+ .tc_tx_packets = tc->tc_tx_packets[i],
+ .tc_tx_bytes = tc->tc_tx_bytes[i],
+ };
+
+ return veb_tc;
+}
+
+/**
* i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
* @pf: the PF device structure
* @i: the priority value to copy
@@ -2300,8 +2333,16 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
i40e_gstrings_veb_stats);
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
- i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
- i40e_gstrings_veb_tc_stats);
+ if (veb_stats) {
+ struct i40e_cp_veb_tc_stats veb_tc =
+ i40e_get_veb_tc_stats(&veb->tc_stats, i);
+
+ i40e_add_ethtool_stats(&data, &veb_tc,
+ i40e_gstrings_veb_tc_stats);
+ } else {
+ i40e_add_ethtool_stats(&data, NULL,
+ i40e_gstrings_veb_tc_stats);
+ }
i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats);
@@ -5439,7 +5480,7 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- true, addr, offset, &value, NULL);
+ addr, true, offset, &value, NULL);
if (status)
return -EIO;
data[i] = value;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 17f3b800640e..527023ee4c07 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2560,8 +2560,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
- dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
- vsi->netdev->name,
+ dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
cur_multipromisc ? "entering" : "leaving");
}
}
@@ -6738,9 +6737,9 @@ out:
set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
}
- /* registers are set, let's apply */
- if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
- ret = i40e_hw_set_dcb_config(pf, new_cfg);
+ /* registers are set, let's apply */
+ if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
+ ret = i40e_hw_set_dcb_config(pf, new_cfg);
}
err:
@@ -10573,12 +10572,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
goto end_core_reset;
}
- if (!lock_acquired)
- rtnl_lock();
- ret = i40e_setup_pf_switch(pf, reinit);
- if (ret)
- goto end_unlock;
-
#ifdef CONFIG_I40E_DCB
/* Enable FW to write a default DCB config on link-up
* unless I40E_FLAG_TC_MQPRIO was enabled or DCB
@@ -10593,7 +10586,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
i40e_aq_set_dcb_parameters(hw, false, NULL);
dev_warn(&pf->pdev->dev,
"DCB is not supported for X710-T*L 2.5/5G speeds\n");
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
} else {
i40e_aq_set_dcb_parameters(hw, true, NULL);
ret = i40e_init_pf_dcb(pf);
@@ -10607,6 +10600,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
}
#endif /* CONFIG_I40E_DCB */
+ if (!lock_acquired)
+ rtnl_lock();
+ ret = i40e_setup_pf_switch(pf, reinit);
+ if (ret)
+ goto end_unlock;
/* The driver only wants link up/down and module qualification
* reports from firmware. Note the negative logic.
@@ -12359,6 +12357,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
{
int err = 0;
int size;
+ u16 pow;
/* Set default capability flags */
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
@@ -12377,6 +12376,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->rss_table_size = pf->hw.func_caps.rss_table_size;
pf->rss_size_max = min_t(int, pf->rss_size_max,
pf->hw.func_caps.num_tx_qp);
+
+ /* find the next higher power-of-2 of num cpus */
+ pow = roundup_pow_of_two(num_online_cpus());
+ pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
+
if (pf->hw.func_caps.rss) {
pf->flags |= I40E_FLAG_RSS_ENABLED;
pf->alloc_rss_size = min_t(int, pf->rss_size_max,
@@ -15140,12 +15144,16 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
* in order to register the netdev
*/
v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
- if (v_idx < 0)
+ if (v_idx < 0) {
+ err = v_idx;
goto err_switch_setup;
+ }
pf->lan_vsi = v_idx;
vsi = pf->vsi[v_idx];
- if (!vsi)
+ if (!vsi) {
+ err = -EFAULT;
goto err_switch_setup;
+ }
vsi->alloc_queue_pairs = 1;
err = i40e_config_netdev(vsi);
if (err)
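The new clamp in i40e_sw_init() bounds the RSS spread by the next power of two above the online CPU count: with 6 CPUs, roundup_pow_of_two(6) is 8, so rss_size_max is capped at 8 queues even when the hardware advertises more, avoiding queues no CPU would service. A worked sketch of the arithmetic (values hypothetical):

#include <linux/kernel.h>
#include <linux/log2.h>

/* roundup_pow_of_two(): 1->1, 2->2, 3->4, 6->8, 9->16, ... */
static u16 demo_cap_rss(u16 hw_rss_max, unsigned int ncpus)
{
        u16 pow = roundup_pow_of_two(ncpus);

        return min_t(u16, hw_rss_max, pow);     /* e.g. min(64, 8) == 8 */
}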
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 5747a99122fb..06b4271219b1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2295,8 +2295,7 @@ int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
* @rx_ring: Rx ring being processed
* @xdp: XDP buffer containing the frame
**/
-static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
- struct xdp_buff *xdp)
+static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
int err, result = I40E_XDP_PASS;
struct i40e_ring *xdp_ring;
@@ -2335,7 +2334,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
}
xdp_out:
rcu_read_unlock();
- return ERR_PTR(-result);
+ return result;
}
/**
@@ -2448,6 +2447,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
unsigned int xdp_xmit = 0;
bool failure = false;
struct xdp_buff xdp;
+ int xdp_res = 0;
#if (PAGE_SIZE < 8192)
frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
@@ -2513,12 +2513,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
#endif
- skb = i40e_run_xdp(rx_ring, &xdp);
+ xdp_res = i40e_run_xdp(rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
xdp_xmit |= xdp_res;
i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
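The old i40e_run_xdp() smuggled a small verdict bitmask (I40E_XDP_PASS/TX/REDIR and so on) through its pointer return as ERR_PTR(-result), forcing the caller to decode it with IS_ERR()/PTR_ERR(); returning a plain int drops that indirection and removes any chance of confusing an encoded verdict with a real error pointer. The two shapes side by side, as a sketch:

#include <linux/err.h>

/* Old shape: a positive bitmask hidden in pointer-error space. */
static void *demo_old(int verdict)
{
        return ERR_PTR(-verdict);       /* caller: verdict = -PTR_ERR(p) */
}

/* New shape: the verdict is just the return value. */
static int demo_new(int verdict)
{
        return verdict;
}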
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 1b6ec9be155a..5d301a466f5c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -137,6 +137,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
**/
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
+ struct i40e_pf *pf = vf->pf;
int i;
i40e_vc_notify_vf_reset(vf);
@@ -147,6 +148,11 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
* ensure a reset.
*/
for (i = 0; i < 20; i++) {
+ /* If the PF is in the VF-releasing state, resetting the VF
+ * is impossible, so bail out.
+ */
+ if (test_bit(__I40E_VFS_RELEASING, pf->state))
+ return;
if (i40e_reset_vf(vf, false))
return;
usleep_range(10000, 20000);
@@ -1574,6 +1580,8 @@ void i40e_free_vfs(struct i40e_pf *pf)
if (!pf->vf)
return;
+
+ set_bit(__I40E_VFS_RELEASING, pf->state);
while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
usleep_range(1000, 2000);
@@ -1631,6 +1639,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
}
}
clear_bit(__I40E_VF_DISABLE, pf->state);
+ clear_bit(__I40E_VFS_RELEASING, pf->state);
}
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index fc32c5019b0f..12ca84113587 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -471,7 +471,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
if (!nb_pkts)
- return false;
+ return true;
if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
nb_processed = xdp_ring->count - xdp_ring->next_to_use;
@@ -488,7 +488,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);
- return true;
+ return nb_pkts < budget;
}
/**
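The changed return convention for i40e_xmit_zc() is the usual budget idiom: return true when cleanup is finished for now (nothing queued, or fewer packets than budget were sent) and false when the full budget was consumed and the caller should poll again; returning true for nb_pkts == 0 is what stops the needless re-polling. A sketch of the convention (illustrative names):

#include <linux/types.h>

/* Returns true when the queue is drained for now, false when the
 * budget was exhausted and more work remains. */
static bool demo_xmit(unsigned int queued, unsigned int budget)
{
        if (!queued)
                return true;            /* nothing to send: done */

        /* ... transmit 'queued' descriptors ... */

        return queued < budget;         /* full budget used => call again */
}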
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 357706444dd5..17101c45cbcd 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -196,7 +196,6 @@ enum ice_state {
__ICE_NEEDS_RESTART,
__ICE_PREPARED_FOR_RESET, /* set by driver when prepared */
__ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */
- __ICE_DCBNL_DEVRESET, /* set by dcbnl devreset */
__ICE_PFR_REQ, /* set by driver and peers */
__ICE_CORER_REQ, /* set by driver and peers */
__ICE_GLOBR_REQ, /* set by driver and peers */
@@ -624,7 +623,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
const char *ice_stat_str(enum ice_status stat_err);
const char *ice_aq_str(enum ice_aq_err aq_err);
-bool ice_is_wol_supported(struct ice_pf *pf);
+bool ice_is_wol_supported(struct ice_hw *hw);
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
bool is_tun);
@@ -642,6 +641,7 @@ int ice_fdir_create_dflt_rules(struct ice_pf *pf);
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
struct ice_rq_event_info *event);
int ice_open(struct net_device *netdev);
+int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 3d9475e222cd..a20edf1538a0 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -717,8 +717,8 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
if (!data) {
data = devm_kcalloc(ice_hw_to_dev(hw),
- sizeof(*data),
ICE_AQC_FW_LOG_ID_MAX,
+ sizeof(*data),
GFP_KERNEL);
if (!data)
return ICE_ERR_NO_MEMORY;
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index faaa08e8171b..68866f4f0eb0 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -31,8 +31,8 @@ enum ice_ctl_q {
ICE_CTL_Q_MAILBOX,
};
-/* Control Queue timeout settings - max delay 250ms */
-#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
+/* Control Queue timeout settings - max delay 1s */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT 10000 /* Count 10000 times */
#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */
#define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index e42727941ef5..28e834a128c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -738,22 +738,27 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
/**
* ice_cee_to_dcb_cfg
* @cee_cfg: pointer to CEE configuration struct
- * @dcbcfg: DCB configuration struct
+ * @pi: port information structure
*
* Convert CEE configuration from firmware to DCB configuration
*/
static void
ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
- struct ice_dcbx_cfg *dcbcfg)
+ struct ice_port_info *pi)
{
u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
- u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
- u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
+ u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift, j;
u8 i, err, sync, oper, app_index, ice_app_sel_type;
+ u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
+ struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg;
u16 ice_app_prot_id_type;
- /* CEE PG data to ETS config */
+ dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
+ dcbcfg->dcbx_mode = ICE_DCBX_MODE_CEE;
+ dcbcfg->tlv_status = tlv_status;
+
+ /* CEE PG data */
dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
/* Note that the FW creates the oper_prio_tc nibbles reversed
@@ -780,10 +785,16 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
}
}
- /* CEE PFC data to ETS config */
+ /* CEE PFC data */
dcbcfg->pfc.pfcena = cee_cfg->oper_pfc_en;
dcbcfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS;
+ /* CEE APP TLV data */
+ if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
+ cmp_dcbcfg = &pi->qos_cfg.desired_dcbx_cfg;
+ else
+ cmp_dcbcfg = &pi->qos_cfg.remote_dcbx_cfg;
+
app_index = 0;
for (i = 0; i < 3; i++) {
if (i == 0) {
@@ -802,6 +813,18 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S;
ice_app_sel_type = ICE_APP_SEL_TCPIP;
ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI;
+
+ for (j = 0; j < cmp_dcbcfg->numapps; j++) {
+ u16 prot_id = cmp_dcbcfg->app[j].prot_id;
+ u8 sel = cmp_dcbcfg->app[j].selector;
+
+ if (sel == ICE_APP_SEL_TCPIP &&
+ (prot_id == ICE_APP_PROT_ID_ISCSI ||
+ prot_id == ICE_APP_PROT_ID_ISCSI_860)) {
+ ice_app_prot_id_type = prot_id;
+ break;
+ }
+ }
} else {
/* FIP APP */
ice_aqc_cee_status_mask = ICE_AQC_CEE_FIP_STATUS_M;
@@ -892,11 +915,8 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
if (!ret) {
/* CEE mode */
- dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
- dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE;
- dcbx_cfg->tlv_status = le32_to_cpu(cee_cfg.tlv_status);
- ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg);
ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
+ ice_cee_to_dcb_cfg(&cee_cfg, pi);
} else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
/* CEE mode not enabled try querying IEEE data */
dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index 468a63f7eff9..4180f1f35fb8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -18,12 +18,10 @@ static void ice_dcbnl_devreset(struct net_device *netdev)
while (ice_is_reset_in_progress(pf->state))
usleep_range(1000, 2000);
- set_bit(__ICE_DCBNL_DEVRESET, pf->state);
dev_close(netdev);
netdev_state_change(netdev);
dev_open(netdev, NULL);
netdev_state_change(netdev);
- clear_bit(__ICE_DCBNL_DEVRESET, pf->state);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 2dcfa685b763..32ba71a16165 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3472,7 +3472,7 @@ static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n");
/* Get WoL settings based on the HW capability */
- if (ice_is_wol_supported(pf)) {
+ if (ice_is_wol_supported(&pf->hw)) {
wol->supported = WAKE_MAGIC;
wol->wolopts = pf->wol_ena ? WAKE_MAGIC : 0;
} else {
@@ -3492,7 +3492,7 @@ static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(pf))
+ if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(&pf->hw))
return -EOPNOTSUPP;
/* only magic packet is supported */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 8d4e2ad4328d..d13c7fc8fb0a 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2620,7 +2620,7 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
if (!locked)
rtnl_lock();
- err = ice_open(vsi->netdev);
+ err = ice_open_internal(vsi->netdev);
if (!locked)
rtnl_unlock();
@@ -2649,7 +2649,7 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
if (!locked)
rtnl_lock();
- ice_stop(vsi->netdev);
+ ice_vsi_close(vsi);
if (!locked)
rtnl_unlock();
@@ -3078,7 +3078,6 @@ err_vsi:
bool ice_is_reset_in_progress(unsigned long *state)
{
return test_bit(__ICE_RESET_OICR_RECV, state) ||
- test_bit(__ICE_DCBNL_DEVRESET, state) ||
test_bit(__ICE_PFR_REQ, state) ||
test_bit(__ICE_CORER_REQ, state) ||
test_bit(__ICE_GLOBR_REQ, state);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 2c23c8f468a5..d821c687f239 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3537,15 +3537,14 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/**
- * ice_is_wol_supported - get NVM state of WoL
- * @pf: board private structure
+ * ice_is_wol_supported - check if WoL is supported
+ * @hw: pointer to hardware info
*
* Check if WoL is supported based on the HW configuration.
* Returns true if NVM supports and enables WoL for this port, false otherwise
*/
-bool ice_is_wol_supported(struct ice_pf *pf)
+bool ice_is_wol_supported(struct ice_hw *hw)
{
- struct ice_hw *hw = &pf->hw;
u16 wol_ctrl;
/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
@@ -3554,7 +3553,7 @@ bool ice_is_wol_supported(struct ice_pf *pf)
if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
return false;
- return !(BIT(hw->pf_id) & wol_ctrl);
+ return !(BIT(hw->port_info->lport) & wol_ctrl);
}
/**
@@ -4192,28 +4191,25 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_send_version_unroll;
}
+ /* not a fatal error if this fails */
err = ice_init_nvm_phy_type(pf->hw.port_info);
- if (err) {
+ if (err)
dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
- goto err_send_version_unroll;
- }
+ /* not a fatal error if this fails */
err = ice_update_link_info(pf->hw.port_info);
- if (err) {
+ if (err)
dev_err(dev, "ice_update_link_info failed: %d\n", err);
- goto err_send_version_unroll;
- }
ice_init_link_dflt_override(pf->hw.port_info);
/* if media available, initialize PHY settings */
if (pf->hw.port_info->phy.link_info.link_info &
ICE_AQ_MEDIA_AVAILABLE) {
+ /* not a fatal error if this fails */
err = ice_init_phy_user_cfg(pf->hw.port_info);
- if (err) {
+ if (err)
dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
- goto err_send_version_unroll;
- }
if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
struct ice_vsi *vsi = ice_get_main_vsi(pf);
@@ -4568,6 +4564,7 @@ static int __maybe_unused ice_suspend(struct device *dev)
continue;
ice_vsi_free_q_vectors(pf->vsi[v]);
}
+ ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
ice_clear_interrupt_scheme(pf);
pci_save_state(pdev);
@@ -6637,6 +6634,28 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
int ice_open(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+
+ if (ice_is_reset_in_progress(pf->state)) {
+ netdev_err(netdev, "can't open net device while reset is in progress");
+ return -EBUSY;
+ }
+
+ return ice_open_internal(netdev);
+}
+
+/**
+ * ice_open_internal - Called when a network interface becomes active
+ * @netdev: network interface device structure
+ *
+ * Internal ice_open implementation. Should not be used directly except by
+ * ice_open and the reset handling routine.
+ *
+ * Returns 0 on success, negative value on failure
+ */
+int ice_open_internal(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_port_info *pi;
@@ -6715,6 +6734,12 @@ int ice_stop(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+
+ if (ice_is_reset_in_progress(pf->state)) {
+ netdev_err(netdev, "can't stop net device while reset is in progress");
+ return -EBUSY;
+ }
ice_vsi_close(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 67c965a3f5d2..834cbd3f7b31 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1238,6 +1238,9 @@ ice_add_update_vsi_list(struct ice_hw *hw,
ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
vsi_list_id);
+ if (!m_entry->vsi_list_info)
+ return ICE_ERR_NO_MEMORY;
+
/* If this entry was large action then the large action needs
* to be updated to point to FWD to VSI list
*/
@@ -2220,6 +2223,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
fm_entry->fltr_info.vsi_handle == vsi_handle) ||
(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
+ fm_entry->vsi_list_info &&
(test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
}
@@ -2292,14 +2296,12 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
return ICE_ERR_PARAM;
list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
- struct ice_fltr_info *fi;
-
- fi = &fm_entry->fltr_info;
- if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
+ if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
continue;
status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
- vsi_list_head, fi);
+ vsi_list_head,
+ &fm_entry->fltr_info);
if (status)
return status;
}
@@ -2622,7 +2624,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
&remove_list_head);
mutex_unlock(rule_lock);
if (status)
- return;
+ goto free_fltr_list;
switch (lkup) {
case ICE_SW_LKUP_MAC:
@@ -2645,6 +2647,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
break;
}
+free_fltr_list:
list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
list_del(&fm_entry->list_entry);
devm_kfree(ice_hw_to_dev(hw), fm_entry);
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index a6cb0c35748c..266036b7a49a 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -535,6 +535,7 @@ struct ice_dcb_app_priority_table {
#define ICE_TLV_STATUS_ERR 0x4
#define ICE_APP_PROT_ID_FCOE 0x8906
#define ICE_APP_PROT_ID_ISCSI 0x0cbc
+#define ICE_APP_PROT_ID_ISCSI_860 0x035c
#define ICE_APP_PROT_ID_FIP 0x8914
#define ICE_APP_SEL_ETHTYPE 0x1
#define ICE_APP_SEL_TCPIP 0x2
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 03d9aad516d4..cffb95f8f632 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6536,6 +6536,13 @@ err_setup_tx:
return err;
}
+static int ixgbe_rx_napi_id(struct ixgbe_ring *rx_ring)
+{
+ struct ixgbe_q_vector *q_vector = rx_ring->q_vector;
+
+ return q_vector ? q_vector->napi.napi_id : 0;
+}
+
/**
* ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
* @adapter: pointer to ixgbe_adapter
@@ -6583,7 +6590,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
/* XDP RX-queue info */
if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
- rx_ring->queue_index, rx_ring->q_vector->napi.napi_id) < 0)
+ rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0)
goto err;
rx_ring->xdp_prog = adapter->xdp_prog;
@@ -6892,6 +6899,11 @@ static int __maybe_unused ixgbe_resume(struct device *dev_d)
adapter->hw.hw_addr = adapter->io_addr;
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ e_dev_err("Cannot enable PCI device from suspend\n");
+ return err;
+ }
smp_mb__before_atomic();
clear_bit(__IXGBE_DISABLED, &adapter->state);
pci_set_master(pdev);
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index e9efe074edc1..f1b9284e0bea 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1265,9 +1265,9 @@ jme_stop_shutdown_timer(struct jme_adapter *jme)
jwrite32f(jme, JME_APMC, apmc);
}
-static void jme_link_change_tasklet(struct tasklet_struct *t)
+static void jme_link_change_work(struct work_struct *work)
{
- struct jme_adapter *jme = from_tasklet(jme, t, linkch_task);
+ struct jme_adapter *jme = container_of(work, struct jme_adapter, linkch_task);
struct net_device *netdev = jme->dev;
int rc;
@@ -1510,7 +1510,7 @@ jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
* all other events are ignored
*/
jwrite32(jme, JME_IEVE, intrstat);
- tasklet_schedule(&jme->linkch_task);
+ schedule_work(&jme->linkch_task);
goto out_reenable;
}
@@ -1832,7 +1832,6 @@ jme_open(struct net_device *netdev)
jme_clear_pm_disable_wol(jme);
JME_NAPI_ENABLE(jme);
- tasklet_setup(&jme->linkch_task, jme_link_change_tasklet);
tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet);
tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet);
tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet);
@@ -1920,7 +1919,7 @@ jme_close(struct net_device *netdev)
JME_NAPI_DISABLE(jme);
- tasklet_kill(&jme->linkch_task);
+ cancel_work_sync(&jme->linkch_task);
tasklet_kill(&jme->txclean_task);
tasklet_kill(&jme->rxclean_task);
tasklet_kill(&jme->rxempty_task);
@@ -3035,6 +3034,7 @@ jme_init_one(struct pci_dev *pdev,
atomic_set(&jme->rx_empty, 1);
tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
+ INIT_WORK(&jme->linkch_task, jme_link_change_work);
jme->dpi.cur = PCC_P1;
jme->reg_ghc = 0;
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index a2c3b00d939d..2af76329b4a2 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -411,7 +411,7 @@ struct jme_adapter {
struct tasklet_struct rxempty_task;
struct tasklet_struct rxclean_task;
struct tasklet_struct txclean_task;
- struct tasklet_struct linkch_task;
+ struct work_struct linkch_task;
struct tasklet_struct pcc_task;
unsigned long flags;
u32 reg_txcs;
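
The jme change above is the canonical tasklet-to-workqueue migration: the handler takes a work_struct and recovers its adapter with container_of(), scheduling becomes schedule_work(), teardown becomes cancel_work_sync(), and setup moves to probe time via INIT_WORK(). The usual motivation is that work items run in process context and may sleep. Condensed to its skeleton (hypothetical foo_* names):

#include <linux/workqueue.h>

struct foo_adapter {
	struct work_struct linkch_task;
	/* ... */
};

/* runs in process context, so it may sleep (take mutexes, do MDIO I/O) */
static void foo_link_change_work(struct work_struct *work)
{
	struct foo_adapter *foo = container_of(work, struct foo_adapter,
					       linkch_task);
	(void)foo;	/* handle the link change */
}

static void foo_probe(struct foo_adapter *foo)
{
	INIT_WORK(&foo->linkch_task, foo_link_change_work);
}

static void foo_irq(struct foo_adapter *foo)
{
	schedule_work(&foo->linkch_task);	/* safe from hard IRQ context */
}

static void foo_close(struct foo_adapter *foo)
{
	cancel_work_sync(&foo->linkch_task);	/* waits for a running handler */
}
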
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index b051417ede67..9153c9bda96f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -191,12 +191,12 @@ static bool is_ib_supported(struct mlx5_core_dev *dev)
}
enum {
- MLX5_INTERFACE_PROTOCOL_ETH_REP,
MLX5_INTERFACE_PROTOCOL_ETH,
+ MLX5_INTERFACE_PROTOCOL_ETH_REP,
+ MLX5_INTERFACE_PROTOCOL_IB,
MLX5_INTERFACE_PROTOCOL_IB_REP,
MLX5_INTERFACE_PROTOCOL_MPIB,
- MLX5_INTERFACE_PROTOCOL_IB,
MLX5_INTERFACE_PROTOCOL_VNET,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index d7d8a68ef23d..d0f9d3cee97d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -246,6 +246,11 @@ static int mlx5_devlink_trap_action_set(struct devlink *devlink,
struct mlx5_devlink_trap *dl_trap;
int err = 0;
+ if (is_mdev_switchdev_mode(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Devlink traps can't be set in switchdev mode");
+ return -EOPNOTSUPP;
+ }
+
dl_trap = mlx5_find_trap_by_id(dev, trap->id);
if (!dl_trap) {
mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 304b296fe8b9..bc6f77ea0a31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -516,6 +516,7 @@ struct mlx5e_icosq {
struct mlx5_wq_cyc wq;
void __iomem *uar_map;
u32 sqn;
+ u16 reserved_room;
unsigned long state;
/* control path */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 308fd279669e..89510cac46c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -387,21 +387,6 @@ enum mlx5e_fec_supported_link_mode {
*_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \
} while (0)
-#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \
- do { \
- unsigned long policy_long; \
- u16 *__policy = &(policy); \
- bool _write = (write); \
- \
- policy_long = *__policy; \
- if (_write && *__policy) \
- *__policy = find_first_bit(&policy_long, \
- sizeof(policy_long) * BITS_PER_BYTE);\
- MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \
- if (!_write && *__policy) \
- *__policy = 1 << *__policy; \
- } while (0)
-
/* get/set FEC admin field for a given speed */
static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
enum mlx5e_fec_supported_link_mode link_mode)
@@ -423,16 +408,16 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g);
break;
case MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X:
- MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 50g_1x);
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 50g_1x);
break;
case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X:
- MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 100g_2x);
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g_2x);
break;
case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X:
- MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 200g_4x);
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_4x);
break;
case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
- MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 400g_8x);
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_8x);
break;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index b2cd29847a37..68e54cc1cd16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -186,6 +186,28 @@ mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
}
static int
+mlx5_get_label_mapping(struct mlx5_tc_ct_priv *ct_priv,
+ u32 *labels, u32 *id)
+{
+ if (!memchr_inv(labels, 0, sizeof(u32) * 4)) {
+ *id = 0;
+ return 0;
+ }
+
+ if (mapping_add(ct_priv->labels_mapping, labels, id))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static void
+mlx5_put_label_mapping(struct mlx5_tc_ct_priv *ct_priv, u32 id)
+{
+ if (id)
+ mapping_remove(ct_priv->labels_mapping, id);
+}
+
+static int
mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
{
struct flow_match_control control;
@@ -436,7 +458,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
mlx5e_mod_hdr_detach(ct_priv->dev,
ct_priv->mod_hdr_tbl, zone_rule->mh);
- mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+ mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
kfree(attr);
}
@@ -639,8 +661,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
if (!meta)
return -EOPNOTSUPP;
- err = mapping_add(ct_priv->labels_mapping, meta->ct_metadata.labels,
- &attr->ct_attr.ct_labels_id);
+ err = mlx5_get_label_mapping(ct_priv, meta->ct_metadata.labels,
+ &attr->ct_attr.ct_labels_id);
if (err)
return -EOPNOTSUPP;
if (nat) {
@@ -677,7 +699,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
err_mapping:
dealloc_mod_hdr_actions(&mod_acts);
- mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+ mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
return err;
}
@@ -745,7 +767,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
err_rule:
mlx5e_mod_hdr_detach(ct_priv->dev,
ct_priv->mod_hdr_tbl, zone_rule->mh);
- mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+ mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
err_mod_hdr:
kfree(attr);
err_attr:
@@ -1197,7 +1219,7 @@ void mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_
if (!priv || !ct_attr->ct_labels_id)
return;
- mapping_remove(priv->labels_mapping, ct_attr->ct_labels_id);
+ mlx5_put_label_mapping(priv, ct_attr->ct_labels_id);
}
int
@@ -1280,7 +1302,7 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1];
ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2];
ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3];
- if (mapping_add(priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
+ if (mlx5_get_label_mapping(priv, ct_labels, &ct_attr->ct_labels_id))
return -EOPNOTSUPP;
mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id,
MLX5_CT_LABELS_MASK);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 67de2bf36861..e1271998b937 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -21,6 +21,11 @@ enum {
MLX5E_TC_TUNNEL_TYPE_MPLSOUDP,
};
+struct mlx5e_encap_key {
+ const struct ip_tunnel_key *ip_tun_key;
+ struct mlx5e_tc_tunnel *tc_tunnel;
+};
+
struct mlx5e_tc_tunnel {
int tunnel_type;
enum mlx5_flow_match_level match_level;
@@ -44,6 +49,8 @@ struct mlx5e_tc_tunnel {
struct flow_cls_offload *f,
void *headers_c,
void *headers_v);
+ bool (*encap_info_equal)(struct mlx5e_encap_key *a,
+ struct mlx5e_encap_key *b);
};
extern struct mlx5e_tc_tunnel vxlan_tunnel;
@@ -101,6 +108,9 @@ int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
void *headers_c,
void *headers_v);
+bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
+ struct mlx5e_encap_key *b);
+
#endif /* CONFIG_MLX5_ESWITCH */
#endif //__MLX5_EN_TC_TUNNEL_H__
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 7f7b0f6dcdf9..9f16ad2c0710 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -476,16 +476,11 @@ void mlx5e_detach_decap(struct mlx5e_priv *priv,
mlx5e_decap_dealloc(priv, d);
}
-struct encap_key {
- const struct ip_tunnel_key *ip_tun_key;
- struct mlx5e_tc_tunnel *tc_tunnel;
-};
-
-static int cmp_encap_info(struct encap_key *a,
- struct encap_key *b)
+bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
+ struct mlx5e_encap_key *b)
{
- return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
- a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
+ return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) == 0 &&
+ a->tc_tunnel->tunnel_type == b->tc_tunnel->tunnel_type;
}
static int cmp_decap_info(struct mlx5e_decap_key *a,
@@ -494,7 +489,7 @@ static int cmp_decap_info(struct mlx5e_decap_key *a,
return memcmp(&a->key, &b->key, sizeof(b->key));
}
-static int hash_encap_info(struct encap_key *key)
+static int hash_encap_info(struct mlx5e_encap_key *key)
{
return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
key->tc_tunnel->tunnel_type);
@@ -516,18 +511,18 @@ static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
}
static struct mlx5e_encap_entry *
-mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
+mlx5e_encap_get(struct mlx5e_priv *priv, struct mlx5e_encap_key *key,
uintptr_t hash_key)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_encap_key e_key;
struct mlx5e_encap_entry *e;
- struct encap_key e_key;
hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
encap_hlist, hash_key) {
e_key.ip_tun_key = &e->tun_info->key;
e_key.tc_tunnel = e->tunnel;
- if (!cmp_encap_info(&e_key, key) &&
+ if (e->tunnel->encap_info_equal(&e_key, key) &&
mlx5e_encap_take(e))
return e;
}
@@ -694,8 +689,8 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr = flow->attr;
const struct ip_tunnel_info *tun_info;
unsigned long tbl_time_before = 0;
- struct encap_key key;
struct mlx5e_encap_entry *e;
+ struct mlx5e_encap_key key;
bool entry_created = false;
unsigned short family;
uintptr_t hash_key;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
index 7ed3f9f79f11..f5b26f5a7de4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
@@ -329,6 +329,34 @@ static int mlx5e_tc_tun_parse_geneve(struct mlx5e_priv *priv,
return mlx5e_tc_tun_parse_geneve_options(priv, spec, f);
}
+static bool mlx5e_tc_tun_encap_info_equal_geneve(struct mlx5e_encap_key *a,
+ struct mlx5e_encap_key *b)
+{
+ struct ip_tunnel_info *a_info;
+ struct ip_tunnel_info *b_info;
+ bool a_has_opts, b_has_opts;
+
+ if (!mlx5e_tc_tun_encap_info_equal_generic(a, b))
+ return false;
+
+ a_has_opts = !!(a->ip_tun_key->tun_flags & TUNNEL_GENEVE_OPT);
+ b_has_opts = !!(b->ip_tun_key->tun_flags & TUNNEL_GENEVE_OPT);
+
+ /* keys are equal when neither has any options attached */
+ if (!a_has_opts && !b_has_opts)
+ return true;
+
+ if (a_has_opts != b_has_opts)
+ return false;
+
+ /* geneve options are stored in memory right after the ip_tunnel_info struct */
+ a_info = container_of(a->ip_tun_key, struct ip_tunnel_info, key);
+ b_info = container_of(b->ip_tun_key, struct ip_tunnel_info, key);
+
+ return a_info->options_len == b_info->options_len &&
+ memcmp(a_info + 1, b_info + 1, a_info->options_len) == 0;
+}
+
struct mlx5e_tc_tunnel geneve_tunnel = {
.tunnel_type = MLX5E_TC_TUNNEL_TYPE_GENEVE,
.match_level = MLX5_MATCH_L4,
@@ -338,4 +366,5 @@ struct mlx5e_tc_tunnel geneve_tunnel = {
.generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_geneve,
.parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_geneve,
.parse_tunnel = mlx5e_tc_tun_parse_geneve,
+ .encap_info_equal = mlx5e_tc_tun_encap_info_equal_geneve,
};
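
The geneve comparator leans on the tunnel metadata layout: the ip_tunnel_key it is handed is embedded in a struct ip_tunnel_info, and any TLV options occupy the options_len bytes directly after that struct, which is what container_of() plus `info + 1` (the ip_tunnel_info_opts() accessor expresses the same thing) reach. A hedged sketch of an options comparison on that layout:

#include <linux/string.h>
#include <net/ip_tunnels.h>

/* illustrative only: compare the trailing options of two tunnel infos */
static bool tun_opts_equal(struct ip_tunnel_info *a, struct ip_tunnel_info *b)
{
	if (a->options_len != b->options_len)
		return false;

	/* options are stored contiguously right after the struct itself */
	return !memcmp(ip_tunnel_info_opts(a), ip_tunnel_info_opts(b),
		       a->options_len);
}
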
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
index 2805416c32a3..ada14f0574dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
@@ -94,4 +94,5 @@ struct mlx5e_tc_tunnel gre_tunnel = {
.generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_gretap,
.parse_udp_ports = NULL,
.parse_tunnel = mlx5e_tc_tun_parse_gretap,
+ .encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
index 3479672e84cf..60952b33b568 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
@@ -131,4 +131,5 @@ struct mlx5e_tc_tunnel mplsoudp_tunnel = {
.generate_ip_tun_hdr = generate_ip_tun_hdr,
.parse_udp_ports = parse_udp_ports,
.parse_tunnel = parse_tunnel,
+ .encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index 038a0f1cecec..4267f3a1059e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -150,4 +150,5 @@ struct mlx5e_tc_tunnel vxlan_tunnel = {
.generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_vxlan,
.parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_vxlan,
.parse_tunnel = mlx5e_tc_tun_parse_vxlan,
+ .encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 2371b83dad9c..055c3bc23733 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -441,4 +441,10 @@ static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
return wqe_size * 2 - 1;
}
+static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
+{
+ u16 room = sq->reserved_room + mlx5e_stop_room_for_wqe(wqe_size);
+
+ return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
+}
#endif
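
mlx5e_icosq_can_post_wqe() is plain ring headroom accounting. mlx5e_stop_room_for_wqe() returns 2 * wqe_size - 1 because, in the worst case, the producer must pad to the ring edge with up to wqe_size - 1 NOPs before the contiguous WQE itself fits; reserved_room then holds back an extra region for other posters on the same queue. A worked example with hypothetical sizes:

/* hypothetical sizes, mirroring the arithmetic above:
 *   wqe_size      = 4 WQEBBs
 *   stop room     = 2 * 4 - 1 = 7   (worst-case edge padding + the WQE)
 *   reserved_room = 1               (e.g. one NOP held back)
 * => posting requires at least 8 free slots between pc and cc
 */
static inline unsigned int icosq_required_room(unsigned int reserved_room,
					       unsigned int wqe_size)
{
	return reserved_room + (2 * wqe_size - 1);
}
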
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index d06532d0baa4..19d22a63313f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -46,7 +46,8 @@ struct mlx5e_ktls_offload_context_rx {
struct tls12_crypto_info_aes_gcm_128 crypto_info;
struct accel_rule rule;
struct sock *sk;
- struct mlx5e_rq_stats *stats;
+ struct mlx5e_rq_stats *rq_stats;
+ struct mlx5e_tls_sw_stats *sw_stats;
struct completion add_ctx;
u32 tirn;
u32 key_id;
@@ -137,11 +138,10 @@ post_static_params(struct mlx5e_icosq *sq,
{
struct mlx5e_set_tls_static_params_wqe *wqe;
struct mlx5e_icosq_wqe_info wi;
- u16 pi, num_wqebbs, room;
+ u16 pi, num_wqebbs;
num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
- room = mlx5e_stop_room_for_wqe(num_wqebbs);
- if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
+ if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
return ERR_PTR(-ENOSPC);
pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
@@ -168,11 +168,10 @@ post_progress_params(struct mlx5e_icosq *sq,
{
struct mlx5e_set_tls_progress_params_wqe *wqe;
struct mlx5e_icosq_wqe_info wi;
- u16 pi, num_wqebbs, room;
+ u16 pi, num_wqebbs;
num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
- room = mlx5e_stop_room_for_wqe(num_wqebbs);
- if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
+ if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
return ERR_PTR(-ENOSPC);
pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
@@ -218,7 +217,7 @@ unlock:
return err;
err_out:
- priv_rx->stats->tls_resync_req_skip++;
+ priv_rx->rq_stats->tls_resync_req_skip++;
err = PTR_ERR(cseg);
complete(&priv_rx->add_ctx);
goto unlock;
@@ -277,17 +276,15 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
buf->priv_rx = priv_rx;
- BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);
-
spin_lock_bh(&sq->channel->async_icosq_lock);
- if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
+ if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) {
spin_unlock_bh(&sq->channel->async_icosq_lock);
err = -ENOSPC;
goto err_dma_unmap;
}
- pi = mlx5e_icosq_get_next_pi(sq, 1);
+ pi = mlx5e_icosq_get_next_pi(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS);
wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi);
#define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS))
@@ -307,7 +304,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
wi = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS,
- .num_wqebbs = 1,
+ .num_wqebbs = MLX5E_KTLS_GET_PROGRESS_WQEBBS,
.tls_get_params.buf = buf,
};
icosq_fill_wi(sq, pi, &wi);
@@ -322,7 +319,7 @@ err_dma_unmap:
err_free:
kfree(buf);
err_out:
- priv_rx->stats->tls_resync_req_skip++;
+ priv_rx->rq_stats->tls_resync_req_skip++;
return err;
}
@@ -378,13 +375,13 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
cseg = post_static_params(sq, priv_rx);
if (IS_ERR(cseg)) {
- priv_rx->stats->tls_resync_res_skip++;
+ priv_rx->rq_stats->tls_resync_res_skip++;
err = PTR_ERR(cseg);
goto unlock;
}
/* Do not increment priv_rx refcnt, CQE handling is empty */
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
- priv_rx->stats->tls_resync_res_ok++;
+ priv_rx->rq_stats->tls_resync_res_ok++;
unlock:
spin_unlock_bh(&c->async_icosq_lock);
@@ -420,13 +417,13 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
auth_state = MLX5_GET(tls_progress_params, ctx, auth_state);
if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
- priv_rx->stats->tls_resync_req_skip++;
+ priv_rx->rq_stats->tls_resync_req_skip++;
goto out;
}
hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
- priv_rx->stats->tls_resync_req_end++;
+ priv_rx->rq_stats->tls_resync_req_end++;
out:
mlx5e_ktls_priv_rx_put(priv_rx);
dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
@@ -609,7 +606,8 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
priv_rx->rxq = rxq;
priv_rx->sk = sk;
- priv_rx->stats = &priv->channel_stats[rxq].rq;
+ priv_rx->rq_stats = &priv->channel_stats[rxq].rq;
+ priv_rx->sw_stats = &priv->tls->sw_stats;
mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
rqtn = priv->direct_tir[rxq].rqt.rqtn;
@@ -630,7 +628,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
if (err)
goto err_post_wqes;
- priv_rx->stats->tls_ctx++;
+ atomic64_inc(&priv_rx->sw_stats->rx_tls_ctx);
return 0;
@@ -666,7 +664,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
if (cancel_work_sync(&resync->work))
mlx5e_ktls_priv_rx_put(priv_rx);
- priv_rx->stats->tls_del++;
+ atomic64_inc(&priv_rx->sw_stats->rx_tls_del);
if (priv_rx->rule.rule)
mlx5e_accel_fs_del_sk(priv_rx->rule.rule);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index d16def68ecff..51bdf71073f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.
+#include "en_accel/tls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
@@ -50,6 +51,7 @@ static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
struct mlx5e_ktls_offload_context_tx {
struct tls_offload_context_tx *tx_ctx;
struct tls12_crypto_info_aes_gcm_128 crypto_info;
+ struct mlx5e_tls_sw_stats *sw_stats;
u32 expected_seq;
u32 tisn;
u32 key_id;
@@ -99,6 +101,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
if (err)
goto err_create_key;
+ priv_tx->sw_stats = &priv->tls->sw_stats;
priv_tx->expected_seq = start_offload_tcp_sn;
priv_tx->crypto_info =
*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
@@ -111,6 +114,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
goto err_create_tis;
priv_tx->ctx_post_pending = true;
+ atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);
return 0;
@@ -452,7 +456,6 @@ bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *s
if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
- stats->tls_ctx++;
}
seq = ntohl(tcp_hdr(skb)->seq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
index bd270a85c804..4c9274d390da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
@@ -41,10 +41,13 @@
#include "en.h"
struct mlx5e_tls_sw_stats {
+ atomic64_t tx_tls_ctx;
atomic64_t tx_tls_drop_metadata;
atomic64_t tx_tls_drop_resync_alloc;
atomic64_t tx_tls_drop_no_sync_data;
atomic64_t tx_tls_drop_bypass_required;
+ atomic64_t rx_tls_ctx;
+ atomic64_t rx_tls_del;
atomic64_t rx_tls_drop_resync_request;
atomic64_t rx_tls_resync_request;
atomic64_t rx_tls_resync_reply;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
index b949b9a7538b..29463bdb7715 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
@@ -45,49 +45,60 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) },
};
+static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_ctx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_del) },
+};
+
#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
-#define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc)
-
-static bool is_tls_atomic_stats(struct mlx5e_priv *priv)
+static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv)
{
- return priv->tls && !mlx5_accel_is_ktls_device(priv->mdev);
+ if (!priv->tls)
+ return NULL;
+ if (mlx5_accel_is_ktls_device(priv->mdev))
+ return mlx5e_ktls_sw_stats_desc;
+ return mlx5e_tls_sw_stats_desc;
}
int mlx5e_tls_get_count(struct mlx5e_priv *priv)
{
- if (!is_tls_atomic_stats(priv))
+ if (!priv->tls)
return 0;
-
- return NUM_TLS_SW_COUNTERS;
+ if (mlx5_accel_is_ktls_device(priv->mdev))
+ return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
+ return ARRAY_SIZE(mlx5e_tls_sw_stats_desc);
}
int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
{
- unsigned int i, idx = 0;
+ const struct counter_desc *stats_desc;
+ unsigned int i, n, idx = 0;
- if (!is_tls_atomic_stats(priv))
- return 0;
+ stats_desc = get_tls_atomic_stats(priv);
+ n = mlx5e_tls_get_count(priv);
- for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+ for (i = 0; i < n; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_tls_sw_stats_desc[i].format);
+ stats_desc[i].format);
- return NUM_TLS_SW_COUNTERS;
+ return n;
}
int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
{
- int i, idx = 0;
+ const struct counter_desc *stats_desc;
+ unsigned int i, n, idx = 0;
- if (!is_tls_atomic_stats(priv))
- return 0;
+ stats_desc = get_tls_atomic_stats(priv);
+ n = mlx5e_tls_get_count(priv);
- for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+ for (i = 0; i < n; i++)
data[idx++] =
MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
- mlx5e_tls_sw_stats_desc, i);
+ stats_desc, i);
- return NUM_TLS_SW_COUNTERS;
+ return n;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index f5f2a8fd0046..53802e18af90 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -758,11 +758,11 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
return 0;
}
-static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
- u32 eth_proto_cap,
- u8 connector_type, bool ext)
+static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev,
+ struct ethtool_link_ksettings *link_ksettings,
+ u32 eth_proto_cap, u8 connector_type)
{
- if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) {
if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
| MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
| MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
@@ -898,9 +898,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
[MLX5E_PORT_OTHER] = PORT_OTHER,
};
-static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext)
+static u8 get_connector_port(struct mlx5_core_dev *mdev, u32 eth_proto, u8 connector_type)
{
- if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
+ if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type))
return ptys2connector_type[connector_type];
if (eth_proto &
@@ -1001,11 +1001,11 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
data_rate_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
-
- link_ksettings->base.port = get_connector_port(eth_proto_oper,
- connector_type, ext);
- ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
- connector_type, ext);
+ connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ?
+ connector_type : MLX5E_PORT_UNKNOWN;
+ link_ksettings->base.port = get_connector_port(mdev, eth_proto_oper, connector_type);
+ ptys2ethtool_supported_advertised_port(mdev, link_ksettings, eth_proto_admin,
+ connector_type);
get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
if (an_status == MLX5_AN_COMPLETE)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 158f947a8503..5db63b9f3b70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1091,6 +1091,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.bfreg.map;
+ sq->reserved_room = param->stop_room;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -2350,6 +2351,24 @@ void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
}
+static void mlx5e_build_async_icosq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
+ u8 log_wq_size,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ mlx5e_build_sq_param_common(priv, param);
+
+ /* async_icosq is used by XSK only if xdp_prog is active */
+ if (params->xdp_prog)
+ param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
+ MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
+ MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
+ mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
+}
+
void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
struct mlx5e_params *params,
struct mlx5e_sq_param *param)
@@ -2398,7 +2417,7 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
mlx5e_build_sq_param(priv, params, &cparam->txq_sq);
mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
- mlx5e_build_icosq_param(priv, async_icosq_log_wq_sz, &cparam->async_icosq);
+ mlx5e_build_async_icosq_param(priv, params, async_icosq_log_wq_sz, &cparam->async_icosq);
}
int mlx5e_open_channels(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index a132fff7a980..8d39bfee84a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1107,8 +1107,9 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
mlx5e_rep_tc_enable(priv);
- mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
- 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
+ if (MLX5_CAP_GEN(mdev, uplink_follow))
+ mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
+ 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
mlx5_lag_add(mdev, netdev);
priv->events_nb.notifier_call = uplink_rep_async_event;
mlx5_notifier_register(mdev, &priv->events_nb);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 92c5b81427b9..88a01c59ce61 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -116,7 +116,6 @@ static const struct counter_desc sw_stats_desc[] = {
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
@@ -180,8 +179,6 @@ static const struct counter_desc sw_stats_desc[] = {
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
@@ -342,8 +339,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
#ifdef CONFIG_MLX5_EN_TLS
s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
- s->rx_tls_ctx += rq_stats->tls_ctx;
- s->rx_tls_del += rq_stats->tls_del;
s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt;
s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start;
s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
@@ -390,7 +385,6 @@ static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
#ifdef CONFIG_MLX5_EN_TLS
s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
- s->tx_tls_ctx += sq_stats->tls_ctx;
s->tx_tls_ooo += sq_stats->tls_ooo;
s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
@@ -1622,8 +1616,6 @@ static const struct counter_desc rq_stats_desc[] = {
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
@@ -1650,7 +1642,6 @@ static const struct counter_desc sq_stats_desc[] = {
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
@@ -1776,7 +1767,6 @@ static const struct counter_desc qos_sq_stats_desc[] = {
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
- { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 93c41312fb03..adf9b7b8b712 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -191,7 +191,6 @@ struct mlx5e_sw_stats {
#ifdef CONFIG_MLX5_EN_TLS
u64 tx_tls_encrypted_packets;
u64 tx_tls_encrypted_bytes;
- u64 tx_tls_ctx;
u64 tx_tls_ooo;
u64 tx_tls_dump_packets;
u64 tx_tls_dump_bytes;
@@ -202,8 +201,6 @@ struct mlx5e_sw_stats {
u64 rx_tls_decrypted_packets;
u64 rx_tls_decrypted_bytes;
- u64 rx_tls_ctx;
- u64 rx_tls_del;
u64 rx_tls_resync_req_pkt;
u64 rx_tls_resync_req_start;
u64 rx_tls_resync_req_end;
@@ -334,8 +331,6 @@ struct mlx5e_rq_stats {
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_decrypted_packets;
u64 tls_decrypted_bytes;
- u64 tls_ctx;
- u64 tls_del;
u64 tls_resync_req_pkt;
u64 tls_resync_req_start;
u64 tls_resync_req_end;
@@ -364,7 +359,6 @@ struct mlx5e_sq_stats {
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_encrypted_packets;
u64 tls_encrypted_bytes;
- u64 tls_ctx;
u64 tls_ooo;
u64 tls_dump_packets;
u64 tls_dump_bytes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index df2a0af854bb..d675107d9eca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1895,6 +1895,9 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
return 0;
flow_rule_match_meta(rule, &match);
+ if (!match.mask->ingress_ifindex)
+ return 0;
+
if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 174dfbc996c6..1fa9c18563da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -931,13 +931,24 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
mutex_unlock(&table->lock);
}
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+#define MLX5_MAX_ASYNC_EQS 4
+#else
+#define MLX5_MAX_ASYNC_EQS 3
+#endif
+
int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
+ int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
+ MLX5_CAP_GEN(dev, max_num_eqs) :
+ 1 << MLX5_CAP_GEN(dev, log_max_eq);
int err;
eq_table->num_comp_eqs =
- mlx5_irq_get_num_comp(eq_table->irq_table);
+ min_t(int,
+ mlx5_irq_get_num_comp(eq_table->irq_table),
+ num_eqs - MLX5_MAX_ASYNC_EQS);
err = create_async_eqs(dev);
if (err) {
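
The min_t() above stops the driver from spending its whole EQ budget on completion queues: MLX5_MAX_ASYNC_EQS event queues are held back for the async paths (command interface, async events, page requests, plus one more when on-demand paging is compiled in, hence 4 versus 3). A worked example with a hypothetical firmware capability value:

#include <linux/minmax.h>

/* e.g. firmware reports max_num_eqs = 16 and ODP is enabled:
 *   async budget = 4 (cmd, async events, pages, ODP)
 *   comp EQs     = min(irq completion vectors, 16 - 4) = at most 12
 */
static int comp_eq_budget(int irq_comp_vectors, int max_num_eqs,
			  int max_async_eqs)
{
	return min(irq_comp_vectors, max_num_eqs - max_async_eqs);
}
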
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
index 6f6772bf61a2..3da7becc1069 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
@@ -248,7 +248,7 @@ err_mod_hdr_regc0:
err_ethertype:
kfree(rule);
out:
- kfree(rule_spec);
+ kvfree(rule_spec);
return err;
}
@@ -328,7 +328,7 @@ static int mlx5_create_indir_recirc_group(struct mlx5_eswitch *esw,
e->recirc_cnt = 0;
out:
- kfree(in);
+ kvfree(in);
return err;
}
@@ -347,7 +347,7 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
- kfree(in);
+ kvfree(in);
return -ENOMEM;
}
@@ -371,8 +371,8 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
}
err_out:
- kfree(spec);
- kfree(in);
+ kvfree(spec);
+ kvfree(in);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 8694b83968b4..d4a2f8d1ee9f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -537,6 +537,14 @@ esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *
return i;
}
+static bool
+esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
+{
+ return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
+ mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+ MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
+}
+
static int
esw_setup_dests(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
@@ -550,9 +558,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
int err = 0;
if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
- MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) &&
- mlx5_eswitch_vport_match_metadata_enabled(esw) &&
- MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
+ esw_src_port_rewrite_supported(esw))
attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
if (attr->dest_ft) {
@@ -1716,36 +1722,40 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
}
esw->fdb_table.offloads.send_to_vport_grp = g;
- /* meta send to vport */
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_MISC_PARAMETERS_2);
-
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+ if (esw_src_port_rewrite_supported(esw)) {
+ /* meta send to vport */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
- MLX5_SET(fte_match_param, match_criteria,
- misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
- MLX5_SET(fte_match_param, match_criteria,
- misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
- num_vfs = esw->esw_funcs.num_vfs;
- if (num_vfs) {
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + num_vfs - 1);
- ix += num_vfs;
+ MLX5_SET(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
- err);
- goto send_vport_meta_err;
+ num_vfs = esw->esw_funcs.num_vfs;
+ if (num_vfs) {
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ end_flow_index, ix + num_vfs - 1);
+ ix += num_vfs;
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
+ err);
+ goto send_vport_meta_err;
+ }
+ esw->fdb_table.offloads.send_to_vport_meta_grp = g;
+
+ err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
+ if (err)
+ goto meta_rule_err;
}
- esw->fdb_table.offloads.send_to_vport_meta_grp = g;
-
- err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
- if (err)
- goto meta_rule_err;
}
if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index d9d9e1f488f9..ba28ac7e79bc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -21,6 +21,7 @@
#include <net/red.h>
#include <net/vxlan.h>
#include <net/flow_offload.h>
+#include <net/inet_ecn.h>
#include "port.h"
#include "core.h"
@@ -347,6 +348,20 @@ struct mlxsw_sp_port_type_speed_ops {
u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
};
+static inline u8 mlxsw_sp_tunnel_ecn_decap(u8 outer_ecn, u8 inner_ecn,
+ bool *trap_en)
+{
+ bool set_ce = false;
+
+ *trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
+ if (set_ce)
+ return INET_ECN_CE;
+ else if (outer_ecn == INET_ECN_ECT_1 && inner_ecn == INET_ECN_ECT_0)
+ return INET_ECN_ECT_1;
+ else
+ return inner_ecn;
+}
+
static inline struct net_device *
mlxsw_sp_bridge_vxlan_dev_find(struct net_device *br_dev)
{
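
mlxsw_sp_tunnel_ecn_decap() wraps __INET_ECN_decapsulate(), the RFC 6040 decapsulation rules, and adds the one combination the hardware tables must spell out explicitly: outer ECT(1) with inner ECT(0) yields ECT(1). A hedged snippet that tabulates the helper for all sixteen outer/inner pairs, assuming it is compiled somewhere spectrum.h is already included:

#include <linux/printk.h>

/* illustrative: dump the resulting inner ECN marking, and whether the
 * pair is trapped to the CPU (a Not-ECT inner under an ECN-marked outer)
 */
static void dump_ecn_decap_table(void)
{
	u8 outer, inner;

	for (outer = 0; outer < 4; outer++)
		for (inner = 0; inner < 4; inner++) {
			bool trap_en;
			u8 new_inner = mlxsw_sp_tunnel_ecn_decap(outer, inner,
								 &trap_en);

			pr_info("outer %u inner %u -> %u%s\n", outer, inner,
				new_inner, trap_en ? " (trap)" : "");
		}
}
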
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 0bd64169bf81..078601d31cde 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -1230,16 +1230,22 @@ mlxsw_sp1_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
u32 ptys_eth_proto,
struct ethtool_link_ksettings *cmd)
{
+ struct mlxsw_sp1_port_link_mode link;
int i;
- cmd->link_mode = -1;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->lanes = 0;
if (!carrier_ok)
return;
for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
- if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
- cmd->link_mode = mlxsw_sp1_port_link_mode[i].mask_ethtool;
+ if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) {
+ link = mlxsw_sp1_port_link_mode[i];
+ ethtool_params_from_link_mode(cmd,
+ link.mask_ethtool);
+ }
}
}
@@ -1672,7 +1678,9 @@ mlxsw_sp2_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
struct mlxsw_sp2_port_link_mode link;
int i;
- cmd->link_mode = -1;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->lanes = 0;
if (!carrier_ok)
return;
@@ -1680,7 +1688,8 @@ mlxsw_sp2_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) {
link = mlxsw_sp2_port_link_mode[i];
- cmd->link_mode = link.mask_ethtool[1];
+ ethtool_params_from_link_mode(cmd,
+ link.mask_ethtool[1]);
}
}
}
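
Both mlxsw hunks replace the write to the removed cmd->link_mode field with ethtool_params_from_link_mode(), which looks the mode up in the ethtool core's link_mode_params table and fills in speed, lanes, and duplex in one call. A hedged usage example (the mode chosen here is arbitrary):

#include <linux/ethtool.h>

/* illustrative: fill speed/duplex/lanes for 100GBASE-CR2 on carrier up */
static void report_link_mode(struct ethtool_link_ksettings *cmd,
			     bool carrier_ok)
{
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;
	cmd->lanes = 0;
	if (!carrier_ok)
		return;

	ethtool_params_from_link_mode(cmd,
				      ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT);
	/* now: cmd->base.speed == SPEED_100000, cmd->lanes == 2,
	 * cmd->base.duplex == DUPLEX_FULL
	 */
}
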
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 6ccca39bae84..64a8f838eb53 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -335,12 +335,11 @@ static int mlxsw_sp_ipip_ecn_decap_init_one(struct mlxsw_sp *mlxsw_sp,
u8 inner_ecn, u8 outer_ecn)
{
char tidem_pl[MLXSW_REG_TIDEM_LEN];
- bool trap_en, set_ce = false;
u8 new_inner_ecn;
+ bool trap_en;
- trap_en = __INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
- new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
-
+ new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
+ &trap_en);
mlxsw_reg_tidem_pack(tidem_pl, outer_ecn, inner_ecn, new_inner_ecn,
trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tidem), tidem_pl);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index e5ec595593f4..9eba8fa684ae 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -909,12 +909,11 @@ static int __mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp,
u8 inner_ecn, u8 outer_ecn)
{
char tndem_pl[MLXSW_REG_TNDEM_LEN];
- bool trap_en, set_ce = false;
u8 new_inner_ecn;
+ bool trap_en;
- trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
- new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
-
+ new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
+ &trap_en);
mlxsw_reg_tndem_pack(tndem_pl, outer_ecn, inner_ecn, new_inner_ecn,
trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tndem), tndem_pl);
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 1c3e204d727c..7b6794aa8ea9 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -885,8 +885,8 @@ static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
}
mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
- mac_rx |= (((new_mtu + ETH_HLEN + 4) << MAC_RX_MAX_SIZE_SHIFT_) &
- MAC_RX_MAX_SIZE_MASK_);
+ mac_rx |= (((new_mtu + ETH_HLEN + ETH_FCS_LEN)
+ << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
lan743x_csr_write(adapter, MAC_RX, mac_rx);
if (enabled) {
@@ -1944,7 +1944,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
struct sk_buff *skb;
dma_addr_t dma_ptr;
- buffer_length = netdev->mtu + ETH_HLEN + 4 + RX_HEAD_PADDING;
+ buffer_length = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + RX_HEAD_PADDING;
descriptor = &rx->ring_cpu_ptr[index];
buffer_info = &rx->buffer_info[index];
@@ -2040,7 +2040,7 @@ lan743x_rx_trim_skb(struct sk_buff *skb, int frame_length)
dev_kfree_skb_irq(skb);
return NULL;
}
- frame_length = max_t(int, 0, frame_length - RX_HEAD_PADDING - 4);
+ frame_length = max_t(int, 0, frame_length - ETH_FCS_LEN);
if (skb->len > frame_length) {
skb->tail -= skb->len - frame_length;
skb->len = frame_length;
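
Both lan743x hunks replace a magic 4 with ETH_FCS_LEN so the RX buffer sizing and the trim agree on what surrounds the payload; the trim also stops deducting RX_HEAD_PADDING, which the hardware's reported frame length does not include. The arithmetic for a standard 1500-byte MTU, as a compile-time check:

#include <linux/if_ether.h>
#include <linux/build_bug.h>

/* worked example: MTU 1500 -> 1500 + 14 (Ethernet header) + 4 (FCS) */
#define EXAMPLE_MTU		1500
#define EXAMPLE_MAX_FRAME	(EXAMPLE_MTU + ETH_HLEN + ETH_FCS_LEN)

static_assert(EXAMPLE_MAX_FRAME == 1518);
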
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 1634ca6d4a8f..c84c8bf2bc20 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2897,7 +2897,7 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
dev_kfree_skb_any(curr);
if (segs != NULL) {
curr = segs;
- segs = segs->next;
+ segs = next;
curr->next = NULL;
dev_kfree_skb_any(segs);
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index 0e2db6ea79e9..2ec62c8d86e1 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -454,6 +454,7 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
dev_consume_skb_any(skb);
else
dev_kfree_skb_any(skb);
+ return;
}
nfp_ccm_rx(&bpf->ccm, skb);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index caf12eec9945..56833a41f3d2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -190,6 +190,7 @@ struct nfp_fl_internal_ports {
* @qos_rate_limiters: Current active qos rate limiters
* @qos_stats_lock: Lock on qos stats updates
* @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded
+ * @merge_table: Hash table to store merged flows
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -223,6 +224,7 @@ struct nfp_flower_priv {
unsigned int qos_rate_limiters;
spinlock_t qos_stats_lock; /* Protect the qos stats */
int pre_tun_rule_cnt;
+ struct rhashtable merge_table;
};
/**
@@ -350,6 +352,12 @@ struct nfp_fl_payload_link {
};
extern const struct rhashtable_params nfp_flower_table_params;
+extern const struct rhashtable_params merge_table_params;
+
+struct nfp_merge_info {
+ u64 parent_ctx;
+ struct rhash_head ht_node;
+};
struct nfp_fl_stats_frame {
__be32 stats_con_id;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index aa06fcb38f8b..327bb56b3ef5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -490,6 +490,12 @@ const struct rhashtable_params nfp_flower_table_params = {
.automatic_shrinking = true,
};
+const struct rhashtable_params merge_table_params = {
+ .key_offset = offsetof(struct nfp_merge_info, parent_ctx),
+ .head_offset = offsetof(struct nfp_merge_info, ht_node),
+ .key_len = sizeof(u64),
+};
+
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
unsigned int host_num_mems)
{
@@ -506,6 +512,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
if (err)
goto err_free_flow_table;
+ err = rhashtable_init(&priv->merge_table, &merge_table_params);
+ if (err)
+ goto err_free_stats_ctx_table;
+
get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
/* Init ring buffer and unallocated mask_ids. */
@@ -513,7 +523,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
if (!priv->mask_ids.mask_id_free_list.buf)
- goto err_free_stats_ctx_table;
+ goto err_free_merge_table;
priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
@@ -550,6 +560,8 @@ err_free_last_used:
kfree(priv->mask_ids.last_used);
err_free_mask_id:
kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_merge_table:
+ rhashtable_destroy(&priv->merge_table);
err_free_stats_ctx_table:
rhashtable_destroy(&priv->stats_ctx_table);
err_free_flow_table:
@@ -568,6 +580,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
nfp_check_rhashtable_empty, NULL);
rhashtable_free_and_destroy(&priv->stats_ctx_table,
nfp_check_rhashtable_empty, NULL);
+ rhashtable_free_and_destroy(&priv->merge_table,
+ nfp_check_rhashtable_empty, NULL);
kvfree(priv->stats);
kfree(priv->mask_ids.mask_id_free_list.buf);
kfree(priv->mask_ids.last_used);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index d72225d64a75..e95969c462e4 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1009,6 +1009,8 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *merge_flow;
struct nfp_fl_key_ls merge_key_ls;
+ struct nfp_merge_info *merge_info;
+ u64 parent_ctx = 0;
int err;
ASSERT_RTNL();
@@ -1019,6 +1021,15 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
nfp_flower_is_merge_flow(sub_flow2))
return -EINVAL;
+ /* check if the two flows are already merged */
+ parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
+ parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
+ if (rhashtable_lookup_fast(&priv->merge_table,
+ &parent_ctx, merge_table_params)) {
+ nfp_flower_cmsg_warn(app, "The two flows are already merged.\n");
+ return 0;
+ }
+
err = nfp_flower_can_merge(sub_flow1, sub_flow2);
if (err)
return err;
@@ -1060,16 +1071,33 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
if (err)
goto err_release_metadata;
+ merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL);
+ if (!merge_info) {
+ err = -ENOMEM;
+ goto err_remove_rhash;
+ }
+ merge_info->parent_ctx = parent_ctx;
+ err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node,
+ merge_table_params);
+ if (err)
+ goto err_destroy_merge_info;
+
err = nfp_flower_xmit_flow(app, merge_flow,
NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
if (err)
- goto err_remove_rhash;
+ goto err_remove_merge_info;
merge_flow->in_hw = true;
sub_flow1->in_hw = false;
return 0;
+err_remove_merge_info:
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
+ &merge_info->ht_node,
+ merge_table_params));
+err_destroy_merge_info:
+ kfree(merge_info);
err_remove_rhash:
WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
&merge_flow->fl_node,
@@ -1359,7 +1387,9 @@ nfp_flower_remove_merge_flow(struct nfp_app *app,
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload_link *link, *temp;
+ struct nfp_merge_info *merge_info;
struct nfp_fl_payload *origin;
+ u64 parent_ctx = 0;
bool mod = false;
int err;
@@ -1396,8 +1426,22 @@ nfp_flower_remove_merge_flow(struct nfp_app *app,
err_free_links:
/* Clean any links connected with the merged flow. */
list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
- merge_flow.list)
+ merge_flow.list) {
+ u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id);
+
+ parent_ctx = (parent_ctx << 32) | (u64)(ctx_id);
nfp_flower_unlink_flow(link);
+ }
+
+ merge_info = rhashtable_lookup_fast(&priv->merge_table,
+ &parent_ctx,
+ merge_table_params);
+ if (merge_info) {
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
+ &merge_info->ht_node,
+ merge_table_params));
+ kfree(merge_info);
+ }
kfree(merge_flow->action_data);
kfree(merge_flow->mask_data);
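
The merge table keys each merged pair with one u64 built from the two sub-flows' 32-bit host context ids (sub_flow1's id in the high word, sub_flow2's in the low word), so a single rhashtable lookup answers whether a given ordered pair has already been merged. The key construction, isolated (ids assumed already converted to host byte order):

#include <linux/types.h>

/* pack two 32-bit flow context ids into one 64-bit hash key;
 * (a, b) deliberately differs from (b, a), preserving merge order
 */
static u64 merge_key(u32 ctx_id1, u32 ctx_id2)
{
	return ((u64)ctx_id1 << 32) | (u64)ctx_id2;
}
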
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 581a92fc3292..1df2c002c9f6 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -2350,6 +2350,13 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
pcie_set_readrq(tp->pci_dev, readrq);
+
+ /* Chip doesn't support pause in jumbo mode */
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ tp->phydev->advertising, !jumbo);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ tp->phydev->advertising, !jumbo);
+ phy_start_aneg(tp->phydev);
}
DECLARE_RTL_COND(rtl_chipcmd_cond)
@@ -4630,8 +4637,6 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
if (!tp->supports_gmii)
phy_set_max_speed(phydev, SPEED_100);
- phy_support_asym_pause(phydev);
-
phy_attached_info(phydev);
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 208cae344ffa..4749bd0af160 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1380,88 +1380,6 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
}
/**
- * stmmac_reinit_rx_buffers - reinit the RX descriptor buffer.
- * @priv: driver private structure
- * Description: this function is called to re-allocate a receive buffer, perform
- * the DMA mapping and init the descriptor.
- */
-static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
-{
- u32 rx_count = priv->plat->rx_queues_to_use;
- u32 queue;
- int i;
-
- for (queue = 0; queue < rx_count; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
- for (i = 0; i < priv->dma_rx_size; i++) {
- struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
-
- if (buf->page) {
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
- buf->page = NULL;
- }
-
- if (priv->sph && buf->sec_page) {
- page_pool_recycle_direct(rx_q->page_pool, buf->sec_page);
- buf->sec_page = NULL;
- }
- }
- }
-
- for (queue = 0; queue < rx_count; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
- for (i = 0; i < priv->dma_rx_size; i++) {
- struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
- struct dma_desc *p;
-
- if (priv->extend_desc)
- p = &((rx_q->dma_erx + i)->basic);
- else
- p = rx_q->dma_rx + i;
-
- if (!buf->page) {
- buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
- if (!buf->page)
- goto err_reinit_rx_buffers;
-
- buf->addr = page_pool_get_dma_addr(buf->page);
- }
-
- if (priv->sph && !buf->sec_page) {
- buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
- if (!buf->sec_page)
- goto err_reinit_rx_buffers;
-
- buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
- }
-
- stmmac_set_desc_addr(priv, p, buf->addr);
- if (priv->sph)
- stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
- else
- stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
- if (priv->dma_buf_sz == BUF_SIZE_16KiB)
- stmmac_init_desc3(priv, p);
- }
- }
-
- return;
-
-err_reinit_rx_buffers:
- do {
- while (--i >= 0)
- stmmac_free_rx_buffer(priv, queue, i);
-
- if (queue == 0)
- break;
-
- i = priv->dma_rx_size;
- } while (queue-- > 0);
-}
-
-/**
* init_dma_rx_desc_rings - init the RX descriptor rings
* @dev: net device structure
* @flags: gfp flag.
@@ -5428,7 +5346,7 @@ int stmmac_resume(struct device *dev)
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
- stmmac_reinit_rx_buffers(priv);
+
stmmac_free_tx_skbufs(priv);
stmmac_clear_descriptors(priv);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 1e966a39967e..aca7f82f6791 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -504,6 +504,18 @@ static inline u32 axinet_ior_read_mcr(struct axienet_local *lp)
return axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
}
+static inline void axienet_lock_mii(struct axienet_local *lp)
+{
+ if (lp->mii_bus)
+ mutex_lock(&lp->mii_bus->mdio_lock);
+}
+
+static inline void axienet_unlock_mii(struct axienet_local *lp)
+{
+ if (lp->mii_bus)
+ mutex_unlock(&lp->mii_bus->mdio_lock);
+}
+
/**
* axienet_iow - Memory mapped Axi Ethernet register write
* @lp: Pointer to axienet local structure
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 5d677db0aee5..f8f8654ea728 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1053,9 +1053,9 @@ static int axienet_open(struct net_device *ndev)
* including the MDIO. MDIO must be disabled before resetting.
* Hold MDIO bus lock to avoid MDIO accesses during the reset.
*/
- mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_lock_mii(lp);
ret = axienet_device_reset(ndev);
- mutex_unlock(&lp->mii_bus->mdio_lock);
+ axienet_unlock_mii(lp);
ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
if (ret) {
@@ -1148,9 +1148,9 @@ static int axienet_stop(struct net_device *ndev)
}
/* Do a reset to ensure DMA is really stopped */
- mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_lock_mii(lp);
__axienet_device_reset(lp);
- mutex_unlock(&lp->mii_bus->mdio_lock);
+ axienet_unlock_mii(lp);
cancel_work_sync(&lp->dma_err_task);
@@ -1709,9 +1709,9 @@ static void axienet_dma_err_handler(struct work_struct *work)
* including the MDIO. MDIO must be disabled before resetting.
* Hold MDIO bus lock to avoid MDIO accesses during the reset.
*/
- mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_lock_mii(lp);
__axienet_device_reset(lp);
- mutex_unlock(&lp->mii_bus->mdio_lock);
+ axienet_unlock_mii(lp);
for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 4ac0373326ef..42f31c681846 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -891,6 +891,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ return -EINVAL;
+
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
geneve->cfg.info.key.tp_dst, sport);
@@ -908,8 +911,16 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
info = skb_tunnel_info(skb);
if (info) {
- info->key.u.ipv4.dst = fl4.saddr;
- info->key.u.ipv4.src = fl4.daddr;
+ struct ip_tunnel_info *unclone;
+
+ unclone = skb_tunnel_info_unclone(skb);
+ if (unlikely(!unclone)) {
+ dst_release(&rt->dst);
+ return -ENOMEM;
+ }
+
+ unclone->key.u.ipv4.dst = fl4.saddr;
+ unclone->key.u.ipv4.src = fl4.daddr;
}
if (!pskb_may_pull(skb, ETH_HLEN)) {
@@ -977,6 +988,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+ return -EINVAL;
+
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
geneve->cfg.info.key.tp_dst, sport);
@@ -993,8 +1007,16 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_info *info = skb_tunnel_info(skb);
if (info) {
- info->key.u.ipv6.dst = fl6.saddr;
- info->key.u.ipv6.src = fl6.daddr;
+ struct ip_tunnel_info *unclone;
+
+ unclone = skb_tunnel_info_unclone(skb);
+ if (unlikely(!unclone)) {
+ dst_release(dst);
+ return -ENOMEM;
+ }
+
+ unclone->key.u.ipv6.dst = fl6.saddr;
+ unclone->key.u.ipv6.src = fl6.daddr;
}
if (!pskb_may_pull(skb, ETH_HLEN)) {
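Both geneve hunks above (and the vxlan hunks further down) apply the same rule: the tunnel info returned by skb_tunnel_info() may be shared with other skbs, so it must be uncloned before the reply addresses are written back. A hedged kernel-side sketch of the pattern; tx_update_tunnel_info() is a hypothetical helper name and this compiles only in-tree:

/* Sketch: take a private copy of the (possibly shared) tunnel info
 * before writing into it; release the held route on failure, as the
 * hunks above do.
 */
static int tx_update_tunnel_info(struct sk_buff *skb, struct rtable *rt,
				 __be32 saddr, __be32 daddr)
{
	struct ip_tunnel_info *unclone;

	unclone = skb_tunnel_info_unclone(skb);
	if (unlikely(!unclone)) {
		dst_release(&rt->dst);
		return -ENOMEM;
	}

	/* swapped on purpose: the reply's dst is this flow's source */
	unclone->key.u.ipv4.dst = saddr;
	unclone->key.u.ipv4.src = daddr;
	return 0;
}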
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 0dd0ba915ab9..23ee0b14cbfa 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -365,6 +365,7 @@ static int atusb_alloc_urbs(struct atusb *atusb, int n)
return -ENOMEM;
}
usb_anchor_urb(urb, &atusb->idle_urbs);
+ usb_free_urb(urb);
n--;
}
return 0;
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index 53282a6d5928..287cccf8f7f4 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -369,7 +369,7 @@ EXPORT_SYMBOL_GPL(bcm_phy_enable_apd);
int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
{
- int val;
+ int val, mask = 0;
/* Enable EEE at PHY level */
val = phy_read_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL);
@@ -388,10 +388,17 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
if (val < 0)
return val;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported))
+ mask |= MDIO_EEE_1000T;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ phydev->supported))
+ mask |= MDIO_EEE_100TX;
+
if (enable)
- val |= (MDIO_EEE_100TX | MDIO_EEE_1000T);
+ val |= mask;
else
- val &= ~(MDIO_EEE_100TX | MDIO_EEE_1000T);
+ val &= ~mask;
phy_write_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV, (u32)val);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e26a5d663f8a..8018ddf7f316 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -3021,9 +3021,34 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
},
{
- .phy_id = MARVELL_PHY_ID_88E6390,
+ .phy_id = MARVELL_PHY_ID_88E6341_FAMILY,
.phy_id_mask = MARVELL_PHY_ID_MASK,
- .name = "Marvell 88E6390",
+ .name = "Marvell 88E6341 Family",
+ /* PHY_GBIT_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = m88e1510_probe,
+ .config_init = marvell_config_init,
+ .config_aneg = m88e6390_config_aneg,
+ .read_status = marvell_read_status,
+ .config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
+ .get_tunable = m88e1540_get_tunable,
+ .set_tunable = m88e1540_set_tunable,
+ .cable_test_start = marvell_vct7_cable_test_start,
+ .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start,
+ .cable_test_get_status = marvell_vct7_cable_test_get_status,
+ },
+ {
+ .phy_id = MARVELL_PHY_ID_88E6390_FAMILY,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E6390 Family",
/* PHY_GBIT_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
.probe = m88e6390_probe,
@@ -3107,7 +3132,8 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
- { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1548P, MARVELL_PHY_ID_MASK },
{ }
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index fc86da7f1628..4cf38be26dc9 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -69,6 +69,14 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>
+#include <linux/ieee802154.h>
+#include <linux/if_ltalk.h>
+#include <uapi/linux/if_fddi.h>
+#include <uapi/linux/if_hippi.h>
+#include <uapi/linux/if_fc.h>
+#include <net/ax25.h>
+#include <net/rose.h>
+#include <net/6lowpan.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
@@ -2919,6 +2927,45 @@ static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
return __tun_set_ebpf(tun, prog_p, prog);
}
+/* Return the correct value for tun->dev->addr_len based on tun->dev->type. */
+static unsigned char tun_get_addr_len(unsigned short type)
+{
+ switch (type) {
+ case ARPHRD_IP6GRE:
+ case ARPHRD_TUNNEL6:
+ return sizeof(struct in6_addr);
+ case ARPHRD_IPGRE:
+ case ARPHRD_TUNNEL:
+ case ARPHRD_SIT:
+ return 4;
+ case ARPHRD_ETHER:
+ return ETH_ALEN;
+ case ARPHRD_IEEE802154:
+ case ARPHRD_IEEE802154_MONITOR:
+ return IEEE802154_EXTENDED_ADDR_LEN;
+ case ARPHRD_PHONET_PIPE:
+ case ARPHRD_PPP:
+ case ARPHRD_NONE:
+ return 0;
+ case ARPHRD_6LOWPAN:
+ return EUI64_ADDR_LEN;
+ case ARPHRD_FDDI:
+ return FDDI_K_ALEN;
+ case ARPHRD_HIPPI:
+ return HIPPI_ALEN;
+ case ARPHRD_IEEE802:
+ return FC_ALEN;
+ case ARPHRD_ROSE:
+ return ROSE_ADDR_LEN;
+ case ARPHRD_NETROM:
+ return AX25_ADDR_LEN;
+ case ARPHRD_LOCALTLK:
+ return LTALK_ALEN;
+ default:
+ return 0;
+ }
+}
+
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg, int ifreq_len)
{
@@ -3082,6 +3129,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
}
tun->dev->type = (int) arg;
+ tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
netif_info(tun, drv, tun->dev, "linktype set to %d\n",
tun->dev->type);
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 31d51346786a..9bc58e64b5b7 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -611,7 +611,7 @@ static struct hso_serial *get_serial_by_index(unsigned index)
return serial;
}
-static int get_free_serial_index(void)
+static int obtain_minor(struct hso_serial *serial)
{
int index;
unsigned long flags;
@@ -619,8 +619,10 @@ static int get_free_serial_index(void)
spin_lock_irqsave(&serial_table_lock, flags);
for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) {
if (serial_table[index] == NULL) {
+ serial_table[index] = serial->parent;
+ serial->minor = index;
spin_unlock_irqrestore(&serial_table_lock, flags);
- return index;
+ return 0;
}
}
spin_unlock_irqrestore(&serial_table_lock, flags);
@@ -629,15 +631,12 @@ static int get_free_serial_index(void)
return -1;
}
-static void set_serial_by_index(unsigned index, struct hso_serial *serial)
+static void release_minor(struct hso_serial *serial)
{
unsigned long flags;
spin_lock_irqsave(&serial_table_lock, flags);
- if (serial)
- serial_table[index] = serial->parent;
- else
- serial_table[index] = NULL;
+ serial_table[serial->minor] = NULL;
spin_unlock_irqrestore(&serial_table_lock, flags);
}
@@ -2230,6 +2229,7 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
static void hso_serial_tty_unregister(struct hso_serial *serial)
{
tty_unregister_device(tty_drv, serial->minor);
+ release_minor(serial);
}
static void hso_serial_common_free(struct hso_serial *serial)
@@ -2253,24 +2253,22 @@ static void hso_serial_common_free(struct hso_serial *serial)
static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
int rx_size, int tx_size)
{
- int minor;
int i;
tty_port_init(&serial->port);
- minor = get_free_serial_index();
- if (minor < 0)
+ if (obtain_minor(serial))
goto exit2;
/* register our minor number */
serial->parent->dev = tty_port_register_device_attr(&serial->port,
- tty_drv, minor, &serial->parent->interface->dev,
+ tty_drv, serial->minor, &serial->parent->interface->dev,
serial->parent, hso_serial_dev_groups);
- if (IS_ERR(serial->parent->dev))
+ if (IS_ERR(serial->parent->dev)) {
+ release_minor(serial);
goto exit2;
+ }
- /* fill in specific data for later use */
- serial->minor = minor;
serial->magic = HSO_SERIAL_MAGIC;
spin_lock_init(&serial->serial_lock);
serial->num_rx_urbs = num_urbs;
@@ -2667,9 +2665,6 @@ static struct hso_device *hso_create_bulk_serial_device(
serial->write_data = hso_std_serial_write_data;
- /* and record this serial */
- set_serial_by_index(serial->minor, serial);
-
/* setup the proc dirs and files if needed */
hso_log_port(hso_dev);
@@ -2726,9 +2721,6 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
serial->shared_int->ref_count++;
mutex_unlock(&serial->shared_int->shared_int_lock);
- /* and record this serial */
- set_serial_by_index(serial->minor, serial);
-
/* setup the proc dirs and files if needed */
hso_log_port(hso_dev);
@@ -3113,7 +3105,6 @@ static void hso_free_interface(struct usb_interface *interface)
cancel_work_sync(&serial_table[i]->async_get_intf);
hso_serial_tty_unregister(serial);
kref_put(&serial_table[i]->ref, hso_serial_ref_free);
- set_serial_by_index(i, NULL);
}
}
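The hso rework above closes a race: the old code found a free index under serial_table_lock, dropped the lock, and only recorded the entry later, so two probes could claim the same minor. obtain_minor() now reserves the slot inside the same critical section that finds it. A runnable userspace model of that pattern (table size and types are illustrative):

#include <pthread.h>
#include <stdio.h>

#define MINORS 8

static void *table[MINORS];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* find a free slot and claim it before unlocking, like obtain_minor() */
static int obtain_slot(void *owner)
{
	int i;

	pthread_mutex_lock(&table_lock);
	for (i = 0; i < MINORS; i++) {
		if (!table[i]) {
			table[i] = owner;	/* reserved under the lock */
			pthread_mutex_unlock(&table_lock);
			return i;
		}
	}
	pthread_mutex_unlock(&table_lock);
	return -1;
}

static void release_slot(int i)
{
	pthread_mutex_lock(&table_lock);
	table[i] = NULL;
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	int a = obtain_slot((void *)0x1);
	int b = obtain_slot((void *)0x2);

	printf("a=%d b=%d\n", a, b);	/* distinct slots */
	release_slot(b);
	release_slot(a);
	return 0;
}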
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 82e520d2cb12..0824e6999e49 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -406,9 +406,13 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
offset += hdr_padded_len;
p += hdr_padded_len;
- copy = len;
- if (copy > skb_tailroom(skb))
- copy = skb_tailroom(skb);
+ /* Copy the whole frame if it fits in skb->head; otherwise
+ * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
+ */
+ if (len <= skb_tailroom(skb))
+ copy = len;
+ else
+ copy = ETH_HLEN + metasize;
skb_put_data(skb, p, copy);
if (metasize) {
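The virtio_net hunk above changes the copy heuristic: if the whole frame fits in the skb linear area it is copied in full; otherwise only the Ethernet header plus metadata is copied and the rest stays in fragments for virtio_net_hdr_to_skb()/GRO to pull. A runnable sketch of just the length decision (the tailroom value is a stand-in):

#include <stdio.h>

#define ETH_HLEN 14

/* mirrors the copy-length decision in page_to_skb() above */
static unsigned int copy_len(unsigned int len, unsigned int tailroom,
			     unsigned int metasize)
{
	return len <= tailroom ? len : ETH_HLEN + metasize;
}

int main(void)
{
	printf("%u\n", copy_len(60, 128, 0));	/* fits: copy all 60 */
	printf("%u\n", copy_len(1500, 128, 8));	/* too big: 14 + 8 = 22 */
	return 0;
}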
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 6d9130859c55..503e2fd7ce51 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -471,9 +471,8 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
skb_dst_drop(skb);
- /* if dst.dev is loopback or the VRF device again this is locally
- * originated traffic destined to a local address. Short circuit
- * to Rx path
+ /* if dst.dev is the VRF device again, this is locally originated traffic
+ * destined to a local address. Short circuit to Rx path.
*/
if (dst->dev == dev)
return vrf_local_xmit(skb, dev, dst);
@@ -547,9 +546,8 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
skb_dst_drop(skb);
- /* if dst.dev is loopback or the VRF device again this is locally
- * originated traffic destined to a local address. Short circuit
- * to Rx path
+ /* if dst.dev is the VRF device again, this is locally originated traffic
+ * destined to a local address. Short circuit to Rx path.
*/
if (rt->dst.dev == vrf_dev)
return vrf_local_xmit(skb, vrf_dev, &rt->dst);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 666dd201c3d5..53dbc67e8a34 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2725,12 +2725,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
} else if (err) {
if (info) {
+ struct ip_tunnel_info *unclone;
struct in_addr src, dst;
+ unclone = skb_tunnel_info_unclone(skb);
+ if (unlikely(!unclone))
+ goto tx_error;
+
src = remote_ip.sin.sin_addr;
dst = local_ip.sin.sin_addr;
- info->key.u.ipv4.src = src.s_addr;
- info->key.u.ipv4.dst = dst.s_addr;
+ unclone->key.u.ipv4.src = src.s_addr;
+ unclone->key.u.ipv4.dst = dst.s_addr;
}
vxlan_encap_bypass(skb, vxlan, vxlan, vni, false);
dst_release(ndst);
@@ -2781,12 +2786,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
} else if (err) {
if (info) {
+ struct ip_tunnel_info *unclone;
struct in6_addr src, dst;
+ unclone = skb_tunnel_info_unclone(skb);
+ if (unlikely(!unclone))
+ goto tx_error;
+
src = remote_ip.sin6.sin6_addr;
dst = local_ip.sin6.sin6_addr;
- info->key.u.ipv6.src = src;
- info->key.u.ipv6.dst = dst;
+ unclone->key.u.ipv6.src = src;
+ unclone->key.u.ipv6.dst = dst;
}
vxlan_encap_bypass(skb, vxlan, vxlan, vni, false);
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 0720f5f92caa..4d9dc7d15908 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -415,7 +415,7 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
if (pad > 0) { /* Pad the frame with zeros */
if (__skb_pad(skb, pad, false))
- goto drop;
+ goto out;
skb_put(skb, pad);
}
}
@@ -448,8 +448,9 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
drop:
- dev->stats.tx_dropped++;
kfree_skb(skb);
+out:
+ dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 71e2ada86793..72e2e71aac0e 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -251,7 +251,7 @@ void ath9k_beacon_ensure_primary_slot(struct ath_softc *sc)
int first_slot = ATH_BCBUF;
int slot;
- tasklet_disable(&sc->bcon_tasklet);
+ tasklet_disable_in_atomic(&sc->bcon_tasklet);
/* Find first taken slot. */
for (slot = 0; slot < ATH_BCBUF; slot++) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 6d30a0fcecea..34cd8a7401fe 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2439,7 +2439,7 @@ void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool locked)
vif = ifp->vif;
cfg = wdev_to_cfg(&vif->wdev);
cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
- if (locked) {
+ if (!locked) {
rtnl_lock();
wiphy_lock(cfg->wiphy);
cfg80211_unregister_wdev(&vif->wdev);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c
index 3dbc6f3f92cc..231d2517f398 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014 Intel Corporation
+ * Copyright (C) 2005-2014, 2021 Intel Corporation
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
#include <linux/sched.h>
@@ -26,7 +26,7 @@ bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait,
if (!list_empty(&notif_wait->notif_waits)) {
struct iwl_notification_wait *w;
- spin_lock(&notif_wait->notif_wait_lock);
+ spin_lock_bh(&notif_wait->notif_wait_lock);
list_for_each_entry(w, &notif_wait->notif_waits, list) {
int i;
bool found = false;
@@ -59,7 +59,7 @@ bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait,
triggered = true;
}
}
- spin_unlock(&notif_wait->notif_wait_lock);
+ spin_unlock_bh(&notif_wait->notif_wait_lock);
}
return triggered;
@@ -70,10 +70,10 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
{
struct iwl_notification_wait *wait_entry;
- spin_lock(&notif_wait->notif_wait_lock);
+ spin_lock_bh(&notif_wait->notif_wait_lock);
list_for_each_entry(wait_entry, &notif_wait->notif_waits, list)
wait_entry->aborted = true;
- spin_unlock(&notif_wait->notif_wait_lock);
+ spin_unlock_bh(&notif_wait->notif_wait_lock);
wake_up_all(&notif_wait->notif_waitq);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 75f99ff7f908..c4f5da76f1c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -414,6 +414,7 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_QNJ 0x36
#define IWL_CFG_MAC_TYPE_SO 0x37
#define IWL_CFG_MAC_TYPE_SNJ 0x42
+#define IWL_CFG_MAC_TYPE_SOF 0x43
#define IWL_CFG_MAC_TYPE_MA 0x44
#define IWL_CFG_RF_TYPE_TH 0x105
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index af684f80b0cc..c5a1e84dc1ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -232,7 +232,7 @@ enum iwl_reg_capa_flags_v2 {
REG_CAPA_V2_MCS_9_ALLOWED = BIT(6),
REG_CAPA_V2_WEATHER_DISABLED = BIT(7),
REG_CAPA_V2_40MHZ_ALLOWED = BIT(8),
- REG_CAPA_V2_11AX_DISABLED = BIT(13),
+ REG_CAPA_V2_11AX_DISABLED = BIT(10),
};
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 130760572262..34ddef97b099 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1786,10 +1786,13 @@ static ssize_t iwl_dbgfs_rfi_freq_table_write(struct iwl_mvm *mvm, char *buf,
return -EINVAL;
/* value zero triggers re-sending the default table to the device */
- if (!op_id)
+ if (!op_id) {
+ mutex_lock(&mvm->mutex);
ret = iwl_rfi_send_config_cmd(mvm, NULL);
- else
+ mutex_unlock(&mvm->mutex);
+ } else {
ret = -EOPNOTSUPP; /* in the future a new table will be added */
+ }
return ret ?: count;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
index 873919048143..0b818067067c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2020 Intel Corporation
+ * Copyright (C) 2020 - 2021 Intel Corporation
*/
#include "mvm.h"
@@ -66,6 +66,8 @@ int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_t
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT))
return -EOPNOTSUPP;
+ lockdep_assert_held(&mvm->mutex);
+
/* in case no table is passed, use the default one */
if (!rfi_table) {
memcpy(cmd.table, iwl_rfi_table, sizeof(cmd.table));
@@ -75,9 +77,7 @@ int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_t
cmd.oem = 1;
}
- mutex_lock(&mvm->mutex);
ret = iwl_mvm_send_cmd(mvm, &hcmd);
- mutex_unlock(&mvm->mutex);
if (ret)
IWL_ERR(mvm, "Failed to send RFI config cmd %d\n", ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index c21736f80c29..af5a6dd81c41 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -272,10 +272,10 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
rx_status->chain_signal[2] = S8_MIN;
}
-static int iwl_mvm_rx_mgmt_crypto(struct ieee80211_sta *sta,
- struct ieee80211_hdr *hdr,
- struct iwl_rx_mpdu_desc *desc,
- u32 status)
+static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr,
+ struct iwl_rx_mpdu_desc *desc,
+ u32 status)
{
struct iwl_mvm_sta *mvmsta;
struct iwl_mvm_vif *mvmvif;
@@ -285,6 +285,9 @@ static int iwl_mvm_rx_mgmt_crypto(struct ieee80211_sta *sta,
u32 len = le16_to_cpu(desc->mpdu_len);
const u8 *frame = (void *)hdr;
+ if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_NONE)
+ return 0;
+
/*
* For non-beacon, we don't really care. But beacons may
* be filtered out, and we thus need the firmware's replay
@@ -356,6 +359,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on)
return -1;
+ if (unlikely(ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_has_protected(hdr->frame_control)))
+ return iwl_mvm_rx_mgmt_prot(sta, hdr, desc, status);
+
if (!ieee80211_has_protected(hdr->frame_control) ||
(status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
IWL_RX_MPDU_STATUS_SEC_NONE)
@@ -411,7 +418,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
stats->flag |= RX_FLAG_DECRYPTED;
return 0;
case RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC:
- return iwl_mvm_rx_mgmt_crypto(sta, hdr, desc, status);
+ break;
default:
/*
* Sometimes we can get frames that were not decrypted
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 8fba190e84cf..cecc32e7dbe8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-fh.h"
@@ -75,15 +75,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
const struct fw_img *fw)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
- u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
- CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
- u32_encode_bits(250,
- CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
- CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
- u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
- CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
- u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
struct iwl_context_info_gen3 *ctxt_info_gen3;
struct iwl_prph_scratch *prph_scratch;
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
@@ -217,26 +208,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
CSR_AUTO_FUNC_BOOT_ENA);
- /*
- * To workaround hardware latency issues during the boot process,
- * initialize the LTR to ~250 usec (see ltr_val above).
- * The firmware initializes this again later (to a smaller value).
- */
- if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
- trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
- !trans->trans_cfg->integrated) {
- iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
- } else if (trans->trans_cfg->integrated &&
- trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
- iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
- iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
- }
-
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
- else
- iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
-
return 0;
err_free_ctxt_info:
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index d1bb273d6b6d..74ce31fdf45e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-fh.h"
@@ -240,7 +240,6 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
/* kick FW self load */
iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
- iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
/* Context info will be released upon alive or failure to get one */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index ffaf973dae94..558a0b2ef0fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -592,6 +592,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x6074, iwl_ax201_cfg_qu_hr, NULL),
/* So with HR */
IWL_DEV_INFO(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL),
@@ -1040,7 +1041,31 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_hr_a0, iwl_ax201_name)
+ iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
+
+/* So-F with Hr */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
+
+/* So-F with Gf */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
#endif /* CONFIG_IWLMVM */
};
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 497ef3405da3..94ffc1ae484d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -266,6 +266,34 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
mutex_unlock(&trans_pcie->mutex);
}
+static void iwl_pcie_set_ltr(struct iwl_trans *trans)
+{
+ u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
+ u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+ CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
+ u32_encode_bits(250,
+ CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
+ CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
+ u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+ CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
+ u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
+
+ /*
+ * To work around hardware latency issues during the boot process,
+ * initialize the LTR to ~250 usec (see ltr_val above).
+ * The firmware initializes this again later (to a smaller value).
+ */
+ if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
+ trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
+ !trans->trans_cfg->integrated) {
+ iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
+ } else if (trans->trans_cfg->integrated &&
+ trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
+ iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
+ iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
+ }
+}
+
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill)
{
@@ -332,6 +360,13 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
if (ret)
goto out;
+ iwl_pcie_set_ltr(trans);
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
+ else
+ iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
+
/* re-check RF-Kill state since we may have missed the interrupt */
hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
if (hw_rfkill && !run_in_rfkill)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 381e8f90b6f2..7ae32491b5da 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -928,6 +928,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
u32 cmd_pos;
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
+ unsigned long flags;
if (WARN(!trans->wide_cmd_header &&
group_id > IWL_ALWAYS_LONG_GROUP,
@@ -1011,10 +1012,10 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
goto free_dup_buf;
}
- spin_lock_bh(&txq->lock);
+ spin_lock_irqsave(&txq->lock, flags);
if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
- spin_unlock_bh(&txq->lock);
+ spin_unlock_irqrestore(&txq->lock, flags);
IWL_ERR(trans, "No space in command queue\n");
iwl_op_mode_cmd_queue_full(trans->op_mode);
@@ -1174,7 +1175,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
unlock_reg:
spin_unlock(&trans_pcie->reg_lock);
out:
- spin_unlock_bh(&txq->lock);
+ spin_unlock_irqrestore(&txq->lock, flags);
free_dup_buf:
if (idx < 0)
kfree(dup_buf);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
index 18980bb32dee..6dad7f6ab09d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
@@ -135,10 +135,10 @@
#define MT_WTBLON_TOP_BASE 0x34000
#define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs))
-#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x0)
+#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x200)
#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(2, 0)
-#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x030)
+#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x230)
#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(9, 0)
#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
#define MT_WTBL_UPDATE_BUSY BIT(31)
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index c878097f0dda..1df959532c7d 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -12,6 +12,7 @@
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include <linux/etherdevice.h>
+#include <linux/math64.h>
#include <linux/module.h>
static struct wiphy *common_wiphy;
@@ -168,11 +169,11 @@ static void virt_wifi_scan_result(struct work_struct *work)
scan_result.work);
struct wiphy *wiphy = priv_to_wiphy(priv);
struct cfg80211_scan_info scan_info = { .aborted = false };
+ u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
CFG80211_BSS_FTYPE_PRESP,
- fake_router_bssid,
- ktime_get_boottime_ns(),
+ fake_router_bssid, tsf,
WLAN_CAPABILITY_ESS, 0,
(void *)&ssid, sizeof(ssid),
DBM_TO_MBM(-50), GFP_KERNEL);
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index a5439c130130..d24b7a7993aa 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -824,11 +824,15 @@ static void connect(struct backend_info *be)
xenvif_carrier_on(be->vif);
unregister_hotplug_status_watch(be);
- err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
- hotplug_status_changed,
- "%s/%s", dev->nodename, "hotplug-status");
- if (!err)
+ if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) {
+ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
+ NULL, hotplug_status_changed,
+ "%s/%s", dev->nodename,
+ "hotplug-status");
+ if (err)
+ goto err;
be->have_hotplug_status_watch = 1;
+ }
netif_tx_wake_all_queues(be->vif->dev);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 48f0985ca8a0..3a777d0073b7 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -631,16 +631,14 @@ void nvdimm_check_and_set_ro(struct gendisk *disk)
struct nd_region *nd_region = to_nd_region(dev->parent);
int disk_ro = get_disk_ro(disk);
- /*
- * Upgrade to read-only if the region is read-only preserve as
- * read-only if the disk is already read-only.
- */
- if (disk_ro || nd_region->ro == disk_ro)
+ /* catch the disk up with the region ro state */
+ if (disk_ro == nd_region->ro)
return;
- dev_info(dev, "%s read-only, marking %s read-only\n",
- dev_name(&nd_region->dev), disk->disk_name);
- set_disk_ro(disk, 1);
+ dev_info(dev, "%s read-%s, marking %s read-%s\n",
+ dev_name(&nd_region->dev), nd_region->ro ? "only" : "write",
+ disk->disk_name, nd_region->ro ? "only" : "write");
+ set_disk_ro(disk, nd_region->ro);
}
EXPORT_SYMBOL(nvdimm_check_and_set_ro);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index b8a85bfb2e95..7daac795db39 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -26,6 +26,7 @@
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "pmem.h"
+#include "btt.h"
#include "pfn.h"
#include "nd.h"
@@ -585,7 +586,7 @@ static void nd_pmem_shutdown(struct device *dev)
nvdimm_flush(to_nd_region(dev->parent), NULL);
}
-static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
+static void pmem_revalidate_poison(struct device *dev)
{
struct nd_region *nd_region;
resource_size_t offset = 0, end_trunc = 0;
@@ -595,9 +596,6 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
struct range range;
struct kernfs_node *bb_state;
- if (event != NVDIMM_REVALIDATE_POISON)
- return;
-
if (is_nd_btt(dev)) {
struct nd_btt *nd_btt = to_nd_btt(dev);
@@ -635,6 +633,37 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
sysfs_notify_dirent(bb_state);
}
+static void pmem_revalidate_region(struct device *dev)
+{
+ struct pmem_device *pmem;
+
+ if (is_nd_btt(dev)) {
+ struct nd_btt *nd_btt = to_nd_btt(dev);
+ struct btt *btt = nd_btt->btt;
+
+ nvdimm_check_and_set_ro(btt->btt_disk);
+ return;
+ }
+
+ pmem = dev_get_drvdata(dev);
+ nvdimm_check_and_set_ro(pmem->disk);
+}
+
+static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
+{
+ switch (event) {
+ case NVDIMM_REVALIDATE_POISON:
+ pmem_revalidate_poison(dev);
+ break;
+ case NVDIMM_REVALIDATE_REGION:
+ pmem_revalidate_region(dev);
+ break;
+ default:
+ dev_WARN_ONCE(dev, 1, "notify: unknown event: %d\n", event);
+ break;
+ }
+}
+
MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index ef23119db574..9ccf3d608799 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -518,6 +518,12 @@ static ssize_t read_only_show(struct device *dev,
return sprintf(buf, "%d\n", nd_region->ro);
}
+static int revalidate_read_only(struct device *dev, void *data)
+{
+ nd_device_notify(dev, NVDIMM_REVALIDATE_REGION);
+ return 0;
+}
+
static ssize_t read_only_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
@@ -529,6 +535,7 @@ static ssize_t read_only_store(struct device *dev,
return rc;
nd_region->ro = ro;
+ device_for_each_child(dev, NULL, revalidate_read_only);
return len;
}
static DEVICE_ATTR_RW(read_only);
@@ -1239,6 +1246,11 @@ int nvdimm_has_flush(struct nd_region *nd_region)
|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
return -ENXIO;
+ /* Test if an explicit flush function is defined */
+ if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush)
+ return 1;
+
+ /* Test if any flush hints for the region are available */
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
@@ -1249,8 +1261,8 @@ int nvdimm_has_flush(struct nd_region *nd_region)
}
/*
- * The platform defines dimm devices without hints, assume
- * platform persistence mechanism like ADR
+ * The platform defines dimm devices without hints or an explicit
+ * flush; assume a platform persistence mechanism like ADR
*/
return 0;
}
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index dcc1dd96911a..adb26aff481d 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -205,7 +205,7 @@ static void populate_properties(const void *blob,
*pprev = NULL;
}
-static bool populate_node(const void *blob,
+static int populate_node(const void *blob,
int offset,
void **mem,
struct device_node *dad,
@@ -214,24 +214,24 @@ static bool populate_node(const void *blob,
{
struct device_node *np;
const char *pathp;
- unsigned int l, allocl;
+ int len;
- pathp = fdt_get_name(blob, offset, &l);
+ pathp = fdt_get_name(blob, offset, &len);
if (!pathp) {
*pnp = NULL;
- return false;
+ return len;
}
- allocl = ++l;
+ len++;
- np = unflatten_dt_alloc(mem, sizeof(struct device_node) + allocl,
+ np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len,
__alignof__(struct device_node));
if (!dryrun) {
char *fn;
of_node_init(np);
np->full_name = fn = ((char *)np) + sizeof(*np);
- memcpy(fn, pathp, l);
+ memcpy(fn, pathp, len);
if (dad != NULL) {
np->parent = dad;
@@ -295,6 +295,7 @@ static int unflatten_dt_nodes(const void *blob,
struct device_node *nps[FDT_MAX_DEPTH];
void *base = mem;
bool dryrun = !base;
+ int ret;
if (nodepp)
*nodepp = NULL;
@@ -322,9 +323,10 @@ static int unflatten_dt_nodes(const void *blob,
!of_fdt_device_is_available(blob, offset))
continue;
- if (!populate_node(blob, offset, &mem, nps[depth],
- &nps[depth+1], dryrun))
- return mem - base;
+ ret = populate_node(blob, offset, &mem, nps[depth],
+ &nps[depth+1], dryrun);
+ if (ret < 0)
+ return ret;
if (!dryrun && nodepp && !*nodepp)
*nodepp = nps[depth+1];
@@ -372,6 +374,10 @@ void *__unflatten_device_tree(const void *blob,
{
int size;
void *mem;
+ int ret;
+
+ if (mynodes)
+ *mynodes = NULL;
pr_debug(" -> unflatten_device_tree()\n");
@@ -392,7 +398,7 @@ void *__unflatten_device_tree(const void *blob,
/* First pass, scan for size */
size = unflatten_dt_nodes(blob, NULL, dad, NULL);
- if (size < 0)
+ if (size <= 0)
return NULL;
size = ALIGN(size, 4);
@@ -410,12 +416,16 @@ void *__unflatten_device_tree(const void *blob,
pr_debug(" unflattening %p...\n", mem);
/* Second pass, do actual unflattening */
- unflatten_dt_nodes(blob, mem, dad, mynodes);
+ ret = unflatten_dt_nodes(blob, mem, dad, mynodes);
+
if (be32_to_cpup(mem + size) != 0xdeadbeef)
pr_warn("End of tree marker overwritten: %08x\n",
be32_to_cpup(mem + size));
- if (detached && mynodes) {
+ if (ret <= 0)
+ return NULL;
+
+ if (detached && mynodes && *mynodes) {
of_node_set_flag(*mynodes, OF_DETACHED);
pr_debug("unflattened tree is detached\n");
}
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index d9e6a324de0a..d717efbd637d 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -8,6 +8,8 @@
* Copyright (C) 1996-2005 Paul Mackerras.
*/
+#define FDT_ALIGN_SIZE 8
+
/**
* struct alias_prop - Alias property in 'aliases' node
* @link: List node to link the structure in aliases_lookup list
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 50bbe0edf538..23effe5e50ec 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -57,7 +57,7 @@ struct fragment {
* struct overlay_changeset
* @id: changeset identifier
* @ovcs_list: list on which we are located
- * @fdt: FDT that was unflattened to create @overlay_tree
+ * @fdt: base of memory allocated to hold aligned FDT that was unflattened to create @overlay_tree
* @overlay_tree: expanded device tree that contains the fragment nodes
* @count: count of fragment structures
* @fragments: fragment nodes in the overlay expanded device tree
@@ -719,8 +719,8 @@ static struct device_node *find_target(struct device_node *info_node)
/**
* init_overlay_changeset() - initialize overlay changeset from overlay tree
* @ovcs: Overlay changeset to build
- * @fdt: the FDT that was unflattened to create @tree
- * @tree: Contains all the overlay fragments and overlay fixup nodes
+ * @fdt: base of memory allocated to hold aligned FDT that was unflattened to create @tree
+ * @tree: Contains the overlay fragments and overlay fixup nodes
*
* Initialize @ovcs. Populate @ovcs->fragments with node information from
* the top level of @tree. The relevant top level nodes are the fragment
@@ -873,7 +873,7 @@ static void free_overlay_changeset(struct overlay_changeset *ovcs)
* internal documentation
*
* of_overlay_apply() - Create and apply an overlay changeset
- * @fdt: the FDT that was unflattened to create @tree
+ * @fdt: base of memory allocated to hold the aligned FDT
* @tree: Expanded overlay device tree
* @ovcs_id: Pointer to overlay changeset id
*
@@ -953,7 +953,9 @@ static int of_overlay_apply(const void *fdt, struct device_node *tree,
/*
* after overlay_notify(), ovcs->overlay_tree related pointers may have
* leaked to drivers, so can not kfree() tree, aka ovcs->overlay_tree;
- * and can not free fdt, aka ovcs->fdt
+ * and can not free memory containing aligned fdt. The aligned fdt
+ * is contained within the memory at ovcs->fdt, possibly at an offset
+ * from ovcs->fdt.
*/
ret = overlay_notify(ovcs, OF_OVERLAY_PRE_APPLY);
if (ret) {
@@ -1014,10 +1016,11 @@ out:
int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
int *ovcs_id)
{
- const void *new_fdt;
+ void *new_fdt;
+ void *new_fdt_align;
int ret;
u32 size;
- struct device_node *overlay_root;
+ struct device_node *overlay_root = NULL;
*ovcs_id = 0;
ret = 0;
@@ -1036,11 +1039,14 @@ int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
* Must create permanent copy of FDT because of_fdt_unflatten_tree()
* will create pointers to the passed in FDT in the unflattened tree.
*/
- new_fdt = kmemdup(overlay_fdt, size, GFP_KERNEL);
+ new_fdt = kmalloc(size + FDT_ALIGN_SIZE, GFP_KERNEL);
if (!new_fdt)
return -ENOMEM;
- of_fdt_unflatten_tree(new_fdt, NULL, &overlay_root);
+ new_fdt_align = PTR_ALIGN(new_fdt, FDT_ALIGN_SIZE);
+ memcpy(new_fdt_align, overlay_fdt, size);
+
+ of_fdt_unflatten_tree(new_fdt_align, NULL, &overlay_root);
if (!overlay_root) {
pr_err("unable to unflatten overlay_fdt\n");
ret = -EINVAL;
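kmemdup() guarantees only the allocator's natural alignment, while libfdt expects the blob on an 8-byte boundary, hence the overallocate-then-PTR_ALIGN dance above (repeated in unittest.c below). A runnable userspace sketch of the same arithmetic, with PTR_ALIGN reimplemented locally for illustration:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FDT_ALIGN_SIZE 8

/* same rounding the kernel's PTR_ALIGN() performs */
static void *ptr_align(void *p, uintptr_t a)
{
	return (void *)(((uintptr_t)p + a - 1) & ~(a - 1));
}

int main(void)
{
	const char blob[] = "fake-fdt-bytes";
	size_t size = sizeof(blob);
	void *raw, *aligned;

	raw = malloc(size + FDT_ALIGN_SIZE);	/* room for the shift */
	if (!raw)
		return 1;

	aligned = ptr_align(raw, FDT_ALIGN_SIZE);
	memcpy(aligned, blob, size);

	printf("raw=%p aligned=%p mod8=%zu\n", raw, aligned,
	       (size_t)((uintptr_t)aligned % 8));
	free(raw);	/* free the original pointer, not the aligned one */
	return 0;
}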
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 5036a362f52e..78427c85bef3 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1262,7 +1262,16 @@ DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL)
DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL)
DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
-DEFINE_SUFFIX_PROP(gpios, "-gpios", "#gpio-cells")
+
+static struct device_node *parse_gpios(struct device_node *np,
+ const char *prop_name, int index)
+{
+ if (!strcmp_suffix(prop_name, ",nr-gpios"))
+ return NULL;
+
+ return parse_suffix_prop_cells(np, prop_name, index, "-gpios",
+ "#gpio-cells");
+}
static struct device_node *parse_iommu_maps(struct device_node *np,
const char *prop_name, int index)
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index eb100627c186..819a20acaa93 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
@@ -1408,7 +1409,8 @@ static void attach_node_and_children(struct device_node *np)
static int __init unittest_data_add(void)
{
void *unittest_data;
- struct device_node *unittest_data_node, *np;
+ void *unittest_data_align;
+ struct device_node *unittest_data_node = NULL, *np;
/*
* __dtb_testcases_begin[] and __dtb_testcases_end[] are magically
* created by cmd_dt_S_dtb in scripts/Makefile.lib
@@ -1417,21 +1419,29 @@ static int __init unittest_data_add(void)
extern uint8_t __dtb_testcases_end[];
const int size = __dtb_testcases_end - __dtb_testcases_begin;
int rc;
+ void *ret;
if (!size) {
- pr_warn("%s: No testcase data to attach; not running tests\n",
- __func__);
+ pr_warn("%s: testcases is empty\n", __func__);
return -ENODATA;
}
/* creating copy */
- unittest_data = kmemdup(__dtb_testcases_begin, size, GFP_KERNEL);
+ unittest_data = kmalloc(size + FDT_ALIGN_SIZE, GFP_KERNEL);
if (!unittest_data)
return -ENOMEM;
- of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node);
+ unittest_data_align = PTR_ALIGN(unittest_data, FDT_ALIGN_SIZE);
+ memcpy(unittest_data_align, __dtb_testcases_begin, size);
+
+ ret = of_fdt_unflatten_tree(unittest_data_align, NULL, &unittest_data_node);
+ if (!ret) {
+ pr_warn("%s: unflatten testcases tree failed\n", __func__);
+ kfree(unittest_data);
+ return -ENODATA;
+ }
if (!unittest_data_node) {
- pr_warn("%s: No tree to attach; not running tests\n", __func__);
+ pr_warn("%s: testcases tree is empty\n", __func__);
kfree(unittest_data);
return -ENODATA;
}
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 27a17a1e4a7c..1ff4ce24f4b3 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1292,7 +1292,7 @@ exit_unlock:
* resumes, hv_pci_restore_msi_state() is able to correctly restore
* the interrupt with the correct affinity.
*/
- if (res && hbus->state != hv_pcibus_removing)
+ if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
dev_err(&hbus->hdev->device,
"%s() failed: %#llx", __func__, res);
@@ -1458,7 +1458,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
* Prevents hv_pci_onchannelcallback() from running concurrently
* in the tasklet.
*/
- tasklet_disable(&channel->callback_event);
+ tasklet_disable_in_atomic(&channel->callback_event);
/*
* Since this function is called with IRQ locks held, can't
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index f81e2ec90005..666d8a9b557f 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -306,7 +306,7 @@ static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
{
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
- return snprintf(buf, PAGE_SIZE, "config=0x%lx\n", (unsigned long)eattr->var);
+ return sysfs_emit(buf, "config=0x%lx\n", (unsigned long)eattr->var);
}
static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
@@ -525,8 +525,8 @@ static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
/* Global events have single fixed source code */
- return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n",
- (unsigned long)eattr->var, CCI5xx_PORT_GLOBAL);
+ return sysfs_emit(buf, "event=0x%lx,source=0x%x\n",
+ (unsigned long)eattr->var, CCI5xx_PORT_GLOBAL);
}
/*
@@ -696,7 +696,7 @@ static ssize_t cci_pmu_format_show(struct device *dev,
{
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
- return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
+ return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}
static ssize_t cci_pmu_event_show(struct device *dev,
@@ -705,8 +705,8 @@ static ssize_t cci_pmu_event_show(struct device *dev,
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
/* source parameter is mandatory for normal PMU events */
- return snprintf(buf, PAGE_SIZE, "source=?,event=0x%lx\n",
- (unsigned long)eattr->var);
+ return sysfs_emit(buf, "source=?,event=0x%lx\n",
+ (unsigned long)eattr->var);
}
static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
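The conversions above (and in the PMU drivers that follow) replace snprintf(buf, PAGE_SIZE, ...) with sysfs_emit()/sysfs_emit_at(), which know the sysfs buffer is a full page and sanity-check the offset. A hedged sketch of a show() callback in the new style; the attribute name is illustrative and this compiles only in-tree:

/* Sketch only: emit "event=0x<val>\n" into a sysfs buffer without
 * open-coding PAGE_SIZE bounds.
 */
static ssize_t example_event_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr =
		container_of(attr, struct dev_ext_attribute, attr);
	int len;

	len = sysfs_emit(buf, "event=0x%lx", (unsigned long)eattr->var);
	len += sysfs_emit_at(buf, len, "\n");	/* append at an offset */
	return len;
}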
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index a0a71c1df042..96d47cb302dd 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -221,7 +221,7 @@ static ssize_t arm_ccn_pmu_format_show(struct device *dev,
struct dev_ext_attribute *ea = container_of(attr,
struct dev_ext_attribute, attr);
- return snprintf(buf, PAGE_SIZE, "%s\n", (char *)ea->var);
+ return sysfs_emit(buf, "%s\n", (char *)ea->var);
}
#define CCN_FORMAT_ATTR(_name, _config) \
@@ -326,43 +326,38 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev,
struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
struct arm_ccn_pmu_event *event = container_of(attr,
struct arm_ccn_pmu_event, attr);
- ssize_t res;
+ int res;
- res = scnprintf(buf, PAGE_SIZE, "type=0x%x", event->type);
+ res = sysfs_emit(buf, "type=0x%x", event->type);
if (event->event)
- res += scnprintf(buf + res, PAGE_SIZE - res, ",event=0x%x",
- event->event);
+ res += sysfs_emit_at(buf, res, ",event=0x%x", event->event);
if (event->def)
- res += scnprintf(buf + res, PAGE_SIZE - res, ",%s",
- event->def);
+ res += sysfs_emit_at(buf, res, ",%s", event->def);
if (event->mask)
- res += scnprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x",
- event->mask);
+ res += sysfs_emit_at(buf, res, ",mask=0x%x", event->mask);
/* Arguments required by an event */
switch (event->type) {
case CCN_TYPE_CYCLES:
break;
case CCN_TYPE_XP:
- res += scnprintf(buf + res, PAGE_SIZE - res,
- ",xp=?,vc=?");
+ res += sysfs_emit_at(buf, res, ",xp=?,vc=?");
if (event->event == CCN_EVENT_WATCHPOINT)
- res += scnprintf(buf + res, PAGE_SIZE - res,
+ res += sysfs_emit_at(buf, res,
",port=?,dir=?,cmp_l=?,cmp_h=?,mask=?");
else
- res += scnprintf(buf + res, PAGE_SIZE - res,
- ",bus=?");
+ res += sysfs_emit_at(buf, res, ",bus=?");
break;
case CCN_TYPE_MN:
- res += scnprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id);
+ res += sysfs_emit_at(buf, res, ",node=%d", ccn->mn_id);
break;
default:
- res += scnprintf(buf + res, PAGE_SIZE - res, ",node=?");
+ res += sysfs_emit_at(buf, res, ",node=?");
break;
}
- res += scnprintf(buf + res, PAGE_SIZE - res, "\n");
+ res += sysfs_emit_at(buf, res, "\n");
return res;
}
@@ -476,7 +471,7 @@ static ssize_t arm_ccn_pmu_cmp_mask_show(struct device *dev,
struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
- return mask ? snprintf(buf, PAGE_SIZE, "0x%016llx\n", *mask) : -EINVAL;
+ return mask ? sysfs_emit(buf, "0x%016llx\n", *mask) : -EINVAL;
}
static ssize_t arm_ccn_pmu_cmp_mask_store(struct device *dev,
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 1328159fe564..56a5c355701d 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -348,19 +348,19 @@ static ssize_t arm_cmn_event_show(struct device *dev,
eattr = container_of(attr, typeof(*eattr), attr);
if (eattr->type == CMN_TYPE_DTC)
- return snprintf(buf, PAGE_SIZE, "type=0x%x\n", eattr->type);
+ return sysfs_emit(buf, "type=0x%x\n", eattr->type);
if (eattr->type == CMN_TYPE_WP)
- return snprintf(buf, PAGE_SIZE,
- "type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n",
- eattr->type, eattr->eventid);
+ return sysfs_emit(buf,
+ "type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n",
+ eattr->type, eattr->eventid);
if (arm_cmn_is_occup_event(eattr->type, eattr->eventid))
- return snprintf(buf, PAGE_SIZE, "type=0x%x,eventid=0x%x,occupid=0x%x\n",
- eattr->type, eattr->eventid, eattr->occupid);
+ return sysfs_emit(buf, "type=0x%x,eventid=0x%x,occupid=0x%x\n",
+ eattr->type, eattr->eventid, eattr->occupid);
- return snprintf(buf, PAGE_SIZE, "type=0x%x,eventid=0x%x\n",
- eattr->type, eattr->eventid);
+ return sysfs_emit(buf, "type=0x%x,eventid=0x%x\n", eattr->type,
+ eattr->eventid);
}
static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
@@ -560,12 +560,12 @@ static ssize_t arm_cmn_format_show(struct device *dev,
int lo = __ffs(fmt->field), hi = __fls(fmt->field);
if (lo == hi)
- return snprintf(buf, PAGE_SIZE, "config:%d\n", lo);
+ return sysfs_emit(buf, "config:%d\n", lo);
if (!fmt->config)
- return snprintf(buf, PAGE_SIZE, "config:%d-%d\n", lo, hi);
+ return sysfs_emit(buf, "config:%d-%d\n", lo, hi);
- return snprintf(buf, PAGE_SIZE, "config%d:%d-%d\n", fmt->config, lo, hi);
+ return sysfs_emit(buf, "config%d:%d-%d\n", fmt->config, lo, hi);
}
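
For reference, a short illustration of how __ffs()/__fls() turn a field mask into the "config:<lo>-<hi>" string above (the mask value is an example only):

	/* e.g. inside a format show() handler */
	unsigned long field = GENMASK(7, 4);	/* field mask 0xf0 */
	int lo = __ffs(field);			/* 4: lowest set bit */
	int hi = __fls(field);			/* 7: highest set bit */
	/* lo != hi and fmt->config == 0, so this emits "config:4-7" */
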
#define _CMN_FORMAT_ATTR(_name, _cfg, _fld) \
diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
index f2a85500258d..b6c2511d59af 100644
--- a/drivers/perf/arm_dmc620_pmu.c
+++ b/drivers/perf/arm_dmc620_pmu.c
@@ -113,7 +113,7 @@ dmc620_pmu_event_show(struct device *dev,
eattr = container_of(attr, typeof(*eattr), attr);
- return sprintf(page, "event=0x%x,clkdiv2=0x%x\n", eattr->eventid, eattr->clkdiv2);
+ return sysfs_emit(page, "event=0x%x,clkdiv2=0x%x\n", eattr->eventid, eattr->clkdiv2);
}
#define DMC620_PMU_EVENT_ATTR(_name, _eventid, _clkdiv2) \
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index 0459a3403469..196faea074d0 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -136,8 +136,7 @@ static ssize_t dsu_pmu_sysfs_event_show(struct device *dev,
{
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
- return snprintf(buf, PAGE_SIZE, "event=0x%lx\n",
- (unsigned long)eattr->var);
+ return sysfs_emit(buf, "event=0x%lx\n", (unsigned long)eattr->var);
}
static ssize_t dsu_pmu_sysfs_format_show(struct device *dev,
@@ -146,7 +145,7 @@ static ssize_t dsu_pmu_sysfs_format_show(struct device *dev,
{
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
- return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
+ return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}
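
Both callbacks rely on the dev_ext_attribute pattern: an extra ->var pointer rides along with the device_attribute and is recovered via container_of(). A sketch with a hypothetical event encoding:

static struct dev_ext_attribute example_event = {
	.attr = __ATTR(example, 0444, dsu_pmu_sysfs_event_show, NULL),
	.var = (void *)0x11,	/* emitted as "event=0x11" */
};
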
static ssize_t dsu_pmu_cpumask_show(struct device *dev,
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 933bd8410fc2..513de1f54e2d 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -6,6 +6,7 @@
* Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
*/
#define pr_fmt(fmt) "hw perfevents: " fmt
+#define dev_fmt pr_fmt
#include <linux/bug.h>
#include <linux/cpumask.h>
@@ -62,7 +63,7 @@ static bool pmu_has_irq_affinity(struct device_node *node)
return !!of_find_property(node, "interrupt-affinity", NULL);
}
-static int pmu_parse_irq_affinity(struct device_node *node, int i)
+static int pmu_parse_irq_affinity(struct device *dev, int i)
{
struct device_node *dn;
int cpu;
@@ -72,19 +73,18 @@ static int pmu_parse_irq_affinity(struct device_node *node, int i)
* affinity matches our logical CPU order, as we used to assume.
* This is fragile, so we'll warn in pmu_parse_irqs().
*/
- if (!pmu_has_irq_affinity(node))
+ if (!pmu_has_irq_affinity(dev->of_node))
return i;
- dn = of_parse_phandle(node, "interrupt-affinity", i);
+ dn = of_parse_phandle(dev->of_node, "interrupt-affinity", i);
if (!dn) {
- pr_warn("failed to parse interrupt-affinity[%d] for %pOFn\n",
- i, node);
+ dev_warn(dev, "failed to parse interrupt-affinity[%d]\n", i);
return -EINVAL;
}
cpu = of_cpu_node_to_id(dn);
if (cpu < 0) {
- pr_warn("failed to find logical CPU for %pOFn\n", dn);
+ dev_warn(dev, "failed to find logical CPU for %pOFn\n", dn);
cpu = nr_cpu_ids;
}
@@ -98,19 +98,18 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
int i = 0, num_irqs;
struct platform_device *pdev = pmu->plat_device;
struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+ struct device *dev = &pdev->dev;
num_irqs = platform_irq_count(pdev);
- if (num_irqs < 0) {
- pr_err("unable to count PMU IRQs\n");
- return num_irqs;
- }
+ if (num_irqs < 0)
+ return dev_err_probe(dev, num_irqs, "unable to count PMU IRQs\n");
/*
* In this case we have no idea which CPUs are covered by the PMU.
* To match our prior behaviour, we assume all CPUs in this case.
*/
if (num_irqs == 0) {
- pr_warn("no irqs for PMU, sampling events not supported\n");
+ dev_warn(dev, "no irqs for PMU, sampling events not supported\n");
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
cpumask_setall(&pmu->supported_cpus);
return 0;
@@ -122,10 +121,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
return pmu_parse_percpu_irq(pmu, irq);
}
- if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(pdev->dev.of_node)) {
- pr_warn("no interrupt-affinity property for %pOF, guessing.\n",
- pdev->dev.of_node);
- }
+ if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(dev->of_node))
+ dev_warn(dev, "no interrupt-affinity property, guessing.\n");
for (i = 0; i < num_irqs; i++) {
int cpu, irq;
@@ -135,18 +132,18 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
continue;
if (irq_is_percpu_devid(irq)) {
- pr_warn("multiple PPIs or mismatched SPI/PPI detected\n");
+ dev_warn(dev, "multiple PPIs or mismatched SPI/PPI detected\n");
return -EINVAL;
}
- cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i);
+ cpu = pmu_parse_irq_affinity(dev, i);
if (cpu < 0)
return cpu;
if (cpu >= nr_cpu_ids)
continue;
if (per_cpu(hw_events->irq, cpu)) {
- pr_warn("multiple PMU IRQs for the same CPU detected\n");
+ dev_warn(dev, "multiple PMU IRQs for the same CPU detected\n");
return -EINVAL;
}
@@ -191,9 +188,8 @@ int arm_pmu_device_probe(struct platform_device *pdev,
const struct of_device_id *of_table,
const struct pmu_probe_info *probe_table)
{
- const struct of_device_id *of_id;
armpmu_init_fn init_fn;
- struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
struct arm_pmu *pmu;
int ret = -ENODEV;
@@ -207,15 +203,14 @@ int arm_pmu_device_probe(struct platform_device *pdev,
if (ret)
goto out_free;
- if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
- init_fn = of_id->data;
-
- pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
+ init_fn = of_device_get_match_data(dev);
+ if (init_fn) {
+ pmu->secure_access = of_property_read_bool(dev->of_node,
"secure-reg-access");
/* arm64 systems boot only as non-secure */
if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
- pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
+ dev_warn(dev, "ignoring \"secure-reg-access\" property for arm64\n");
pmu->secure_access = false;
}
@@ -226,7 +221,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
}
if (ret) {
- pr_info("%pOF: failed to probe PMU!\n", node);
+ dev_err(dev, "failed to probe PMU!\n");
goto out_free;
}
@@ -235,15 +230,16 @@ int arm_pmu_device_probe(struct platform_device *pdev,
goto out_free_irqs;
ret = armpmu_register(pmu);
- if (ret)
- goto out_free;
+ if (ret) {
+ dev_err(dev, "failed to register PMU devices!\n");
+ goto out_free_irqs;
+ }
return 0;
out_free_irqs:
armpmu_free_irqs(pmu);
out_free:
- pr_info("%pOF: failed to register PMU devices!\n", node);
armpmu_free(pmu);
return ret;
}
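
The error paths above lean on the dev_err_probe() idiom, which logs the message and returns the error in one step, recording -EPROBE_DEFER as a deferral reason instead of spamming the log; roughly:

static int example_probe(struct platform_device *pdev)
{
	int err = platform_irq_count(pdev);

	/* logs via dev_err() for real errors, quietly for -EPROBE_DEFER */
	if (err < 0)
		return dev_err_probe(&pdev->dev, err,
				     "unable to count PMU IRQs\n");
	return 0;
}
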
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 8ff7a67f691c..ff6fab4bae30 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -506,30 +506,24 @@ static ssize_t smmu_pmu_event_show(struct device *dev,
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
- return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+ return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
-#define SMMU_EVENT_ATTR(name, config) \
- PMU_EVENT_ATTR(name, smmu_event_attr_##name, \
- config, smmu_pmu_event_show)
-SMMU_EVENT_ATTR(cycles, 0);
-SMMU_EVENT_ATTR(transaction, 1);
-SMMU_EVENT_ATTR(tlb_miss, 2);
-SMMU_EVENT_ATTR(config_cache_miss, 3);
-SMMU_EVENT_ATTR(trans_table_walk_access, 4);
-SMMU_EVENT_ATTR(config_struct_access, 5);
-SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6);
-SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7);
+#define SMMU_EVENT_ATTR(name, config) \
+ (&((struct perf_pmu_events_attr) { \
+ .attr = __ATTR(name, 0444, smmu_pmu_event_show, NULL), \
+ .id = config, \
+ }).attr.attr)
static struct attribute *smmu_pmu_events[] = {
- &smmu_event_attr_cycles.attr.attr,
- &smmu_event_attr_transaction.attr.attr,
- &smmu_event_attr_tlb_miss.attr.attr,
- &smmu_event_attr_config_cache_miss.attr.attr,
- &smmu_event_attr_trans_table_walk_access.attr.attr,
- &smmu_event_attr_config_struct_access.attr.attr,
- &smmu_event_attr_pcie_ats_trans_rq.attr.attr,
- &smmu_event_attr_pcie_ats_trans_passed.attr.attr,
+ SMMU_EVENT_ATTR(cycles, 0),
+ SMMU_EVENT_ATTR(transaction, 1),
+ SMMU_EVENT_ATTR(tlb_miss, 2),
+ SMMU_EVENT_ATTR(config_cache_miss, 3),
+ SMMU_EVENT_ATTR(trans_table_walk_access, 4),
+ SMMU_EVENT_ATTR(config_struct_access, 5),
+ SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6),
+ SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7),
NULL
};
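
The new SMMU_EVENT_ATTR() works because a file-scope C compound literal has static storage duration, so the address of its embedded struct attribute stays valid for the program's lifetime. A stand-alone sketch of the same trick, with hypothetical names:

#define EXAMPLE_EVENT_ATTR(_name, _id)					\
	(&((struct perf_pmu_events_attr) {				\
		.attr = __ATTR(_name, 0444, smmu_pmu_event_show, NULL),	\
		.id = _id,						\
	}).attr.attr)

static struct attribute *example_events[] = {
	EXAMPLE_EVENT_ATTR(example_cycles, 0),	/* no named variable needed */
	NULL
};
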
@@ -560,7 +554,7 @@ static ssize_t smmu_pmu_identifier_attr_show(struct device *dev,
{
struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
- return snprintf(page, PAGE_SIZE, "0x%08x\n", smmu_pmu->iidr);
+ return sysfs_emit(page, "0x%08x\n", smmu_pmu->iidr);
}
static umode_t smmu_pmu_identifier_attr_visible(struct kobject *kobj,
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index d3929ccebfd2..8a1e86ab2d8e 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -126,8 +126,7 @@ static ssize_t arm_spe_pmu_cap_show(struct device *dev,
container_of(attr, struct dev_ext_attribute, attr);
int cap = (long)ea->var;
- return snprintf(buf, PAGE_SIZE, "%u\n",
- arm_spe_pmu_cap_get(spe_pmu, cap));
+ return sysfs_emit(buf, "%u\n", arm_spe_pmu_cap_get(spe_pmu, cap));
}
#define SPE_EXT_ATTR_ENTRY(_name, _func, _var) \
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index be1f26b62ddb..2bbb93188064 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -110,7 +110,7 @@ static ssize_t ddr_perf_identifier_show(struct device *dev,
{
struct ddr_pmu *pmu = dev_get_drvdata(dev);
- return sprintf(page, "%s\n", pmu->devtype_data->identifier);
+ return sysfs_emit(page, "%s\n", pmu->devtype_data->identifier);
}
static umode_t ddr_perf_identifier_attr_visible(struct kobject *kobj,
@@ -170,8 +170,7 @@ static ssize_t ddr_perf_filter_cap_show(struct device *dev,
container_of(attr, struct dev_ext_attribute, attr);
int cap = (long)ea->var;
- return snprintf(buf, PAGE_SIZE, "%u\n",
- ddr_perf_filter_cap_get(pmu, cap));
+ return sysfs_emit(buf, "%u\n", ddr_perf_filter_cap_get(pmu, cap));
}
#define PERF_EXT_ATTR_ENTRY(_name, _func, _var) \
@@ -220,7 +219,7 @@ ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
- return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+ return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id) \
diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile
index e8377061845f..7643c9f93e36 100644
--- a/drivers/perf/hisilicon/Makefile
+++ b/drivers/perf/hisilicon/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \
- hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o
+ hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o hisi_uncore_sllc_pmu.o \
+ hisi_uncore_pa_pmu.o
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index ac1a8c120a00..7c8a4bc21db4 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -14,12 +14,11 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
-#include <linux/platform_device.h>
#include <linux/smp.h>
#include "hisi_uncore_pmu.h"
-/* DDRC register definition */
+/* DDRC register definition in v1 */
#define DDRC_PERF_CTRL 0x010
#define DDRC_FLUX_WR 0x380
#define DDRC_FLUX_RD 0x384
@@ -35,12 +34,24 @@
#define DDRC_INT_CLEAR 0x6d0
#define DDRC_VERSION 0x710
+/* DDRC register definition in v2 */
+#define DDRC_V2_INT_MASK 0x528
+#define DDRC_V2_INT_STATUS 0x52c
+#define DDRC_V2_INT_CLEAR 0x530
+#define DDRC_V2_EVENT_CNT 0xe00
+#define DDRC_V2_EVENT_CTRL 0xe70
+#define DDRC_V2_EVENT_TYPE 0xe74
+#define DDRC_V2_PERF_CTRL 0xea0
+
/* DDRC has 8 counters */
#define DDRC_NR_COUNTERS 0x8
-#define DDRC_PERF_CTRL_EN 0x2
+#define DDRC_V1_PERF_CTRL_EN 0x2
+#define DDRC_V2_PERF_CTRL_EN 0x1
+#define DDRC_V1_NR_EVENTS 0x7
+#define DDRC_V2_NR_EVENTS 0x90
/*
- * For DDRC PMU, there are eight-events and every event has been mapped
+ * For PMU v1, there are eight events, each mapped to a fixed-purpose
 * counter whose register offset is not consistent. Therefore there is
 * no event type to write, and we assume that the event code (0 to 7)
 * equals the counter index in the PMU driver.
@@ -54,73 +65,85 @@ static const u32 ddrc_reg_off[] = {
/*
* Select the counter register offset using the counter index.
- * In DDRC there are no programmable counter, the count
- * is readed form the statistics counter register itself.
+ * In PMU v1, there are no programmable counters; the count
+ * is read from the statistics counter register itself.
*/
-static u32 hisi_ddrc_pmu_get_counter_offset(int cntr_idx)
+static u32 hisi_ddrc_pmu_v1_get_counter_offset(int cntr_idx)
{
return ddrc_reg_off[cntr_idx];
}
-static u64 hisi_ddrc_pmu_read_counter(struct hisi_pmu *ddrc_pmu,
- struct hw_perf_event *hwc)
+static u32 hisi_ddrc_pmu_v2_get_counter_offset(int cntr_idx)
{
- /* Use event code as counter index */
- u32 idx = GET_DDRC_EVENTID(hwc);
-
- if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
- dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
- return 0;
- }
+ return DDRC_V2_EVENT_CNT + cntr_idx * 8;
+}
- return readl(ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
+static u64 hisi_ddrc_pmu_v1_read_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ return readl(ddrc_pmu->base +
+ hisi_ddrc_pmu_v1_get_counter_offset(hwc->idx));
}
-static void hisi_ddrc_pmu_write_counter(struct hisi_pmu *ddrc_pmu,
+static void hisi_ddrc_pmu_v1_write_counter(struct hisi_pmu *ddrc_pmu,
struct hw_perf_event *hwc, u64 val)
{
- u32 idx = GET_DDRC_EVENTID(hwc);
+ writel((u32)val,
+ ddrc_pmu->base + hisi_ddrc_pmu_v1_get_counter_offset(hwc->idx));
+}
- if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
- dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
- return;
- }
+static u64 hisi_ddrc_pmu_v2_read_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ return readq(ddrc_pmu->base +
+ hisi_ddrc_pmu_v2_get_counter_offset(hwc->idx));
+}
- writel((u32)val,
- ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
+static void hisi_ddrc_pmu_v2_write_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ writeq(val,
+ ddrc_pmu->base + hisi_ddrc_pmu_v2_get_counter_offset(hwc->idx));
}
/*
- * For DDRC PMU, event has been mapped to fixed-purpose counter by hardware,
- * so there is no need to write event type.
+ * For DDRC PMU v1, each event is mapped to a fixed-purpose counter by
+ * hardware, so there is no need to write the event type, whereas the
+ * counters are programmable in PMU v2.
*/
static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *ddrc_pmu, int idx,
 u32 type)
{
+ u32 offset;
+
+ if (ddrc_pmu->identifier >= HISI_PMU_V2) {
+ offset = DDRC_V2_EVENT_TYPE + 4 * idx;
+ writel(type, ddrc_pmu->base + offset);
+ }
}
-static void hisi_ddrc_pmu_start_counters(struct hisi_pmu *ddrc_pmu)
+static void hisi_ddrc_pmu_v1_start_counters(struct hisi_pmu *ddrc_pmu)
{
u32 val;
/* Set perf_enable in DDRC_PERF_CTRL to start event counting */
val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
- val |= DDRC_PERF_CTRL_EN;
+ val |= DDRC_V1_PERF_CTRL_EN;
writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
}
-static void hisi_ddrc_pmu_stop_counters(struct hisi_pmu *ddrc_pmu)
+static void hisi_ddrc_pmu_v1_stop_counters(struct hisi_pmu *ddrc_pmu)
{
u32 val;
/* Clear perf_enable in DDRC_PERF_CTRL to stop event counting */
val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
- val &= ~DDRC_PERF_CTRL_EN;
+ val &= ~DDRC_V1_PERF_CTRL_EN;
writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
}
-static void hisi_ddrc_pmu_enable_counter(struct hisi_pmu *ddrc_pmu,
- struct hw_perf_event *hwc)
+static void hisi_ddrc_pmu_v1_enable_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
{
u32 val;
@@ -130,8 +153,8 @@ static void hisi_ddrc_pmu_enable_counter(struct hisi_pmu *ddrc_pmu,
writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
}
-static void hisi_ddrc_pmu_disable_counter(struct hisi_pmu *ddrc_pmu,
- struct hw_perf_event *hwc)
+static void hisi_ddrc_pmu_v1_disable_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
{
u32 val;
@@ -141,7 +164,7 @@ static void hisi_ddrc_pmu_disable_counter(struct hisi_pmu *ddrc_pmu,
writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
}
-static int hisi_ddrc_pmu_get_event_idx(struct perf_event *event)
+static int hisi_ddrc_pmu_v1_get_event_idx(struct perf_event *event)
{
struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu);
unsigned long *used_mask = ddrc_pmu->pmu_events.used_mask;
@@ -157,87 +180,117 @@ static int hisi_ddrc_pmu_get_event_idx(struct perf_event *event)
return idx;
}
-static void hisi_ddrc_pmu_enable_counter_int(struct hisi_pmu *ddrc_pmu,
+static int hisi_ddrc_pmu_v2_get_event_idx(struct perf_event *event)
+{
+ return hisi_uncore_pmu_get_event_idx(event);
+}
+
+static void hisi_ddrc_pmu_v2_start_counters(struct hisi_pmu *ddrc_pmu)
+{
+ u32 val;
+
+ val = readl(ddrc_pmu->base + DDRC_V2_PERF_CTRL);
+ val |= DDRC_V2_PERF_CTRL_EN;
+ writel(val, ddrc_pmu->base + DDRC_V2_PERF_CTRL);
+}
+
+static void hisi_ddrc_pmu_v2_stop_counters(struct hisi_pmu *ddrc_pmu)
+{
+ u32 val;
+
+ val = readl(ddrc_pmu->base + DDRC_V2_PERF_CTRL);
+ val &= ~DDRC_V2_PERF_CTRL_EN;
+ writel(val, ddrc_pmu->base + DDRC_V2_PERF_CTRL);
+}
+
+static void hisi_ddrc_pmu_v2_enable_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ val = readl(ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
+ val |= 1 << hwc->idx;
+ writel(val, ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
+}
+
+static void hisi_ddrc_pmu_v2_disable_counter(struct hisi_pmu *ddrc_pmu,
struct hw_perf_event *hwc)
{
u32 val;
+ val = readl(ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
+ val &= ~(1 << hwc->idx);
+ writel(val, ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
+}
+
+static void hisi_ddrc_pmu_v1_enable_counter_int(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
/* Write 0 to enable interrupt */
val = readl(ddrc_pmu->base + DDRC_INT_MASK);
- val &= ~(1 << GET_DDRC_EVENTID(hwc));
+ val &= ~(1 << hwc->idx);
writel(val, ddrc_pmu->base + DDRC_INT_MASK);
}
-static void hisi_ddrc_pmu_disable_counter_int(struct hisi_pmu *ddrc_pmu,
- struct hw_perf_event *hwc)
+static void hisi_ddrc_pmu_v1_disable_counter_int(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
{
u32 val;
/* Write 1 to mask interrupt */
val = readl(ddrc_pmu->base + DDRC_INT_MASK);
- val |= (1 << GET_DDRC_EVENTID(hwc));
+ val |= 1 << hwc->idx;
writel(val, ddrc_pmu->base + DDRC_INT_MASK);
}
-static irqreturn_t hisi_ddrc_pmu_isr(int irq, void *dev_id)
+static void hisi_ddrc_pmu_v2_enable_counter_int(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
{
- struct hisi_pmu *ddrc_pmu = dev_id;
- struct perf_event *event;
- unsigned long overflown;
- int idx;
-
- /* Read the DDRC_INT_STATUS register */
- overflown = readl(ddrc_pmu->base + DDRC_INT_STATUS);
- if (!overflown)
- return IRQ_NONE;
+ u32 val;
- /*
- * Find the counter index which overflowed if the bit was set
- * and handle it
- */
- for_each_set_bit(idx, &overflown, DDRC_NR_COUNTERS) {
- /* Write 1 to clear the IRQ status flag */
- writel((1 << idx), ddrc_pmu->base + DDRC_INT_CLEAR);
+ val = readl(ddrc_pmu->base + DDRC_V2_INT_MASK);
+ val &= ~(1 << hwc->idx);
+ writel(val, ddrc_pmu->base + DDRC_V2_INT_MASK);
+}
- /* Get the corresponding event struct */
- event = ddrc_pmu->pmu_events.hw_events[idx];
- if (!event)
- continue;
+static void hisi_ddrc_pmu_v2_disable_counter_int(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
- hisi_uncore_pmu_event_update(event);
- hisi_uncore_pmu_set_event_period(event);
- }
+ val = readl(ddrc_pmu->base + DDRC_V2_INT_MASK);
+ val |= 1 << hwc->idx;
+ writel(val, ddrc_pmu->base + DDRC_V2_INT_MASK);
+}
- return IRQ_HANDLED;
+static u32 hisi_ddrc_pmu_v1_get_int_status(struct hisi_pmu *ddrc_pmu)
+{
+ return readl(ddrc_pmu->base + DDRC_INT_STATUS);
}
-static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu,
- struct platform_device *pdev)
+static void hisi_ddrc_pmu_v1_clear_int_status(struct hisi_pmu *ddrc_pmu,
+ int idx)
{
- int irq, ret;
-
- /* Read and init IRQ */
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr,
- IRQF_NOBALANCING | IRQF_NO_THREAD,
- dev_name(&pdev->dev), ddrc_pmu);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "Fail to request IRQ:%d ret:%d\n", irq, ret);
- return ret;
- }
+ writel(1 << idx, ddrc_pmu->base + DDRC_INT_CLEAR);
+}
- ddrc_pmu->irq = irq;
+static u32 hisi_ddrc_pmu_v2_get_int_status(struct hisi_pmu *ddrc_pmu)
+{
+ return readl(ddrc_pmu->base + DDRC_V2_INT_STATUS);
+}
- return 0;
+static void hisi_ddrc_pmu_v2_clear_int_status(struct hisi_pmu *ddrc_pmu,
+ int idx)
+{
+ writel(1 << idx, ddrc_pmu->base + DDRC_V2_INT_CLEAR);
}
static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = {
{ "HISI0233", },
- {},
+ { "HISI0234", },
+ {}
};
MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);
@@ -269,21 +322,38 @@ static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
}
ddrc_pmu->identifier = readl(ddrc_pmu->base + DDRC_VERSION);
+ if (ddrc_pmu->identifier >= HISI_PMU_V2) {
+ if (device_property_read_u32(&pdev->dev, "hisilicon,sub-id",
+ &ddrc_pmu->sub_id)) {
+ dev_err(&pdev->dev, "Can not read sub-id!\n");
+ return -EINVAL;
+ }
+ }
return 0;
}
-static struct attribute *hisi_ddrc_pmu_format_attr[] = {
+static struct attribute *hisi_ddrc_pmu_v1_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-4"),
NULL,
};
-static const struct attribute_group hisi_ddrc_pmu_format_group = {
+static const struct attribute_group hisi_ddrc_pmu_v1_format_group = {
+ .name = "format",
+ .attrs = hisi_ddrc_pmu_v1_format_attr,
+};
+
+static struct attribute *hisi_ddrc_pmu_v2_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ NULL
+};
+
+static const struct attribute_group hisi_ddrc_pmu_v2_format_group = {
.name = "format",
- .attrs = hisi_ddrc_pmu_format_attr,
+ .attrs = hisi_ddrc_pmu_v2_format_attr,
};
-static struct attribute *hisi_ddrc_pmu_events_attr[] = {
+static struct attribute *hisi_ddrc_pmu_v1_events_attr[] = {
HISI_PMU_EVENT_ATTR(flux_wr, 0x00),
HISI_PMU_EVENT_ATTR(flux_rd, 0x01),
HISI_PMU_EVENT_ATTR(flux_wcmd, 0x02),
@@ -295,9 +365,21 @@ static struct attribute *hisi_ddrc_pmu_events_attr[] = {
NULL,
};
-static const struct attribute_group hisi_ddrc_pmu_events_group = {
+static const struct attribute_group hisi_ddrc_pmu_v1_events_group = {
.name = "events",
- .attrs = hisi_ddrc_pmu_events_attr,
+ .attrs = hisi_ddrc_pmu_v1_events_attr,
+};
+
+static struct attribute *hisi_ddrc_pmu_v2_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(cycles, 0x00),
+ HISI_PMU_EVENT_ATTR(flux_wr, 0x83),
+ HISI_PMU_EVENT_ATTR(flux_rd, 0x84),
+ NULL
+};
+
+static const struct attribute_group hisi_ddrc_pmu_v2_events_group = {
+ .name = "events",
+ .attrs = hisi_ddrc_pmu_v2_events_attr,
};
static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
@@ -323,25 +405,50 @@ static const struct attribute_group hisi_ddrc_pmu_identifier_group = {
.attrs = hisi_ddrc_pmu_identifier_attrs,
};
-static const struct attribute_group *hisi_ddrc_pmu_attr_groups[] = {
- &hisi_ddrc_pmu_format_group,
- &hisi_ddrc_pmu_events_group,
+static const struct attribute_group *hisi_ddrc_pmu_v1_attr_groups[] = {
+ &hisi_ddrc_pmu_v1_format_group,
+ &hisi_ddrc_pmu_v1_events_group,
&hisi_ddrc_pmu_cpumask_attr_group,
&hisi_ddrc_pmu_identifier_group,
NULL,
};
-static const struct hisi_uncore_ops hisi_uncore_ddrc_ops = {
+static const struct attribute_group *hisi_ddrc_pmu_v2_attr_groups[] = {
+ &hisi_ddrc_pmu_v2_format_group,
+ &hisi_ddrc_pmu_v2_events_group,
+ &hisi_ddrc_pmu_cpumask_attr_group,
+ &hisi_ddrc_pmu_identifier_group,
+ NULL
+};
+
+static const struct hisi_uncore_ops hisi_uncore_ddrc_v1_ops = {
+ .write_evtype = hisi_ddrc_pmu_write_evtype,
+ .get_event_idx = hisi_ddrc_pmu_v1_get_event_idx,
+ .start_counters = hisi_ddrc_pmu_v1_start_counters,
+ .stop_counters = hisi_ddrc_pmu_v1_stop_counters,
+ .enable_counter = hisi_ddrc_pmu_v1_enable_counter,
+ .disable_counter = hisi_ddrc_pmu_v1_disable_counter,
+ .enable_counter_int = hisi_ddrc_pmu_v1_enable_counter_int,
+ .disable_counter_int = hisi_ddrc_pmu_v1_disable_counter_int,
+ .write_counter = hisi_ddrc_pmu_v1_write_counter,
+ .read_counter = hisi_ddrc_pmu_v1_read_counter,
+ .get_int_status = hisi_ddrc_pmu_v1_get_int_status,
+ .clear_int_status = hisi_ddrc_pmu_v1_clear_int_status,
+};
+
+static const struct hisi_uncore_ops hisi_uncore_ddrc_v2_ops = {
.write_evtype = hisi_ddrc_pmu_write_evtype,
- .get_event_idx = hisi_ddrc_pmu_get_event_idx,
- .start_counters = hisi_ddrc_pmu_start_counters,
- .stop_counters = hisi_ddrc_pmu_stop_counters,
- .enable_counter = hisi_ddrc_pmu_enable_counter,
- .disable_counter = hisi_ddrc_pmu_disable_counter,
- .enable_counter_int = hisi_ddrc_pmu_enable_counter_int,
- .disable_counter_int = hisi_ddrc_pmu_disable_counter_int,
- .write_counter = hisi_ddrc_pmu_write_counter,
- .read_counter = hisi_ddrc_pmu_read_counter,
+ .get_event_idx = hisi_ddrc_pmu_v2_get_event_idx,
+ .start_counters = hisi_ddrc_pmu_v2_start_counters,
+ .stop_counters = hisi_ddrc_pmu_v2_stop_counters,
+ .enable_counter = hisi_ddrc_pmu_v2_enable_counter,
+ .disable_counter = hisi_ddrc_pmu_v2_disable_counter,
+ .enable_counter_int = hisi_ddrc_pmu_v2_enable_counter_int,
+ .disable_counter_int = hisi_ddrc_pmu_v2_disable_counter_int,
+ .write_counter = hisi_ddrc_pmu_v2_write_counter,
+ .read_counter = hisi_ddrc_pmu_v2_read_counter,
+ .get_int_status = hisi_ddrc_pmu_v2_get_int_status,
+ .clear_int_status = hisi_ddrc_pmu_v2_clear_int_status,
};
static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
@@ -353,16 +460,25 @@ static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
if (ret)
return ret;
- ret = hisi_ddrc_pmu_init_irq(ddrc_pmu, pdev);
+ ret = hisi_uncore_pmu_init_irq(ddrc_pmu, pdev);
if (ret)
return ret;
+ if (ddrc_pmu->identifier >= HISI_PMU_V2) {
+ ddrc_pmu->counter_bits = 48;
+ ddrc_pmu->check_event = DDRC_V2_NR_EVENTS;
+ ddrc_pmu->pmu_events.attr_groups = hisi_ddrc_pmu_v2_attr_groups;
+ ddrc_pmu->ops = &hisi_uncore_ddrc_v2_ops;
+ } else {
+ ddrc_pmu->counter_bits = 32;
+ ddrc_pmu->check_event = DDRC_V1_NR_EVENTS;
+ ddrc_pmu->pmu_events.attr_groups = hisi_ddrc_pmu_v1_attr_groups;
+ ddrc_pmu->ops = &hisi_uncore_ddrc_v1_ops;
+ }
+
ddrc_pmu->num_counters = DDRC_NR_COUNTERS;
- ddrc_pmu->counter_bits = 32;
- ddrc_pmu->ops = &hisi_uncore_ddrc_ops;
ddrc_pmu->dev = &pdev->dev;
ddrc_pmu->on_cpu = -1;
- ddrc_pmu->check_event = 7;
return 0;
}
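
counter_bits set here is consumed by the shared hisi_uncore code when seeding a counter; a hedged sketch of that logic (paraphrased from hisi_uncore_pmu.c, not the verbatim implementation):

static void example_set_period(struct hisi_pmu *pmu, struct hw_perf_event *hwc)
{
	/* Start at half the range so overflow fires after 2^(bits-1) events */
	u64 val = BIT_ULL(pmu->counter_bits - 1);

	local64_set(&hwc->prev_count, val);
	pmu->ops->write_counter(pmu, hwc, val);
}
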
@@ -390,8 +506,16 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
return ret;
}
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_ddrc%u",
- ddrc_pmu->sccl_id, ddrc_pmu->index_id);
+ if (ddrc_pmu->identifier >= HISI_PMU_V2)
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "hisi_sccl%u_ddrc%u_%u",
+ ddrc_pmu->sccl_id, ddrc_pmu->index_id,
+ ddrc_pmu->sub_id);
+ else
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id,
+ ddrc_pmu->index_id);
+
ddrc_pmu->pmu = (struct pmu) {
.name = name,
.module = THIS_MODULE,
@@ -404,7 +528,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
.start = hisi_uncore_pmu_start,
.stop = hisi_uncore_pmu_stop,
.read = hisi_uncore_pmu_read,
- .attr_groups = hisi_ddrc_pmu_attr_groups,
+ .attr_groups = ddrc_pmu->pmu_events.attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 3402f1a395a8..0316fabe32f1 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
-#include <linux/platform_device.h>
#include <linux/smp.h>
#include "hisi_uncore_pmu.h"
@@ -26,18 +25,136 @@
#define HHA_VERSION 0x1cf0
#define HHA_PERF_CTRL 0x1E00
#define HHA_EVENT_CTRL 0x1E04
+#define HHA_SRCID_CTRL 0x1E08
+#define HHA_DATSRC_CTRL 0x1BF0
#define HHA_EVENT_TYPE0 0x1E80
/*
- * Each counter is 48-bits and [48:63] are reserved
- * which are Read-As-Zero and Writes-Ignored.
+ * If the HW version only supports a 48-bit counter, then
+ * bits [63:48] are reserved, which are Read-As-Zero and
+ * Writes-Ignored.
*/
#define HHA_CNT0_LOWER 0x1F00
-/* HHA has 16-counters */
-#define HHA_NR_COUNTERS 0x10
+/* HHA PMU v1 has 16 counters and v2 only has 8 counters */
+#define HHA_V1_NR_COUNTERS 0x10
+#define HHA_V2_NR_COUNTERS 0x8
#define HHA_PERF_CTRL_EN 0x1
+#define HHA_TRACETAG_EN BIT(31)
+#define HHA_SRCID_EN BIT(2)
+#define HHA_SRCID_CMD_SHIFT 6
+#define HHA_SRCID_MSK_SHIFT 20
+#define HHA_SRCID_CMD GENMASK(16, 6)
+#define HHA_SRCID_MSK GENMASK(30, 20)
+#define HHA_DATSRC_SKT_EN BIT(23)
#define HHA_EVTYPE_NONE 0xff
+#define HHA_V1_NR_EVENT 0x65
+#define HHA_V2_NR_EVENT 0xCE
+
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 10, 0);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 21, 11);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 22, 22);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_skt, config1, 23, 23);
+
+static void hisi_hha_pmu_enable_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
+ u32 tt_en = hisi_get_tracetag_en(event);
+
+ if (tt_en) {
+ u32 val;
+
+ val = readl(hha_pmu->base + HHA_SRCID_CTRL);
+ val |= HHA_TRACETAG_EN;
+ writel(val, hha_pmu->base + HHA_SRCID_CTRL);
+ }
+}
+
+static void hisi_hha_pmu_clear_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
+ u32 val;
+
+ val = readl(hha_pmu->base + HHA_SRCID_CTRL);
+ val &= ~HHA_TRACETAG_EN;
+ writel(val, hha_pmu->base + HHA_SRCID_CTRL);
+}
+
+static void hisi_hha_pmu_config_ds(struct perf_event *event)
+{
+ struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
+ u32 ds_skt = hisi_get_datasrc_skt(event);
+
+ if (ds_skt) {
+ u32 val;
+
+ val = readl(hha_pmu->base + HHA_DATSRC_CTRL);
+ val |= HHA_DATSRC_SKT_EN;
+ writel(val, hha_pmu->base + HHA_DATSRC_CTRL);
+ }
+}
+
+static void hisi_hha_pmu_clear_ds(struct perf_event *event)
+{
+ struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
+ u32 ds_skt = hisi_get_datasrc_skt(event);
+
+ if (ds_skt) {
+ u32 val;
+
+ val = readl(hha_pmu->base + HHA_DATSRC_CTRL);
+ val &= ~HHA_DATSRC_SKT_EN;
+ writel(val, hha_pmu->base + HHA_DATSRC_CTRL);
+ }
+}
+
+static void hisi_hha_pmu_config_srcid(struct perf_event *event)
+{
+ struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
+ u32 cmd = hisi_get_srcid_cmd(event);
+
+ if (cmd) {
+ u32 val, msk;
+
+ msk = hisi_get_srcid_msk(event);
+ val = readl(hha_pmu->base + HHA_SRCID_CTRL);
+ val |= HHA_SRCID_EN | (cmd << HHA_SRCID_CMD_SHIFT) |
+ (msk << HHA_SRCID_MSK_SHIFT);
+ writel(val, hha_pmu->base + HHA_SRCID_CTRL);
+ }
+}
+
+static void hisi_hha_pmu_disable_srcid(struct perf_event *event)
+{
+ struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
+ u32 cmd = hisi_get_srcid_cmd(event);
+
+ if (cmd) {
+ u32 val;
+
+ val = readl(hha_pmu->base + HHA_SRCID_CTRL);
+ val &= ~(HHA_SRCID_EN | HHA_SRCID_MSK | HHA_SRCID_CMD);
+ writel(val, hha_pmu->base + HHA_SRCID_CTRL);
+ }
+}
+
+static void hisi_hha_pmu_enable_filter(struct perf_event *event)
+{
+ if (event->attr.config1 != 0x0) {
+ hisi_hha_pmu_enable_tracetag(event);
+ hisi_hha_pmu_config_ds(event);
+ hisi_hha_pmu_config_srcid(event);
+ }
+}
+
+static void hisi_hha_pmu_disable_filter(struct perf_event *event)
+{
+ if (event->attr.config1 != 0x0) {
+ hisi_hha_pmu_disable_srcid(event);
+ hisi_hha_pmu_clear_ds(event);
+ hisi_hha_pmu_clear_tracetag(event);
+ }
+}
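
A side note on the srcid helpers above: since HHA_SRCID_CMD and HHA_SRCID_MSK are GENMASK() definitions, the explicit shifts could equivalently use FIELD_PREP() from <linux/bitfield.h>, which derives the shift from the mask; a sketch:

static void example_config_srcid(struct hisi_pmu *hha_pmu, u32 cmd, u32 msk)
{
	u32 val = readl(hha_pmu->base + HHA_SRCID_CTRL);

	/* FIELD_PREP() places each value into its GENMASK() field */
	val |= HHA_SRCID_EN | FIELD_PREP(HHA_SRCID_CMD, cmd) |
	       FIELD_PREP(HHA_SRCID_MSK, msk);
	writel(val, hha_pmu->base + HHA_SRCID_CTRL);
}
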
/*
* Select the counter register offset using the counter index
@@ -51,29 +168,15 @@ static u32 hisi_hha_pmu_get_counter_offset(int cntr_idx)
static u64 hisi_hha_pmu_read_counter(struct hisi_pmu *hha_pmu,
struct hw_perf_event *hwc)
{
- u32 idx = hwc->idx;
-
- if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
- dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
- return 0;
- }
-
/* Read 64 bits and like L3C, top 16 bits are RAZ */
- return readq(hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
+ return readq(hha_pmu->base + hisi_hha_pmu_get_counter_offset(hwc->idx));
}
static void hisi_hha_pmu_write_counter(struct hisi_pmu *hha_pmu,
struct hw_perf_event *hwc, u64 val)
{
- u32 idx = hwc->idx;
-
- if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
- dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
- return;
- }
-
/* Write 64 bits and like L3C, top 16 bits are WI */
- writeq(val, hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
+ writeq(val, hha_pmu->base + hisi_hha_pmu_get_counter_offset(hwc->idx));
}
static void hisi_hha_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
@@ -169,65 +272,20 @@ static void hisi_hha_pmu_disable_counter_int(struct hisi_pmu *hha_pmu,
writel(val, hha_pmu->base + HHA_INT_MASK);
}
-static irqreturn_t hisi_hha_pmu_isr(int irq, void *dev_id)
+static u32 hisi_hha_pmu_get_int_status(struct hisi_pmu *hha_pmu)
{
- struct hisi_pmu *hha_pmu = dev_id;
- struct perf_event *event;
- unsigned long overflown;
- int idx;
-
- /* Read HHA_INT_STATUS register */
- overflown = readl(hha_pmu->base + HHA_INT_STATUS);
- if (!overflown)
- return IRQ_NONE;
-
- /*
- * Find the counter index which overflowed if the bit was set
- * and handle it
- */
- for_each_set_bit(idx, &overflown, HHA_NR_COUNTERS) {
- /* Write 1 to clear the IRQ status flag */
- writel((1 << idx), hha_pmu->base + HHA_INT_CLEAR);
-
- /* Get the corresponding event struct */
- event = hha_pmu->pmu_events.hw_events[idx];
- if (!event)
- continue;
-
- hisi_uncore_pmu_event_update(event);
- hisi_uncore_pmu_set_event_period(event);
- }
-
- return IRQ_HANDLED;
+ return readl(hha_pmu->base + HHA_INT_STATUS);
}
-static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu,
- struct platform_device *pdev)
+static void hisi_hha_pmu_clear_int_status(struct hisi_pmu *hha_pmu, int idx)
{
- int irq, ret;
-
- /* Read and init IRQ */
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr,
- IRQF_NOBALANCING | IRQF_NO_THREAD,
- dev_name(&pdev->dev), hha_pmu);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "Fail to request IRQ:%d ret:%d\n", irq, ret);
- return ret;
- }
-
- hha_pmu->irq = irq;
-
- return 0;
+ writel(1 << idx, hha_pmu->base + HHA_INT_CLEAR);
}
static const struct acpi_device_id hisi_hha_pmu_acpi_match[] = {
{ "HISI0243", },
- {},
+ { "HISI0244", },
+ {}
};
MODULE_DEVICE_TABLE(acpi, hisi_hha_pmu_acpi_match);
@@ -237,13 +295,6 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
unsigned long long id;
acpi_status status;
- status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
- "_UID", NULL, &id);
- if (ACPI_FAILURE(status))
- return -EINVAL;
-
- hha_pmu->index_id = id;
-
/*
* Use SCCL_ID and UID to identify the HHA PMU, while
* SCCL_ID is in MPIDR[aff2].
@@ -253,6 +304,22 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
dev_err(&pdev->dev, "Can not read hha sccl-id!\n");
return -EINVAL;
}
+
+ /*
+ * Early versions of the BIOS supported _UID by mistake, so both are
+ * supported: the "hisilicon,idx-id" property is preferred when it is
+ * available, with _UID as the fallback.
+ */
+ if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
+ &hha_pmu->index_id)) {
+ status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
+ "_UID", NULL, &id);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&pdev->dev, "Cannot read idx-id!\n");
+ return -EINVAL;
+ }
+
+ hha_pmu->index_id = id;
+ }
/* HHA PMUs only share the same SCCL */
hha_pmu->ccl_id = -1;
@@ -267,17 +334,31 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
return 0;
}
-static struct attribute *hisi_hha_pmu_format_attr[] = {
+static struct attribute *hisi_hha_pmu_v1_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
NULL,
};
-static const struct attribute_group hisi_hha_pmu_format_group = {
+static const struct attribute_group hisi_hha_pmu_v1_format_group = {
+ .name = "format",
+ .attrs = hisi_hha_pmu_v1_format_attr,
+};
+
+static struct attribute *hisi_hha_pmu_v2_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ HISI_PMU_FORMAT_ATTR(srcid_cmd, "config1:0-10"),
+ HISI_PMU_FORMAT_ATTR(srcid_msk, "config1:11-21"),
+ HISI_PMU_FORMAT_ATTR(tracetag_en, "config1:22"),
+ HISI_PMU_FORMAT_ATTR(datasrc_skt, "config1:23"),
+ NULL
+};
+
+static const struct attribute_group hisi_hha_pmu_v2_format_group = {
.name = "format",
- .attrs = hisi_hha_pmu_format_attr,
+ .attrs = hisi_hha_pmu_v2_format_attr,
};
-static struct attribute *hisi_hha_pmu_events_attr[] = {
+static struct attribute *hisi_hha_pmu_v1_events_attr[] = {
HISI_PMU_EVENT_ATTR(rx_ops_num, 0x00),
HISI_PMU_EVENT_ATTR(rx_outer, 0x01),
HISI_PMU_EVENT_ATTR(rx_sccl, 0x02),
@@ -307,9 +388,23 @@ static struct attribute *hisi_hha_pmu_events_attr[] = {
NULL,
};
-static const struct attribute_group hisi_hha_pmu_events_group = {
+static const struct attribute_group hisi_hha_pmu_v1_events_group = {
.name = "events",
- .attrs = hisi_hha_pmu_events_attr,
+ .attrs = hisi_hha_pmu_v1_events_attr,
+};
+
+static struct attribute *hisi_hha_pmu_v2_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(rx_ops_num, 0x00),
+ HISI_PMU_EVENT_ATTR(rx_outer, 0x01),
+ HISI_PMU_EVENT_ATTR(rx_sccl, 0x02),
+ HISI_PMU_EVENT_ATTR(hha_retry, 0x2e),
+ HISI_PMU_EVENT_ATTR(cycles, 0x55),
+ NULL
+};
+
+static const struct attribute_group hisi_hha_pmu_v2_events_group = {
+ .name = "events",
+ .attrs = hisi_hha_pmu_v2_events_attr,
};
static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
@@ -335,14 +430,22 @@ static const struct attribute_group hisi_hha_pmu_identifier_group = {
.attrs = hisi_hha_pmu_identifier_attrs,
};
-static const struct attribute_group *hisi_hha_pmu_attr_groups[] = {
- &hisi_hha_pmu_format_group,
- &hisi_hha_pmu_events_group,
+static const struct attribute_group *hisi_hha_pmu_v1_attr_groups[] = {
+ &hisi_hha_pmu_v1_format_group,
+ &hisi_hha_pmu_v1_events_group,
&hisi_hha_pmu_cpumask_attr_group,
&hisi_hha_pmu_identifier_group,
NULL,
};
+static const struct attribute_group *hisi_hha_pmu_v2_attr_groups[] = {
+ &hisi_hha_pmu_v2_format_group,
+ &hisi_hha_pmu_v2_events_group,
+ &hisi_hha_pmu_cpumask_attr_group,
+ &hisi_hha_pmu_identifier_group,
+ NULL
+};
+
static const struct hisi_uncore_ops hisi_uncore_hha_ops = {
.write_evtype = hisi_hha_pmu_write_evtype,
.get_event_idx = hisi_uncore_pmu_get_event_idx,
@@ -354,6 +457,10 @@ static const struct hisi_uncore_ops hisi_uncore_hha_ops = {
.disable_counter_int = hisi_hha_pmu_disable_counter_int,
.write_counter = hisi_hha_pmu_write_counter,
.read_counter = hisi_hha_pmu_read_counter,
+ .get_int_status = hisi_hha_pmu_get_int_status,
+ .clear_int_status = hisi_hha_pmu_clear_int_status,
+ .enable_filter = hisi_hha_pmu_enable_filter,
+ .disable_filter = hisi_hha_pmu_disable_filter,
};
static int hisi_hha_pmu_dev_probe(struct platform_device *pdev,
@@ -365,16 +472,24 @@ static int hisi_hha_pmu_dev_probe(struct platform_device *pdev,
if (ret)
return ret;
- ret = hisi_hha_pmu_init_irq(hha_pmu, pdev);
+ ret = hisi_uncore_pmu_init_irq(hha_pmu, pdev);
if (ret)
return ret;
- hha_pmu->num_counters = HHA_NR_COUNTERS;
- hha_pmu->counter_bits = 48;
+ if (hha_pmu->identifier >= HISI_PMU_V2) {
+ hha_pmu->counter_bits = 64;
+ hha_pmu->check_event = HHA_V2_NR_EVENT;
+ hha_pmu->pmu_events.attr_groups = hisi_hha_pmu_v2_attr_groups;
+ hha_pmu->num_counters = HHA_V2_NR_COUNTERS;
+ } else {
+ hha_pmu->counter_bits = 48;
+ hha_pmu->check_event = HHA_V1_NR_EVENT;
+ hha_pmu->pmu_events.attr_groups = hisi_hha_pmu_v1_attr_groups;
+ hha_pmu->num_counters = HHA_V1_NR_COUNTERS;
+ }
hha_pmu->ops = &hisi_uncore_hha_ops;
hha_pmu->dev = &pdev->dev;
hha_pmu->on_cpu = -1;
- hha_pmu->check_event = 0x65;
return 0;
}
@@ -416,7 +531,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
.start = hisi_uncore_pmu_start,
.stop = hisi_uncore_pmu_stop,
.read = hisi_uncore_pmu_read,
- .attr_groups = hisi_hha_pmu_attr_groups,
+ .attr_groups = hha_pmu->pmu_events.attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 7d792435c2aa..bf9f7772cac9 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
-#include <linux/platform_device.h>
#include <linux/smp.h>
#include "hisi_uncore_pmu.h"
@@ -24,12 +23,17 @@
#define L3C_INT_MASK 0x0800
#define L3C_INT_STATUS 0x0808
#define L3C_INT_CLEAR 0x080c
+#define L3C_CORE_CTRL 0x1b04
+#define L3C_TRACETAG_CTRL 0x1b20
+#define L3C_DATSRC_TYPE 0x1b48
+#define L3C_DATSRC_CTRL 0x1bf0
#define L3C_EVENT_CTRL 0x1c00
#define L3C_VERSION 0x1cf0
#define L3C_EVENT_TYPE0 0x1d00
/*
- * Each counter is 48-bits and [48:63] are reserved
- * which are Read-As-Zero and Writes-Ignored.
+ * If the HW version only supports a 48-bit counter, then
+ * bits [63:48] are reserved, which are Read-As-Zero and
+ * Writes-Ignored.
*/
#define L3C_CNTR0_LOWER 0x1e00
@@ -37,7 +41,186 @@
#define L3C_NR_COUNTERS 0x8
#define L3C_PERF_CTRL_EN 0x10000
+#define L3C_TRACETAG_EN BIT(31)
+#define L3C_TRACETAG_REQ_SHIFT 7
+#define L3C_TRACETAG_MARK_EN BIT(0)
+#define L3C_TRACETAG_REQ_EN (L3C_TRACETAG_MARK_EN | BIT(2))
+#define L3C_TRACETAG_CORE_EN (L3C_TRACETAG_MARK_EN | BIT(3))
+#define L3C_CORE_EN BIT(20)
+#define L3C_COER_NONE 0x0
+#define L3C_DATSRC_MASK 0xFF
+#define L3C_DATSRC_SKT_EN BIT(23)
+#define L3C_DATSRC_NONE 0x0
#define L3C_EVTYPE_NONE 0xff
+#define L3C_V1_NR_EVENTS 0x59
+#define L3C_V2_NR_EVENTS 0xFF
+
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_core, config1, 7, 0);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_req, config1, 10, 8);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_cfg, config1, 15, 11);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_skt, config1, 16, 16);
+
+static void hisi_l3c_pmu_config_req_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ u32 tt_req = hisi_get_tt_req(event);
+
+ if (tt_req) {
+ u32 val;
+
+ /* Set request-type for tracetag */
+ val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val |= tt_req << L3C_TRACETAG_REQ_SHIFT;
+ val |= L3C_TRACETAG_REQ_EN;
+ writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+
+ /* Enable request-tracetag statistics */
+ val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val |= L3C_TRACETAG_EN;
+ writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ }
+}
+
+static void hisi_l3c_pmu_clear_req_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ u32 tt_req = hisi_get_tt_req(event);
+
+ if (tt_req) {
+ u32 val;
+
+ /* Clear request-type */
+ val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val &= ~(tt_req << L3C_TRACETAG_REQ_SHIFT);
+ val &= ~L3C_TRACETAG_REQ_EN;
+ writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+
+ /* Disable request-tracetag statistics */
+ val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val &= ~L3C_TRACETAG_EN;
+ writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ }
+}
+
+static void hisi_l3c_pmu_write_ds(struct perf_event *event, u32 ds_cfg)
+{
+ struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u32 reg, reg_idx, shift, val;
+ int idx = hwc->idx;
+
+ /*
+ * Select the appropriate datasource register (L3C_DATSRC_TYPE0/1).
+ * There are 2 datasource control registers for the 8 hardware
+ * counters. Each datasource field is 8 bits: the first 4 hardware
+ * counters use L3C_DATSRC_TYPE0 and the last 4 use L3C_DATSRC_TYPE1.
+ */
+ reg = L3C_DATSRC_TYPE + (idx / 4) * 4;
+ reg_idx = idx % 4;
+ shift = 8 * reg_idx;
+
+ val = readl(l3c_pmu->base + reg);
+ val &= ~(L3C_DATSRC_MASK << shift);
+ val |= ds_cfg << shift;
+ writel(val, l3c_pmu->base + reg);
+}
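
A worked example of the arithmetic above, for illustration:

	int idx = 6;					/* hardware counter 6 */
	u32 reg = L3C_DATSRC_TYPE + (idx / 4) * 4;	/* second control register */
	u32 shift = 8 * (idx % 4);			/* 16: third 8-bit lane */
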
+
+static void hisi_l3c_pmu_config_ds(struct perf_event *event)
+{
+ struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ u32 ds_cfg = hisi_get_datasrc_cfg(event);
+ u32 ds_skt = hisi_get_datasrc_skt(event);
+
+ if (ds_cfg)
+ hisi_l3c_pmu_write_ds(event, ds_cfg);
+
+ if (ds_skt) {
+ u32 val;
+
+ val = readl(l3c_pmu->base + L3C_DATSRC_CTRL);
+ val |= L3C_DATSRC_SKT_EN;
+ writel(val, l3c_pmu->base + L3C_DATSRC_CTRL);
+ }
+}
+
+static void hisi_l3c_pmu_clear_ds(struct perf_event *event)
+{
+ struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ u32 ds_cfg = hisi_get_datasrc_cfg(event);
+ u32 ds_skt = hisi_get_datasrc_skt(event);
+
+ if (ds_cfg)
+ hisi_l3c_pmu_write_ds(event, L3C_DATSRC_NONE);
+
+ if (ds_skt) {
+ u32 val;
+
+ val = readl(l3c_pmu->base + L3C_DATSRC_CTRL);
+ val &= ~L3C_DATSRC_SKT_EN;
+ writel(val, l3c_pmu->base + L3C_DATSRC_CTRL);
+ }
+}
+
+static void hisi_l3c_pmu_config_core_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ u32 core = hisi_get_tt_core(event);
+
+ if (core) {
+ u32 val;
+
+ /* Config and enable core information */
+ writel(core, l3c_pmu->base + L3C_CORE_CTRL);
+ val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val |= L3C_CORE_EN;
+ writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+
+ /* Enable core-tracetag statistics */
+ val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val |= L3C_TRACETAG_CORE_EN;
+ writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+ }
+}
+
+static void hisi_l3c_pmu_clear_core_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ u32 core = hisi_get_tt_core(event);
+
+ if (core) {
+ u32 val;
+
+ /* Clear core information */
+ writel(L3C_COER_NONE, l3c_pmu->base + L3C_CORE_CTRL);
+ val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val &= ~L3C_CORE_EN;
+ writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+
+ /* Disable core-tracetag statistics */
+ val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val &= ~L3C_TRACETAG_CORE_EN;
+ writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+ }
+}
+
+static void hisi_l3c_pmu_enable_filter(struct perf_event *event)
+{
+ if (event->attr.config1 != 0x0) {
+ hisi_l3c_pmu_config_req_tracetag(event);
+ hisi_l3c_pmu_config_core_tracetag(event);
+ hisi_l3c_pmu_config_ds(event);
+ }
+}
+
+static void hisi_l3c_pmu_disable_filter(struct perf_event *event)
+{
+ if (event->attr.config1 != 0x0) {
+ hisi_l3c_pmu_clear_ds(event);
+ hisi_l3c_pmu_clear_core_tracetag(event);
+ hisi_l3c_pmu_clear_req_tracetag(event);
+ }
+}
/*
* Select the counter register offset using the counter index
@@ -50,29 +233,13 @@ static u32 hisi_l3c_pmu_get_counter_offset(int cntr_idx)
static u64 hisi_l3c_pmu_read_counter(struct hisi_pmu *l3c_pmu,
struct hw_perf_event *hwc)
{
- u32 idx = hwc->idx;
-
- if (!hisi_uncore_pmu_counter_valid(l3c_pmu, idx)) {
- dev_err(l3c_pmu->dev, "Unsupported event index:%d!\n", idx);
- return 0;
- }
-
- /* Read 64-bits and the upper 16 bits are RAZ */
- return readq(l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(idx));
+ return readq(l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(hwc->idx));
}
static void hisi_l3c_pmu_write_counter(struct hisi_pmu *l3c_pmu,
struct hw_perf_event *hwc, u64 val)
{
- u32 idx = hwc->idx;
-
- if (!hisi_uncore_pmu_counter_valid(l3c_pmu, idx)) {
- dev_err(l3c_pmu->dev, "Unsupported event index:%d!\n", idx);
- return;
- }
-
- /* Write 64-bits and the upper 16 bits are WI */
- writeq(val, l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(idx));
+ writeq(val, l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(hwc->idx));
}
static void hisi_l3c_pmu_write_evtype(struct hisi_pmu *l3c_pmu, int idx,
@@ -168,81 +335,26 @@ static void hisi_l3c_pmu_disable_counter_int(struct hisi_pmu *l3c_pmu,
writel(val, l3c_pmu->base + L3C_INT_MASK);
}
-static irqreturn_t hisi_l3c_pmu_isr(int irq, void *dev_id)
+static u32 hisi_l3c_pmu_get_int_status(struct hisi_pmu *l3c_pmu)
{
- struct hisi_pmu *l3c_pmu = dev_id;
- struct perf_event *event;
- unsigned long overflown;
- int idx;
-
- /* Read L3C_INT_STATUS register */
- overflown = readl(l3c_pmu->base + L3C_INT_STATUS);
- if (!overflown)
- return IRQ_NONE;
-
- /*
- * Find the counter index which overflowed if the bit was set
- * and handle it.
- */
- for_each_set_bit(idx, &overflown, L3C_NR_COUNTERS) {
- /* Write 1 to clear the IRQ status flag */
- writel((1 << idx), l3c_pmu->base + L3C_INT_CLEAR);
-
- /* Get the corresponding event struct */
- event = l3c_pmu->pmu_events.hw_events[idx];
- if (!event)
- continue;
-
- hisi_uncore_pmu_event_update(event);
- hisi_uncore_pmu_set_event_period(event);
- }
-
- return IRQ_HANDLED;
+ return readl(l3c_pmu->base + L3C_INT_STATUS);
}
-static int hisi_l3c_pmu_init_irq(struct hisi_pmu *l3c_pmu,
- struct platform_device *pdev)
+static void hisi_l3c_pmu_clear_int_status(struct hisi_pmu *l3c_pmu, int idx)
{
- int irq, ret;
-
- /* Read and init IRQ */
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- ret = devm_request_irq(&pdev->dev, irq, hisi_l3c_pmu_isr,
- IRQF_NOBALANCING | IRQF_NO_THREAD,
- dev_name(&pdev->dev), l3c_pmu);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "Fail to request IRQ:%d ret:%d\n", irq, ret);
- return ret;
- }
-
- l3c_pmu->irq = irq;
-
- return 0;
+ writel(1 << idx, l3c_pmu->base + L3C_INT_CLEAR);
}
static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = {
{ "HISI0213", },
- {},
+ { "HISI0214", },
+ {}
};
MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match);
static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *l3c_pmu)
{
- unsigned long long id;
- acpi_status status;
-
- status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
- "_UID", NULL, &id);
- if (ACPI_FAILURE(status))
- return -EINVAL;
-
- l3c_pmu->index_id = id;
-
/*
* Use the SCCL_ID and CCL_ID to identify the L3C PMU, while
* SCCL_ID is in MPIDR[aff2] and CCL_ID is in MPIDR[aff1].
@@ -270,17 +382,31 @@ static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
return 0;
}
-static struct attribute *hisi_l3c_pmu_format_attr[] = {
+static struct attribute *hisi_l3c_pmu_v1_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
NULL,
};
-static const struct attribute_group hisi_l3c_pmu_format_group = {
+static const struct attribute_group hisi_l3c_pmu_v1_format_group = {
+ .name = "format",
+ .attrs = hisi_l3c_pmu_v1_format_attr,
+};
+
+static struct attribute *hisi_l3c_pmu_v2_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ HISI_PMU_FORMAT_ATTR(tt_core, "config1:0-7"),
+ HISI_PMU_FORMAT_ATTR(tt_req, "config1:8-10"),
+ HISI_PMU_FORMAT_ATTR(datasrc_cfg, "config1:11-15"),
+ HISI_PMU_FORMAT_ATTR(datasrc_skt, "config1:16"),
+ NULL
+};
+
+static const struct attribute_group hisi_l3c_pmu_v2_format_group = {
.name = "format",
- .attrs = hisi_l3c_pmu_format_attr,
+ .attrs = hisi_l3c_pmu_v2_format_attr,
};
-static struct attribute *hisi_l3c_pmu_events_attr[] = {
+static struct attribute *hisi_l3c_pmu_v1_events_attr[] = {
HISI_PMU_EVENT_ATTR(rd_cpipe, 0x00),
HISI_PMU_EVENT_ATTR(wr_cpipe, 0x01),
HISI_PMU_EVENT_ATTR(rd_hit_cpipe, 0x02),
@@ -297,9 +423,22 @@ static struct attribute *hisi_l3c_pmu_events_attr[] = {
NULL,
};
-static const struct attribute_group hisi_l3c_pmu_events_group = {
+static const struct attribute_group hisi_l3c_pmu_v1_events_group = {
+ .name = "events",
+ .attrs = hisi_l3c_pmu_v1_events_attr,
+};
+
+static struct attribute *hisi_l3c_pmu_v2_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(l3c_hit, 0x48),
+ HISI_PMU_EVENT_ATTR(cycles, 0x7f),
+ HISI_PMU_EVENT_ATTR(l3c_ref, 0xb8),
+ HISI_PMU_EVENT_ATTR(dat_access, 0xb9),
+ NULL
+};
+
+static const struct attribute_group hisi_l3c_pmu_v2_events_group = {
.name = "events",
- .attrs = hisi_l3c_pmu_events_attr,
+ .attrs = hisi_l3c_pmu_v2_events_attr,
};
static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
@@ -325,14 +464,22 @@ static const struct attribute_group hisi_l3c_pmu_identifier_group = {
.attrs = hisi_l3c_pmu_identifier_attrs,
};
-static const struct attribute_group *hisi_l3c_pmu_attr_groups[] = {
- &hisi_l3c_pmu_format_group,
- &hisi_l3c_pmu_events_group,
+static const struct attribute_group *hisi_l3c_pmu_v1_attr_groups[] = {
+ &hisi_l3c_pmu_v1_format_group,
+ &hisi_l3c_pmu_v1_events_group,
&hisi_l3c_pmu_cpumask_attr_group,
&hisi_l3c_pmu_identifier_group,
NULL,
};
+static const struct attribute_group *hisi_l3c_pmu_v2_attr_groups[] = {
+ &hisi_l3c_pmu_v2_format_group,
+ &hisi_l3c_pmu_v2_events_group,
+ &hisi_l3c_pmu_cpumask_attr_group,
+ &hisi_l3c_pmu_identifier_group,
+ NULL
+};
+
static const struct hisi_uncore_ops hisi_uncore_l3c_ops = {
.write_evtype = hisi_l3c_pmu_write_evtype,
.get_event_idx = hisi_uncore_pmu_get_event_idx,
@@ -344,6 +491,10 @@ static const struct hisi_uncore_ops hisi_uncore_l3c_ops = {
.disable_counter_int = hisi_l3c_pmu_disable_counter_int,
.write_counter = hisi_l3c_pmu_write_counter,
.read_counter = hisi_l3c_pmu_read_counter,
+ .get_int_status = hisi_l3c_pmu_get_int_status,
+ .clear_int_status = hisi_l3c_pmu_clear_int_status,
+ .enable_filter = hisi_l3c_pmu_enable_filter,
+ .disable_filter = hisi_l3c_pmu_disable_filter,
};
static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
@@ -355,16 +506,24 @@ static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
if (ret)
return ret;
- ret = hisi_l3c_pmu_init_irq(l3c_pmu, pdev);
+ ret = hisi_uncore_pmu_init_irq(l3c_pmu, pdev);
if (ret)
return ret;
+ if (l3c_pmu->identifier >= HISI_PMU_V2) {
+ l3c_pmu->counter_bits = 64;
+ l3c_pmu->check_event = L3C_V2_NR_EVENTS;
+ l3c_pmu->pmu_events.attr_groups = hisi_l3c_pmu_v2_attr_groups;
+ } else {
+ l3c_pmu->counter_bits = 48;
+ l3c_pmu->check_event = L3C_V1_NR_EVENTS;
+ l3c_pmu->pmu_events.attr_groups = hisi_l3c_pmu_v1_attr_groups;
+ }
+
l3c_pmu->num_counters = L3C_NR_COUNTERS;
- l3c_pmu->counter_bits = 48;
l3c_pmu->ops = &hisi_uncore_l3c_ops;
l3c_pmu->dev = &pdev->dev;
l3c_pmu->on_cpu = -1;
- l3c_pmu->check_event = 0x59;
return 0;
}
@@ -392,8 +551,12 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
return ret;
}
+ /*
+ * CCL_ID is used to identify the L3C within the same SCCL; earlier
+ * versions used _UID for this by mistake.
+ */
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
- l3c_pmu->sccl_id, l3c_pmu->index_id);
+ l3c_pmu->sccl_id, l3c_pmu->ccl_id);
l3c_pmu->pmu = (struct pmu) {
.name = name,
.module = THIS_MODULE,
@@ -406,7 +569,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
.start = hisi_uncore_pmu_start,
.stop = hisi_uncore_pmu_stop,
.read = hisi_uncore_pmu_read,
- .attr_groups = hisi_l3c_pmu_attr_groups,
+ .attr_groups = l3c_pmu->pmu_events.attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
new file mode 100644
index 000000000000..14f23eb31248
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HiSilicon PA uncore hardware event counters support
+ *
+ * Copyright (C) 2020 HiSilicon Limited
+ * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ */
+#include <linux/acpi.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/smp.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* PA register definition */
+#define PA_PERF_CTRL 0x1c00
+#define PA_EVENT_CTRL 0x1c04
+#define PA_TT_CTRL 0x1c08
+#define PA_TGTID_CTRL 0x1c14
+#define PA_SRCID_CTRL 0x1c18
+#define PA_INT_MASK 0x1c70
+#define PA_INT_STATUS 0x1c78
+#define PA_INT_CLEAR 0x1c7c
+#define PA_EVENT_TYPE0 0x1c80
+#define PA_PMU_VERSION 0x1cf0
+#define PA_EVENT_CNT0_L 0x1f00
+
+#define PA_EVTYPE_MASK 0xff
+#define PA_NR_COUNTERS 0x8
+#define PA_PERF_CTRL_EN BIT(0)
+#define PA_TRACETAG_EN BIT(4)
+#define PA_TGTID_EN BIT(11)
+#define PA_SRCID_EN BIT(11)
+#define PA_TGTID_NONE 0
+#define PA_SRCID_NONE 0
+#define PA_TGTID_MSK_SHIFT 12
+#define PA_SRCID_MSK_SHIFT 12
+
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_cmd, config1, 10, 0);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_msk, config1, 21, 11);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 32, 22);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 43, 33);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 44, 44);
+
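+
(HISI_PMU_EVENT_ATTR_EXTRACTOR() lives in hisi_uncore_pmu.h, which is not shown in this diff; it presumably expands to a small inline accessor along these lines, with (hi, lo) giving the config1 bit range:)

static inline u32 hisi_get_tracetag_en(struct perf_event *event)
{
	return FIELD_GET(GENMASK_ULL(44, 44), event->attr.config1);
}
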
+static void hisi_pa_pmu_enable_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
+ u32 tt_en = hisi_get_tracetag_en(event);
+
+ if (tt_en) {
+ u32 val;
+
+ val = readl(pa_pmu->base + PA_TT_CTRL);
+ val |= PA_TRACETAG_EN;
+ writel(val, pa_pmu->base + PA_TT_CTRL);
+ }
+}
+
+static void hisi_pa_pmu_clear_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
+ u32 tt_en = hisi_get_tracetag_en(event);
+
+ if (tt_en) {
+ u32 val;
+
+ val = readl(pa_pmu->base + PA_TT_CTRL);
+ val &= ~PA_TRACETAG_EN;
+ writel(val, pa_pmu->base + PA_TT_CTRL);
+ }
+}
+
+static void hisi_pa_pmu_config_tgtid(struct perf_event *event)
+{
+ struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
+ u32 cmd = hisi_get_tgtid_cmd(event);
+
+ if (cmd) {
+ u32 msk = hisi_get_tgtid_msk(event);
+ u32 val = cmd | PA_TGTID_EN | (msk << PA_TGTID_MSK_SHIFT);
+
+ writel(val, pa_pmu->base + PA_TGTID_CTRL);
+ }
+}
+
+static void hisi_pa_pmu_clear_tgtid(struct perf_event *event)
+{
+ struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
+ u32 cmd = hisi_get_tgtid_cmd(event);
+
+ if (cmd)
+ writel(PA_TGTID_NONE, pa_pmu->base + PA_TGTID_CTRL);
+}
+
+static void hisi_pa_pmu_config_srcid(struct perf_event *event)
+{
+ struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
+ u32 cmd = hisi_get_srcid_cmd(event);
+
+ if (cmd) {
+ u32 msk = hisi_get_srcid_msk(event);
+ u32 val = cmd | PA_SRCID_EN | (msk << PA_SRCID_MSK_SHIFT);
+
+ writel(val, pa_pmu->base + PA_SRCID_CTRL);
+ }
+}
+
+static void hisi_pa_pmu_clear_srcid(struct perf_event *event)
+{
+ struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
+ u32 cmd = hisi_get_srcid_cmd(event);
+
+ if (cmd)
+ writel(PA_SRCID_NONE, pa_pmu->base + PA_SRCID_CTRL);
+}
+
+static void hisi_pa_pmu_enable_filter(struct perf_event *event)
+{
+ if (event->attr.config1 != 0x0) {
+ hisi_pa_pmu_enable_tracetag(event);
+ hisi_pa_pmu_config_srcid(event);
+ hisi_pa_pmu_config_tgtid(event);
+ }
+}
+
+static void hisi_pa_pmu_disable_filter(struct perf_event *event)
+{
+ if (event->attr.config1 != 0x0) {
+ hisi_pa_pmu_clear_tgtid(event);
+ hisi_pa_pmu_clear_srcid(event);
+ hisi_pa_pmu_clear_tracetag(event);
+ }
+}
+
+static u32 hisi_pa_pmu_get_counter_offset(int idx)
+{
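+ /* Counters are 64-bit and packed contiguously, hence the 8-byte stride. */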
+ return (PA_EVENT_CNT0_L + idx * 8);
+}
+
+static u64 hisi_pa_pmu_read_counter(struct hisi_pmu *pa_pmu,
+ struct hw_perf_event *hwc)
+{
+ return readq(pa_pmu->base + hisi_pa_pmu_get_counter_offset(hwc->idx));
+}
+
+static void hisi_pa_pmu_write_counter(struct hisi_pmu *pa_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ writeq(val, pa_pmu->base + hisi_pa_pmu_get_counter_offset(hwc->idx));
+}
+
+static void hisi_pa_pmu_write_evtype(struct hisi_pmu *pa_pmu, int idx,
+ u32 type)
+{
+ u32 reg, reg_idx, shift, val;
+
+ /*
+ * Select the appropriate event select register (PA_EVENT_TYPE0/1).
+ * There are 2 event select registers for the 8 hardware counters.
+ * Event codes are 8 bits wide; the first 4 hardware counters use
+ * PA_EVENT_TYPE0 and the last 4 use PA_EVENT_TYPE1.
+ */
+ reg = PA_EVENT_TYPE0 + (idx / 4) * 4;
+ reg_idx = idx % 4;
+ shift = 8 * reg_idx;
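+ /* e.g. idx == 5 selects PA_EVENT_TYPE1 (0x1c84), byte lane 1 (shift 8). */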
+
+ /* Write the event code to the PA_EVENT_TYPEx register */
+ val = readl(pa_pmu->base + reg);
+ val &= ~(PA_EVTYPE_MASK << shift);
+ val |= (type << shift);
+ writel(val, pa_pmu->base + reg);
+}
+
+static void hisi_pa_pmu_start_counters(struct hisi_pmu *pa_pmu)
+{
+ u32 val;
+
+ val = readl(pa_pmu->base + PA_PERF_CTRL);
+ val |= PA_PERF_CTRL_EN;
+ writel(val, pa_pmu->base + PA_PERF_CTRL);
+}
+
+static void hisi_pa_pmu_stop_counters(struct hisi_pmu *pa_pmu)
+{
+ u32 val;
+
+ val = readl(pa_pmu->base + PA_PERF_CTRL);
+ val &= ~(PA_PERF_CTRL_EN);
+ writel(val, pa_pmu->base + PA_PERF_CTRL);
+}
+
+static void hisi_pa_pmu_enable_counter(struct hisi_pmu *pa_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Enable counter index in PA_EVENT_CTRL register */
+ val = readl(pa_pmu->base + PA_EVENT_CTRL);
+ val |= 1 << hwc->idx;
+ writel(val, pa_pmu->base + PA_EVENT_CTRL);
+}
+
+static void hisi_pa_pmu_disable_counter(struct hisi_pmu *pa_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Clear counter index in PA_EVENT_CTRL register */
+ val = readl(pa_pmu->base + PA_EVENT_CTRL);
+ val &= ~(1 << hwc->idx);
+ writel(val, pa_pmu->base + PA_EVENT_CTRL);
+}
+
+static void hisi_pa_pmu_enable_counter_int(struct hisi_pmu *pa_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Write 0 to enable interrupt */
+ val = readl(pa_pmu->base + PA_INT_MASK);
+ val &= ~(1 << hwc->idx);
+ writel(val, pa_pmu->base + PA_INT_MASK);
+}
+
+static void hisi_pa_pmu_disable_counter_int(struct hisi_pmu *pa_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Write 1 to mask interrupt */
+ val = readl(pa_pmu->base + PA_INT_MASK);
+ val |= 1 << hwc->idx;
+ writel(val, pa_pmu->base + PA_INT_MASK);
+}
+
+static u32 hisi_pa_pmu_get_int_status(struct hisi_pmu *pa_pmu)
+{
+ return readl(pa_pmu->base + PA_INT_STATUS);
+}
+
+static void hisi_pa_pmu_clear_int_status(struct hisi_pmu *pa_pmu, int idx)
+{
+ writel(1 << idx, pa_pmu->base + PA_INT_CLEAR);
+}
+
+static const struct acpi_device_id hisi_pa_pmu_acpi_match[] = {
+ { "HISI0273", },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_pa_pmu_acpi_match);
+
+static int hisi_pa_pmu_init_data(struct platform_device *pdev,
+ struct hisi_pmu *pa_pmu)
+{
+ /*
+ * The SCCL_ID and the index ID together identify the PA PMU.
+ * The SCCL_ID is that of the SCCL nearest to this SICL, and a
+ * CPU core from that SCCL is chosen to manage this PMU.
+ */
+ if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
+ &pa_pmu->sccl_id)) {
+ dev_err(&pdev->dev, "Cannot read sccl-id!\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
+ &pa_pmu->index_id)) {
+ dev_err(&pdev->dev, "Cannot read idx-id!\n");
+ return -EINVAL;
+ }
+
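+ /* The CCL_ID is not used to identify the PA PMU; mark it invalid. */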
+ pa_pmu->ccl_id = -1;
+
+ pa_pmu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pa_pmu->base)) {
+ dev_err(&pdev->dev, "ioremap failed for pa_pmu resource.\n");
+ return PTR_ERR(pa_pmu->base);
+ }
+
+ pa_pmu->identifier = readl(pa_pmu->base + PA_PMU_VERSION);
+
+ return 0;
+}
+
+static struct attribute *hisi_pa_pmu_v2_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ HISI_PMU_FORMAT_ATTR(tgtid_cmd, "config1:0-10"),
+ HISI_PMU_FORMAT_ATTR(tgtid_msk, "config1:11-21"),
+ HISI_PMU_FORMAT_ATTR(srcid_cmd, "config1:22-32"),
+ HISI_PMU_FORMAT_ATTR(srcid_msk, "config1:33-43"),
+ HISI_PMU_FORMAT_ATTR(tracetag_en, "config1:44"),
+ NULL,
+};
+
+static const struct attribute_group hisi_pa_pmu_v2_format_group = {
+ .name = "format",
+ .attrs = hisi_pa_pmu_v2_format_attr,
+};
+
+static struct attribute *hisi_pa_pmu_v2_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(rx_req, 0x40),
+ HISI_PMU_EVENT_ATTR(tx_req, 0x5c),
+ HISI_PMU_EVENT_ATTR(cycle, 0x78),
+ NULL
+};
+
+static const struct attribute_group hisi_pa_pmu_v2_events_group = {
+ .name = "events",
+ .attrs = hisi_pa_pmu_v2_events_attr,
+};
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static struct attribute *hisi_pa_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static const struct attribute_group hisi_pa_pmu_cpumask_attr_group = {
+ .attrs = hisi_pa_pmu_cpumask_attrs,
+};
+
+static struct device_attribute hisi_pa_pmu_identifier_attr =
+ __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
+
+static struct attribute *hisi_pa_pmu_identifier_attrs[] = {
+ &hisi_pa_pmu_identifier_attr.attr,
+ NULL
+};
+
+static struct attribute_group hisi_pa_pmu_identifier_group = {
+ .attrs = hisi_pa_pmu_identifier_attrs,
+};
+
+static const struct attribute_group *hisi_pa_pmu_v2_attr_groups[] = {
+ &hisi_pa_pmu_v2_format_group,
+ &hisi_pa_pmu_v2_events_group,
+ &hisi_pa_pmu_cpumask_attr_group,
+ &hisi_pa_pmu_identifier_group,
+ NULL
+};
+
+static const struct hisi_uncore_ops hisi_uncore_pa_ops = {
+ .write_evtype = hisi_pa_pmu_write_evtype,
+ .get_event_idx = hisi_uncore_pmu_get_event_idx,
+ .start_counters = hisi_pa_pmu_start_counters,
+ .stop_counters = hisi_pa_pmu_stop_counters,
+ .enable_counter = hisi_pa_pmu_enable_counter,
+ .disable_counter = hisi_pa_pmu_disable_counter,
+ .enable_counter_int = hisi_pa_pmu_enable_counter_int,
+ .disable_counter_int = hisi_pa_pmu_disable_counter_int,
+ .write_counter = hisi_pa_pmu_write_counter,
+ .read_counter = hisi_pa_pmu_read_counter,
+ .get_int_status = hisi_pa_pmu_get_int_status,
+ .clear_int_status = hisi_pa_pmu_clear_int_status,
+ .enable_filter = hisi_pa_pmu_enable_filter,
+ .disable_filter = hisi_pa_pmu_disable_filter,
+};
+
+static int hisi_pa_pmu_dev_probe(struct platform_device *pdev,
+ struct hisi_pmu *pa_pmu)
+{
+ int ret;
+
+ ret = hisi_pa_pmu_init_data(pdev, pa_pmu);
+ if (ret)
+ return ret;
+
+ ret = hisi_uncore_pmu_init_irq(pa_pmu, pdev);
+ if (ret)
+ return ret;
+
+ pa_pmu->pmu_events.attr_groups = hisi_pa_pmu_v2_attr_groups;
+ pa_pmu->num_counters = PA_NR_COUNTERS;
+ pa_pmu->ops = &hisi_uncore_pa_ops;
+ pa_pmu->check_event = 0xB0;
+ pa_pmu->counter_bits = 64;
+ pa_pmu->dev = &pdev->dev;
+ pa_pmu->on_cpu = -1;
+
+ return 0;
+}
+
+static int hisi_pa_pmu_probe(struct platform_device *pdev)
+{
+ struct hisi_pmu *pa_pmu;
+ char *name;
+ int ret;
+
+ pa_pmu = devm_kzalloc(&pdev->dev, sizeof(*pa_pmu), GFP_KERNEL);
+ if (!pa_pmu)
+ return -ENOMEM;
+
+ ret = hisi_pa_pmu_dev_probe(pdev, pa_pmu);
+ if (ret)
+ return ret;
+ /*
+ * The PA is attached to the SICL, and the CPU core managing this
+ * PMU is chosen from the nearest SCCL, whose SCCL_ID is one
+ * greater than the SICL_ID; hence the subtraction below.
+ */
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%u_pa%u",
+ pa_pmu->sccl_id - 1, pa_pmu->index_id);
+ if (!name)
+ return -ENOMEM;
+
+ ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+ &pa_pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+ return ret;
+ }
+
+ pa_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = hisi_uncore_pmu_event_init,
+ .pmu_enable = hisi_uncore_pmu_enable,
+ .pmu_disable = hisi_uncore_pmu_disable,
+ .add = hisi_uncore_pmu_add,
+ .del = hisi_uncore_pmu_del,
+ .start = hisi_uncore_pmu_start,
+ .stop = hisi_uncore_pmu_stop,
+ .read = hisi_uncore_pmu_read,
+ .attr_groups = pa_pmu->pmu_events.attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ };
+
+ ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
+ if (ret) {
+ dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+ &pa_pmu->node);
+ irq_set_affinity_hint(pa_pmu->irq, NULL);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pa_pmu);
+ return ret;
+}
+
+static int hisi_pa_pmu_remove(struct platform_device *pdev)
+{
+ struct hisi_pmu *pa_pmu = platform_get_drvdata(pdev);
+
+ perf_pmu_unregister(&pa_pmu->pmu);
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+ &pa_pmu->node);
+ irq_set_affinity_hint(pa_pmu->irq, NULL);
+
+ return 0;
+}
+
+static struct platform_driver hisi_pa_pmu_driver = {
+ .driver = {
+ .name = "hisi_pa_pmu",
+ .acpi_match_table = hisi_pa_pmu_acpi_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = hisi_pa_pmu_probe,
+ .remove = hisi_pa_pmu_remove,
+};
+
+static int __init hisi_pa_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+ "AP_PERF_ARM_HISI_PA_ONLINE",
+ hisi_uncore_pmu_online_cpu,
+ hisi_uncore_pmu_offline_cpu);
+ if (ret) {
+ pr_err("PA PMU: cpuhp state setup failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&hisi_pa_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE);
+
+ return ret;
+}
+module_init(hisi_pa_pmu_module_init);
+
+static void __exit hisi_pa_pmu_module_exit(void)
+{
+ platform_driver_unregister(&hisi_pa_pmu_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE);
+}
+module_exit(hisi_pa_pmu_module_exit);
+
+MODULE_DESCRIPTION("HiSilicon Protocol Adapter uncore PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
+MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 9dbdc3fc3bb4..13c68b5e39c4 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -21,7 +21,7 @@
#include "hisi_uncore_pmu.h"
#define HISI_GET_EVENTID(ev) (ev->hw.config_base & 0xff)
-#define HISI_MAX_PERIOD(nr) (BIT_ULL(nr) - 1)
+#define HISI_MAX_PERIOD(nr) (GENMASK_ULL((nr) - 1, 0))
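+/* e.g. HISI_MAX_PERIOD(48) == 0x0000ffffffffffff, the 48-bit maximum count. */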
/*
* PMU format attributes
@@ -33,7 +33,7 @@ ssize_t hisi_format_sysfs_show(struct device *dev,
eattr = container_of(attr, struct dev_ext_attribute, attr);
- return sprintf(buf, "%s\n", (char *)eattr->var);
+ return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}
EXPORT_SYMBOL_GPL(hisi_format_sysfs_show);
@@ -47,7 +47,7 @@ ssize_t hisi_event_sysfs_show(struct device *dev,
eattr = container_of(attr, struct dev_ext_attribute, attr);
- return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var);
+ return sysfs_emit(page, "config=0x%lx\n", (unsigned long)eattr->var);
}
EXPORT_SYMBOL_GPL(hisi_event_sysfs_show);
@@ -59,7 +59,7 @@ ssize_t hisi_cpumask_sysfs_show(struct device *dev,
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
- return sprintf(buf, "%d\n", hisi_pmu->on_cpu);
+ return sysfs_emit(buf, "%d\n", hisi_pmu->on_cpu);
}
EXPORT_SYMBOL_GPL(hisi_cpumask_sysfs_show);
@@ -96,12 +96,6 @@ static bool hisi_validate_event_group(struct perf_event *event)
return counters <= hisi_pmu->num_counters;
}
-int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx)
-{
- return idx >= 0 && idx < hisi_pmu->num_counters;
-}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_counter_valid);
-
int hisi_uncore_pmu_get_event_idx(struct perf_event *event)
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
@@ -125,19 +119,68 @@ ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
- return snprintf(page, PAGE_SIZE, "0x%08x\n", hisi_pmu->identifier);
+ return sysfs_emit(page, "0x%08x\n", hisi_pmu->identifier);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_identifier_attr_show);
static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx)
{
- if (!hisi_uncore_pmu_counter_valid(hisi_pmu, idx)) {
- dev_err(hisi_pmu->dev, "Unsupported event index:%d!\n", idx);
- return;
+ clear_bit(idx, hisi_pmu->pmu_events.used_mask);
+}
+
+static irqreturn_t hisi_uncore_pmu_isr(int irq, void *data)
+{
+ struct hisi_pmu *hisi_pmu = data;
+ struct perf_event *event;
+ unsigned long overflown;
+ int idx;
+
+ overflown = hisi_pmu->ops->get_int_status(hisi_pmu);
+ if (!overflown)
+ return IRQ_NONE;
+
+ /*
+ * Find the counter index which overflowed if the bit was set
+ * and handle it.
+ */
+ for_each_set_bit(idx, &overflown, hisi_pmu->num_counters) {
+ /* Write 1 to clear the IRQ status flag */
+ hisi_pmu->ops->clear_int_status(hisi_pmu, idx);
+ /* Get the corresponding event struct */
+ event = hisi_pmu->pmu_events.hw_events[idx];
+ if (!event)
+ continue;
+
+ hisi_uncore_pmu_event_update(event);
+ hisi_uncore_pmu_set_event_period(event);
}
- clear_bit(idx, hisi_pmu->pmu_events.used_mask);
+ return IRQ_HANDLED;
+}
+
+int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
+ struct platform_device *pdev)
+{
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, hisi_uncore_pmu_isr,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ dev_name(&pdev->dev), hisi_pmu);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Failed to request IRQ %d, ret = %d.\n", irq, ret);
+ return ret;
+ }
+
+ hisi_pmu->irq = irq;
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_init_irq);
int hisi_uncore_pmu_event_init(struct perf_event *event)
{
@@ -202,6 +245,9 @@ static void hisi_uncore_pmu_enable_event(struct perf_event *event)
hisi_pmu->ops->write_evtype(hisi_pmu, hwc->idx,
HISI_GET_EVENTID(event));
+ if (hisi_pmu->ops->enable_filter)
+ hisi_pmu->ops->enable_filter(event);
+
hisi_pmu->ops->enable_counter_int(hisi_pmu, hwc);
hisi_pmu->ops->enable_counter(hisi_pmu, hwc);
}
@@ -216,6 +262,9 @@ static void hisi_uncore_pmu_disable_event(struct perf_event *event)
hisi_pmu->ops->disable_counter(hisi_pmu, hwc);
hisi_pmu->ops->disable_counter_int(hisi_pmu, hwc);
+
+ if (hisi_pmu->ops->disable_filter)
+ hisi_pmu->ops->disable_filter(event);
}
void hisi_uncore_pmu_set_event_period(struct perf_event *event)
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h
index 25b7cbe1f818..ea9d89bbc1ea 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.h
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h
@@ -11,16 +11,19 @@
#ifndef __HISI_UNCORE_PMU_H__
#define __HISI_UNCORE_PMU_H__
+#include <linux/bitfield.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/perf_event.h>
+#include <linux/platform_device.h>
#include <linux/types.h>
#undef pr_fmt
#define pr_fmt(fmt) "hisi_pmu: " fmt
+#define HISI_PMU_V2 0x30
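+/* PMUs with an identifier of at least HISI_PMU_V2 use the v2 interface. */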
#define HISI_MAX_COUNTERS 0x10
#define to_hisi_pmu(p) (container_of(p, struct hisi_pmu, pmu))
@@ -34,6 +37,12 @@
#define HISI_PMU_EVENT_ATTR(_name, _config) \
HISI_PMU_ATTR(_name, hisi_event_sysfs_show, (unsigned long)_config)
+#define HISI_PMU_EVENT_ATTR_EXTRACTOR(name, config, hi, lo) \
+ static inline u32 hisi_get_##name(struct perf_event *event) \
+ { \
+ return FIELD_GET(GENMASK_ULL(hi, lo), event->attr.config); \
+ }
+
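+/*
+ * For example, HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 32, 22)
+ * defines hisi_get_srcid_cmd(), which returns bits [32:22] of attr.config1.
+ */
+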
struct hisi_pmu;
struct hisi_uncore_ops {
@@ -47,11 +56,16 @@ struct hisi_uncore_ops {
void (*disable_counter_int)(struct hisi_pmu *, struct hw_perf_event *);
void (*start_counters)(struct hisi_pmu *);
void (*stop_counters)(struct hisi_pmu *);
+ u32 (*get_int_status)(struct hisi_pmu *hisi_pmu);
+ void (*clear_int_status)(struct hisi_pmu *hisi_pmu, int idx);
+ void (*enable_filter)(struct perf_event *event);
+ void (*disable_filter)(struct perf_event *event);
};
struct hisi_pmu_hwevents {
struct perf_event *hw_events[HISI_MAX_COUNTERS];
DECLARE_BITMAP(used_mask, HISI_MAX_COUNTERS);
+ const struct attribute_group **attr_groups;
};
/* Generic pmu struct for different pmu types */
@@ -71,6 +85,8 @@ struct hisi_pmu {
void __iomem *base;
/* the ID of the PMU modules */
u32 index_id;
+ /* For DDRC PMU v2: each DDRC has more than one DMC */
+ u32 sub_id;
int num_counters;
int counter_bits;
/* check event code range */
@@ -78,7 +94,6 @@ struct hisi_pmu {
u32 identifier;
};
-int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx);
int hisi_uncore_pmu_get_event_idx(struct perf_event *event);
void hisi_uncore_pmu_read(struct perf_event *event);
int hisi_uncore_pmu_add(struct perf_event *event, int flags);
@@ -102,6 +117,7 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node);
ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
struct device_attribute *attr,
char *page);
-
+int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
+ struct platform_device *pdev);
#endif /* __HISI_UNCORE_PMU_H__ */
diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
new file mode 100644
index 000000000000..46be312fa126
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HiSilicon SLLC uncore Hardware event counters support
+ *
+ * Copyright (C) 2020 Hisilicon Limited
+ * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ */
+#include <linux/acpi.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/smp.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* SLLC register definition */
+#define SLLC_INT_MASK 0x0814
+#define SLLC_INT_STATUS 0x0818
+#define SLLC_INT_CLEAR 0x081c
+#define SLLC_PERF_CTRL 0x1c00
+#define SLLC_SRCID_CTRL 0x1c04
+#define SLLC_TGTID_CTRL 0x1c08
+#define SLLC_EVENT_CTRL 0x1c14
+#define SLLC_EVENT_TYPE0 0x1c18
+#define SLLC_VERSION 0x1cf0
+#define SLLC_EVENT_CNT0_L 0x1d00
+
+#define SLLC_EVTYPE_MASK 0xff
+#define SLLC_PERF_CTRL_EN BIT(0)
+#define SLLC_FILT_EN BIT(1)
+#define SLLC_TRACETAG_EN BIT(2)
+#define SLLC_SRCID_EN BIT(4)
+#define SLLC_SRCID_NONE 0x0
+#define SLLC_TGTID_EN BIT(5)
+#define SLLC_TGTID_NONE 0x0
+#define SLLC_TGTID_MIN_SHIFT 1
+#define SLLC_TGTID_MAX_SHIFT 12
+#define SLLC_SRCID_CMD_SHIFT 1
+#define SLLC_SRCID_MSK_SHIFT 12
+#define SLLC_NR_EVENTS 0x80
+
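+/*
+ * Extract the SLLC filter fields from perf's config1; the bit layout matches
+ * the sysfs format attributes defined further below.
+ */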
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_min, config1, 10, 0);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_max, config1, 21, 11);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 32, 22);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 43, 33);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 44, 44);
+
+static bool tgtid_is_valid(u32 max, u32 min)
+{
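+ /* A max of zero means no target-ID range filter was requested. */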
+ return max > 0 && max >= min;
+}
+
+static void hisi_sllc_pmu_enable_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+ u32 tt_en = hisi_get_tracetag_en(event);
+
+ if (tt_en) {
+ u32 val;
+
+ val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val |= SLLC_TRACETAG_EN | SLLC_FILT_EN;
+ writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ }
+}
+
+static void hisi_sllc_pmu_disable_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+ u32 tt_en = hisi_get_tracetag_en(event);
+
+ if (tt_en) {
+ u32 val;
+
+ val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val &= ~(SLLC_TRACETAG_EN | SLLC_FILT_EN);
+ writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ }
+}
+
+static void hisi_sllc_pmu_config_tgtid(struct perf_event *event)
+{
+ struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+ u32 min = hisi_get_tgtid_min(event);
+ u32 max = hisi_get_tgtid_max(event);
+
+ if (tgtid_is_valid(max, min)) {
+ u32 val = (max << SLLC_TGTID_MAX_SHIFT) | (min << SLLC_TGTID_MIN_SHIFT);
+
+ writel(val, sllc_pmu->base + SLLC_TGTID_CTRL);
+ /* Enable the tgtid */
+ val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val |= SLLC_TGTID_EN | SLLC_FILT_EN;
+ writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ }
+}
+
+static void hisi_sllc_pmu_clear_tgtid(struct perf_event *event)
+{
+ struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+ u32 min = hisi_get_tgtid_min(event);
+ u32 max = hisi_get_tgtid_max(event);
+
+ if (tgtid_is_valid(max, min)) {
+ u32 val;
+
+ writel(SLLC_TGTID_NONE, sllc_pmu->base + SLLC_TGTID_CTRL);
+ /* Disable the tgtid */
+ val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val &= ~(SLLC_TGTID_EN | SLLC_FILT_EN);
+ writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ }
+}
+
+static void hisi_sllc_pmu_config_srcid(struct perf_event *event)
+{
+ struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+ u32 cmd = hisi_get_srcid_cmd(event);
+
+ if (cmd) {
+ u32 val, msk;
+
+ msk = hisi_get_srcid_msk(event);
+ val = (cmd << SLLC_SRCID_CMD_SHIFT) | (msk << SLLC_SRCID_MSK_SHIFT);
+ writel(val, sllc_pmu->base + SLLC_SRCID_CTRL);
+ /* Enable the srcid */
+ val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val |= SLLC_SRCID_EN | SLLC_FILT_EN;
+ writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ }
+}
+
+static void hisi_sllc_pmu_clear_srcid(struct perf_event *event)
+{
+ struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+ u32 cmd = hisi_get_srcid_cmd(event);
+
+ if (cmd) {
+ u32 val;
+
+ writel(SLLC_SRCID_NONE, sllc_pmu->base + SLLC_SRCID_CTRL);
+ /* Disable the srcid */
+ val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val &= ~(SLLC_SRCID_EN | SLLC_FILT_EN);
+ writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ }
+}
+
+static void hisi_sllc_pmu_enable_filter(struct perf_event *event)
+{
+ if (event->attr.config1 != 0x0) {
+ hisi_sllc_pmu_enable_tracetag(event);
+ hisi_sllc_pmu_config_srcid(event);
+ hisi_sllc_pmu_config_tgtid(event);
+ }
+}
+
+static void hisi_sllc_pmu_clear_filter(struct perf_event *event)
+{
+ if (event->attr.config1 != 0x0) {
+ hisi_sllc_pmu_disable_tracetag(event);
+ hisi_sllc_pmu_clear_srcid(event);
+ hisi_sllc_pmu_clear_tgtid(event);
+ }
+}
+
+static u32 hisi_sllc_pmu_get_counter_offset(int idx)
+{
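+ /* Counters are 64-bit and packed contiguously, hence the 8-byte stride. */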
+ return (SLLC_EVENT_CNT0_L + idx * 8);
+}
+
+static u64 hisi_sllc_pmu_read_counter(struct hisi_pmu *sllc_pmu,
+ struct hw_perf_event *hwc)
+{
+ return readq(sllc_pmu->base +
+ hisi_sllc_pmu_get_counter_offset(hwc->idx));
+}
+
+static void hisi_sllc_pmu_write_counter(struct hisi_pmu *sllc_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ writeq(val, sllc_pmu->base +
+ hisi_sllc_pmu_get_counter_offset(hwc->idx));
+}
+
+static void hisi_sllc_pmu_write_evtype(struct hisi_pmu *sllc_pmu, int idx,
+ u32 type)
+{
+ u32 reg, reg_idx, shift, val;
+
+ /*
+ * Select the appropriate event select register (SLLC_EVENT_TYPE0/1).
+ * There are 2 event select registers for the 8 hardware counters.
+ * Event codes are 8 bits wide; the first 4 hardware counters use
+ * SLLC_EVENT_TYPE0 and the last 4 use SLLC_EVENT_TYPE1.
+ */
+ reg = SLLC_EVENT_TYPE0 + (idx / 4) * 4;
+ reg_idx = idx % 4;
+ shift = 8 * reg_idx;
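+ /* e.g. idx == 5 selects SLLC_EVENT_TYPE1 (0x1c1c), byte lane 1 (shift 8). */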
+
+ /* Write the event code to the SLLC_EVENT_TYPEx register */
+ val = readl(sllc_pmu->base + reg);
+ val &= ~(SLLC_EVTYPE_MASK << shift);
+ val |= (type << shift);
+ writel(val, sllc_pmu->base + reg);
+}
+
+static void hisi_sllc_pmu_start_counters(struct hisi_pmu *sllc_pmu)
+{
+ u32 val;
+
+ val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val |= SLLC_PERF_CTRL_EN;
+ writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+}
+
+static void hisi_sllc_pmu_stop_counters(struct hisi_pmu *sllc_pmu)
+{
+ u32 val;
+
+ val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val &= ~(SLLC_PERF_CTRL_EN);
+ writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+}
+
+static void hisi_sllc_pmu_enable_counter(struct hisi_pmu *sllc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ val = readl(sllc_pmu->base + SLLC_EVENT_CTRL);
+ val |= 1 << hwc->idx;
+ writel(val, sllc_pmu->base + SLLC_EVENT_CTRL);
+}
+
+static void hisi_sllc_pmu_disable_counter(struct hisi_pmu *sllc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ val = readl(sllc_pmu->base + SLLC_EVENT_CTRL);
+ val &= ~(1 << hwc->idx);
+ writel(val, sllc_pmu->base + SLLC_EVENT_CTRL);
+}
+
+static void hisi_sllc_pmu_enable_counter_int(struct hisi_pmu *sllc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ val = readl(sllc_pmu->base + SLLC_INT_MASK);
+ /* Write 0 to enable interrupt */
+ val &= ~(1 << hwc->idx);
+ writel(val, sllc_pmu->base + SLLC_INT_MASK);
+}
+
+static void hisi_sllc_pmu_disable_counter_int(struct hisi_pmu *sllc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ val = readl(sllc_pmu->base + SLLC_INT_MASK);
+ /* Write 1 to mask interrupt */
+ val |= 1 << hwc->idx;
+ writel(val, sllc_pmu->base + SLLC_INT_MASK);
+}
+
+static u32 hisi_sllc_pmu_get_int_status(struct hisi_pmu *sllc_pmu)
+{
+ return readl(sllc_pmu->base + SLLC_INT_STATUS);
+}
+
+static void hisi_sllc_pmu_clear_int_status(struct hisi_pmu *sllc_pmu, int idx)
+{
+ writel(1 << idx, sllc_pmu->base + SLLC_INT_CLEAR);
+}
+
+static const struct acpi_device_id hisi_sllc_pmu_acpi_match[] = {
+ { "HISI0263", },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_sllc_pmu_acpi_match);
+
+static int hisi_sllc_pmu_init_data(struct platform_device *pdev,
+ struct hisi_pmu *sllc_pmu)
+{
+ /*
+ * The SCCL_ID and the index ID together identify the SLLC PMU;
+ * the SCCL_ID matches the value the CPUs read from MPIDR_EL1.
+ */
+ if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
+ &sllc_pmu->sccl_id)) {
+ dev_err(&pdev->dev, "Cannot read sccl-id!\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
+ &sllc_pmu->index_id)) {
+ dev_err(&pdev->dev, "Cannot read idx-id!\n");
+ return -EINVAL;
+ }
+
+ /* SLLC PMUs are identified per SCCL only, so the CCL_ID is unused. */
+ sllc_pmu->ccl_id = -1;
+
+ sllc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(sllc_pmu->base)) {
+ dev_err(&pdev->dev, "ioremap failed for sllc_pmu resource.\n");
+ return PTR_ERR(sllc_pmu->base);
+ }
+
+ sllc_pmu->identifier = readl(sllc_pmu->base + SLLC_VERSION);
+
+ return 0;
+}
+
+static struct attribute *hisi_sllc_pmu_v2_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ HISI_PMU_FORMAT_ATTR(tgtid_min, "config1:0-10"),
+ HISI_PMU_FORMAT_ATTR(tgtid_max, "config1:11-21"),
+ HISI_PMU_FORMAT_ATTR(srcid_cmd, "config1:22-32"),
+ HISI_PMU_FORMAT_ATTR(srcid_msk, "config1:33-43"),
+ HISI_PMU_FORMAT_ATTR(tracetag_en, "config1:44"),
+ NULL
+};
+
+static const struct attribute_group hisi_sllc_pmu_v2_format_group = {
+ .name = "format",
+ .attrs = hisi_sllc_pmu_v2_format_attr,
+};
+
+static struct attribute *hisi_sllc_pmu_v2_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(rx_req, 0x30),
+ HISI_PMU_EVENT_ATTR(rx_data, 0x31),
+ HISI_PMU_EVENT_ATTR(tx_req, 0x34),
+ HISI_PMU_EVENT_ATTR(tx_data, 0x35),
+ HISI_PMU_EVENT_ATTR(cycles, 0x09),
+ NULL
+};
+
+static const struct attribute_group hisi_sllc_pmu_v2_events_group = {
+ .name = "events",
+ .attrs = hisi_sllc_pmu_v2_events_attr,
+};
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static struct attribute *hisi_sllc_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static const struct attribute_group hisi_sllc_pmu_cpumask_attr_group = {
+ .attrs = hisi_sllc_pmu_cpumask_attrs,
+};
+
+static struct device_attribute hisi_sllc_pmu_identifier_attr =
+ __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
+
+static struct attribute *hisi_sllc_pmu_identifier_attrs[] = {
+ &hisi_sllc_pmu_identifier_attr.attr,
+ NULL
+};
+
+static struct attribute_group hisi_sllc_pmu_identifier_group = {
+ .attrs = hisi_sllc_pmu_identifier_attrs,
+};
+
+static const struct attribute_group *hisi_sllc_pmu_v2_attr_groups[] = {
+ &hisi_sllc_pmu_v2_format_group,
+ &hisi_sllc_pmu_v2_events_group,
+ &hisi_sllc_pmu_cpumask_attr_group,
+ &hisi_sllc_pmu_identifier_group,
+ NULL
+};
+
+static const struct hisi_uncore_ops hisi_uncore_sllc_ops = {
+ .write_evtype = hisi_sllc_pmu_write_evtype,
+ .get_event_idx = hisi_uncore_pmu_get_event_idx,
+ .start_counters = hisi_sllc_pmu_start_counters,
+ .stop_counters = hisi_sllc_pmu_stop_counters,
+ .enable_counter = hisi_sllc_pmu_enable_counter,
+ .disable_counter = hisi_sllc_pmu_disable_counter,
+ .enable_counter_int = hisi_sllc_pmu_enable_counter_int,
+ .disable_counter_int = hisi_sllc_pmu_disable_counter_int,
+ .write_counter = hisi_sllc_pmu_write_counter,
+ .read_counter = hisi_sllc_pmu_read_counter,
+ .get_int_status = hisi_sllc_pmu_get_int_status,
+ .clear_int_status = hisi_sllc_pmu_clear_int_status,
+ .enable_filter = hisi_sllc_pmu_enable_filter,
+ .disable_filter = hisi_sllc_pmu_clear_filter,
+};
+
+static int hisi_sllc_pmu_dev_probe(struct platform_device *pdev,
+ struct hisi_pmu *sllc_pmu)
+{
+ int ret;
+
+ ret = hisi_sllc_pmu_init_data(pdev, sllc_pmu);
+ if (ret)
+ return ret;
+
+ ret = hisi_uncore_pmu_init_irq(sllc_pmu, pdev);
+ if (ret)
+ return ret;
+
+ sllc_pmu->pmu_events.attr_groups = hisi_sllc_pmu_v2_attr_groups;
+ sllc_pmu->ops = &hisi_uncore_sllc_ops;
+ sllc_pmu->check_event = SLLC_NR_EVENTS;
+ sllc_pmu->counter_bits = 64;
+ sllc_pmu->num_counters = 8;
+ sllc_pmu->dev = &pdev->dev;
+ sllc_pmu->on_cpu = -1;
+
+ return 0;
+}
+
+static int hisi_sllc_pmu_probe(struct platform_device *pdev)
+{
+ struct hisi_pmu *sllc_pmu;
+ char *name;
+ int ret;
+
+ sllc_pmu = devm_kzalloc(&pdev->dev, sizeof(*sllc_pmu), GFP_KERNEL);
+ if (!sllc_pmu)
+ return -ENOMEM;
+
+ ret = hisi_sllc_pmu_dev_probe(pdev, sllc_pmu);
+ if (ret)
+ return ret;
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_sllc%u",
+ sllc_pmu->sccl_id, sllc_pmu->index_id);
+ if (!name)
+ return -ENOMEM;
+
+ ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+ &sllc_pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+ return ret;
+ }
+
+ sllc_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = hisi_uncore_pmu_event_init,
+ .pmu_enable = hisi_uncore_pmu_enable,
+ .pmu_disable = hisi_uncore_pmu_disable,
+ .add = hisi_uncore_pmu_add,
+ .del = hisi_uncore_pmu_del,
+ .start = hisi_uncore_pmu_start,
+ .stop = hisi_uncore_pmu_stop,
+ .read = hisi_uncore_pmu_read,
+ .attr_groups = sllc_pmu->pmu_events.attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ };
+
+ ret = perf_pmu_register(&sllc_pmu->pmu, name, -1);
+ if (ret) {
+ dev_err(sllc_pmu->dev, "PMU register failed, ret = %d\n", ret);
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+ &sllc_pmu->node);
+ irq_set_affinity_hint(sllc_pmu->irq, NULL);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, sllc_pmu);
+
+ return ret;
+}
+
+static int hisi_sllc_pmu_remove(struct platform_device *pdev)
+{
+ struct hisi_pmu *sllc_pmu = platform_get_drvdata(pdev);
+
+ perf_pmu_unregister(&sllc_pmu->pmu);
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+ &sllc_pmu->node);
+ irq_set_affinity_hint(sllc_pmu->irq, NULL);
+
+ return 0;
+}
+
+static struct platform_driver hisi_sllc_pmu_driver = {
+ .driver = {
+ .name = "hisi_sllc_pmu",
+ .acpi_match_table = hisi_sllc_pmu_acpi_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = hisi_sllc_pmu_probe,
+ .remove = hisi_sllc_pmu_remove,
+};
+
+static int __init hisi_sllc_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+ "AP_PERF_ARM_HISI_SLLC_ONLINE",
+ hisi_uncore_pmu_online_cpu,
+ hisi_uncore_pmu_offline_cpu);
+ if (ret) {
+ pr_err("SLLC PMU: cpuhp state setup failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&hisi_sllc_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE);
+
+ return ret;
+}
+module_init(hisi_sllc_pmu_module_init);
+
+static void __exit hisi_sllc_pmu_module_exit(void)
+{
+ platform_driver_unregister(&hisi_sllc_pmu_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE);
+}
+module_exit(hisi_sllc_pmu_module_exit);
+
+MODULE_DESCRIPTION("HiSilicon SLLC uncore PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
+MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 8883af955a2a..fc54a80f9c5c 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -676,7 +676,7 @@ static ssize_t l2cache_pmu_event_show(struct device *dev,
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
- return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+ return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
#define L2CACHE_EVENT_ATTR(_name, _id) \
diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c
index fb34b87b9471..bba078077c93 100644
--- a/drivers/perf/qcom_l3_pmu.c
+++ b/drivers/perf/qcom_l3_pmu.c
@@ -615,7 +615,7 @@ static ssize_t l3cache_pmu_format_show(struct device *dev,
struct dev_ext_attribute *eattr;
eattr = container_of(attr, struct dev_ext_attribute, attr);
- return sprintf(buf, "%s\n", (char *) eattr->var);
+ return sysfs_emit(buf, "%s\n", (char *) eattr->var);
}
#define L3CACHE_PMU_FORMAT_ATTR(_name, _config) \
@@ -643,7 +643,7 @@ static ssize_t l3cache_pmu_event_show(struct device *dev,
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
- return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+ return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
#define L3CACHE_EVENT_ATTR(_name, _id) \
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index e116815fa809..06a6d569b0b5 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -128,7 +128,7 @@ __tx2_pmu_##_var##_show(struct device *dev, \
char *page) \
{ \
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
- return sprintf(page, _format "\n"); \
+ return sysfs_emit(page, _format "\n"); \
} \
\
static struct device_attribute format_attr_##_var = \
@@ -176,7 +176,7 @@ static ssize_t tx2_pmu_event_show(struct device *dev,
struct dev_ext_attribute *eattr;
eattr = container_of(attr, struct dev_ext_attribute, attr);
- return sprintf(buf, "event=0x%lx\n", (unsigned long) eattr->var);
+ return sysfs_emit(buf, "event=0x%lx\n", (unsigned long) eattr->var);
}
#define TX2_EVENT_ATTR(name, config) \
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 44faa51ba799..ffe3bdeec845 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -170,7 +170,7 @@ static ssize_t xgene_pmu_format_show(struct device *dev,
struct dev_ext_attribute *eattr;
eattr = container_of(attr, struct dev_ext_attribute, attr);
- return sprintf(buf, "%s\n", (char *) eattr->var);
+ return sysfs_emit(buf, "%s\n", (char *) eattr->var);
}
#define XGENE_PMU_FORMAT_ATTR(_name, _config) \
@@ -281,7 +281,7 @@ static ssize_t xgene_pmu_event_show(struct device *dev,
struct dev_ext_attribute *eattr;
eattr = container_of(attr, struct dev_ext_attribute, attr);
- return sprintf(buf, "config=0x%lx\n", (unsigned long) eattr->var);
+ return sysfs_emit(buf, "config=0x%lx\n", (unsigned long) eattr->var);
}
#define XGENE_PMU_EVENT_ATTR(_name, _config) \
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 7d3370289938..6e6825d17a1d 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1604,8 +1604,8 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
unsigned i, pin;
#ifdef CONFIG_GPIOLIB
struct pinctrl_gpio_range *range;
- unsigned int gpio_num;
struct gpio_chip *chip;
+ int gpio_num;
#endif
seq_printf(s, "registered pins: %d\n", pctldev->desc->npins);
@@ -1625,7 +1625,7 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
seq_printf(s, "pin %d (%s) ", pin, desc->name);
#ifdef CONFIG_GPIOLIB
- gpio_num = 0;
+ gpio_num = -1;
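+ /* gpio_num stays negative if no GPIO range maps this pin. */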
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
if ((pin >= range->pin_base) &&
(pin < (range->pin_base + range->npins))) {
@@ -1633,10 +1633,12 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
break;
}
}
- chip = gpio_to_chip(gpio_num);
- if (chip && chip->gpiodev && chip->gpiodev->base)
- seq_printf(s, "%u:%s ", gpio_num -
- chip->gpiodev->base, chip->label);
+ if (gpio_num >= 0)
+ chip = gpio_to_chip(gpio_num);
+ else
+ chip = NULL;
+ if (chip)
+ seq_printf(s, "%u:%s ", gpio_num - chip->gpiodev->base, chip->label);
else
seq_puts(s, "0:? ");
#endif
diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c
index 7fdf4257df1e..ad4b446d588e 100644
--- a/drivers/pinctrl/intel/pinctrl-lewisburg.c
+++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c
@@ -299,9 +299,9 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
static const struct intel_community lbg_communities[] = {
LBG_COMMUNITY(0, 0, 71),
LBG_COMMUNITY(1, 72, 132),
- LBG_COMMUNITY(3, 133, 144),
- LBG_COMMUNITY(4, 145, 180),
- LBG_COMMUNITY(5, 181, 246),
+ LBG_COMMUNITY(3, 133, 143),
+ LBG_COMMUNITY(4, 144, 178),
+ LBG_COMMUNITY(5, 179, 246),
};
static const struct intel_pinctrl_soc_data lbg_soc_data = {
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c
index 5d21c6adf1ab..1c7a288b59a5 100644
--- a/drivers/platform/mellanox/mlxbf-bootctl.c
+++ b/drivers/platform/mellanox/mlxbf-bootctl.c
@@ -208,7 +208,7 @@ static ssize_t secure_boot_fuse_state_show(struct device *dev,
* 0011 = version 1, 0111 = version 2, 1111 = version 3). Upper 4 bits
* are a thermometer code indicating key programming has completed for
* key n (same encodings as the start bits). This allows for detection
- * of an interruption in the progamming process which has left the key
+ * of an interruption in the programming process which has left the key
* partially programmed (and thus invalid). The process is to burn the
* eFuse for the new key start bit, burn the key eFuses, then burn the
* eFuse for the new key complete bit.
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index b013445147dd..a9db2f32658f 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -683,13 +683,13 @@ static int mlxreg_hotplug_probe(struct platform_device *pdev)
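+ /* IRQF_NO_AUTOEN leaves the IRQ disabled until it is enabled explicitly. */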
err = devm_request_irq(&pdev->dev, priv->irq,
mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
- | IRQF_SHARED, "mlxreg-hotplug", priv);
+ | IRQF_SHARED | IRQF_NO_AUTOEN,
+ "mlxreg-hotplug", priv);
if (err) {
dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
return err;
}
- disable_irq(priv->irq);
spin_lock_init(&priv->lock);
INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
dev_set_drvdata(&pdev->dev, priv);
diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
index 0847b2dc97bf..3105f651614f 100644
--- a/drivers/platform/surface/Kconfig
+++ b/drivers/platform/surface/Kconfig
@@ -77,6 +77,53 @@ config SURFACE_AGGREGATOR_CDEV
The provided interface is intended for debugging and development only,
and should not be used otherwise.
+config SURFACE_AGGREGATOR_REGISTRY
+ tristate "Surface System Aggregator Module Device Registry"
+ depends on SURFACE_AGGREGATOR
+ depends on SURFACE_AGGREGATOR_BUS
+ help
+ Device-registry and device-hubs for Surface System Aggregator Module
+ (SSAM) devices.
+
+ Provides a module and driver which act as a device-registry for SSAM
+ client devices that cannot be detected automatically, e.g. via ACPI.
+ Such devices are instead provided via this registry and attached via
+ device hubs, also provided in this module.
+
+ Devices provided via this registry are:
+ - Platform profile (performance-/cooling-mode) device (5th- and later
+ generations).
+ - Battery/AC devices (7th-generation).
+ - HID input devices (7th-generation).
+
+ Select M (recommended) or Y here if you want support for the
+ above-mentioned devices on the corresponding Surface models. Without this
+ module, the respective devices will not be instantiated and thus any
+ functionality provided by them will be missing, even when drivers for
+ these devices are present. In other words, this module only provides
+ the respective client devices. Drivers for these devices still need to
+ be selected via the other options.
+
+config SURFACE_DTX
+ tristate "Surface DTX (Detachment System) Driver"
+ depends on SURFACE_AGGREGATOR
+ depends on INPUT
+ help
+ Driver for the Surface Book clipboard detachment system (DTX).
+
+ On the Surface Book series devices, the display part containing the
+ CPU (called the clipboard) can be detached from the base (containing a
+ battery, the keyboard, and, optionally, a discrete GPU) by (if
+ necessary) unlocking and opening the latch connecting both parts.
+
+ This driver provides a user-space interface that can influence the
+ behavior of this process, which includes the option to abort it in
+ case the base is still in use or speed it up in case it is not.
+
+ Note that this module can be built without support for the Surface
+ Aggregator Bus (i.e. CONFIG_SURFACE_AGGREGATOR_BUS=n). In that case,
+ some devices, specifically the Surface Book 3, will not be supported.
+
config SURFACE_GPE
tristate "Surface GPE/Lid Support Driver"
depends on DMI
@@ -105,6 +152,28 @@ config SURFACE_HOTPLUG
Select M or Y here, if you want to (fully) support hot-plugging of
dGPU devices on the Surface Book 2 and/or 3 during D3cold.
+config SURFACE_PLATFORM_PROFILE
+ tristate "Surface Platform Profile Driver"
+ depends on SURFACE_AGGREGATOR_REGISTRY
+ select ACPI_PLATFORM_PROFILE
+ help
+ Provides support for the ACPI platform profile on 5th- and later
+ generation Microsoft Surface devices.
+
+ More specifically, this driver provides ACPI platform profile support
+ on Microsoft Surface devices with a Surface System Aggregator Module
+ (SSAM) connected via the Surface Serial Hub (SSH / SAM-over-SSH). In
+ other words, this driver provides platform profile support on the
+ Surface Pro 5, Surface Book 2, Surface Laptop, Surface Laptop Go and
+ later. On those devices, the platform profile can significantly
+ influence cooling behavior, e.g. setting it to 'quiet' (default) or
+ 'low-power' can noticeably limit performance of the discrete GPU on
+ Surface Books, while in turn leading to lower power consumption and/or
+ less fan noise.
+
+ Select M or Y here if you want to include ACPI platform profile
+ support on the above-mentioned devices.
+
config SURFACE_PRO3_BUTTON
tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet"
depends on INPUT
diff --git a/drivers/platform/surface/Makefile b/drivers/platform/surface/Makefile
index 990424c5f0c9..32889482de55 100644
--- a/drivers/platform/surface/Makefile
+++ b/drivers/platform/surface/Makefile
@@ -10,6 +10,9 @@ obj-$(CONFIG_SURFACE_3_POWER_OPREGION) += surface3_power.o
obj-$(CONFIG_SURFACE_ACPI_NOTIFY) += surface_acpi_notify.o
obj-$(CONFIG_SURFACE_AGGREGATOR) += aggregator/
obj-$(CONFIG_SURFACE_AGGREGATOR_CDEV) += surface_aggregator_cdev.o
+obj-$(CONFIG_SURFACE_AGGREGATOR_REGISTRY) += surface_aggregator_registry.o
+obj-$(CONFIG_SURFACE_DTX) += surface_dtx.o
obj-$(CONFIG_SURFACE_GPE) += surface_gpe.o
obj-$(CONFIG_SURFACE_HOTPLUG) += surface_hotplug.o
+obj-$(CONFIG_SURFACE_PLATFORM_PROFILE) += surface_platform_profile.o
obj-$(CONFIG_SURFACE_PRO3_BUTTON) += surfacepro3_button.o
diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
index 5bcb59ed579d..69e86cd599d3 100644
--- a/drivers/platform/surface/aggregator/controller.c
+++ b/drivers/platform/surface/aggregator/controller.c
@@ -1040,7 +1040,7 @@ static int ssam_dsm_load_u32(acpi_handle handle, u64 funcs, u64 func, u32 *ret)
union acpi_object *obj;
u64 val;
- if (!(funcs & BIT(func)))
+ if (!(funcs & BIT_ULL(func)))
return 0; /* Not supported, leave *ret at its default value */
obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
@@ -1750,35 +1750,35 @@ EXPORT_SYMBOL_GPL(ssam_request_sync_with_buffer);
/* -- Internal SAM requests. ------------------------------------------------ */
-static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_get_firmware_version, __le32, {
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_get_firmware_version, __le32, {
.target_category = SSAM_SSH_TC_SAM,
.target_id = 0x01,
.command_id = 0x13,
.instance_id = 0x00,
});
-static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_off, u8, {
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_off, u8, {
.target_category = SSAM_SSH_TC_SAM,
.target_id = 0x01,
.command_id = 0x15,
.instance_id = 0x00,
});
-static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_on, u8, {
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_on, u8, {
.target_category = SSAM_SSH_TC_SAM,
.target_id = 0x01,
.command_id = 0x16,
.instance_id = 0x00,
});
-static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_exit, u8, {
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_exit, u8, {
.target_category = SSAM_SSH_TC_SAM,
.target_id = 0x01,
.command_id = 0x33,
.instance_id = 0x00,
});
-static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_entry, u8, {
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_entry, u8, {
.target_category = SSAM_SSH_TC_SAM,
.target_id = 0x01,
.command_id = 0x34,
@@ -2483,7 +2483,8 @@ int ssam_irq_setup(struct ssam_controller *ctrl)
* interrupt, and let the SAM resume callback during the controller
* resume process clear it.
*/
- const int irqf = IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_RISING;
+ const int irqf = IRQF_SHARED | IRQF_ONESHOT |
+ IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN;
gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS);
if (IS_ERR(gpiod))
@@ -2501,7 +2502,6 @@ int ssam_irq_setup(struct ssam_controller *ctrl)
return status;
ctrl->irq.num = irq;
- disable_irq(ctrl->irq.num);
return 0;
}
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
new file mode 100644
index 000000000000..685d37a7add1
--- /dev/null
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface System Aggregator Module (SSAM) client device registry.
+ *
+ * Registry for non-platform/non-ACPI SSAM client devices, i.e. devices that
+ * cannot be auto-detected. Provides device-hubs and performs instantiation
+ * for these devices.
+ *
+ * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+
+
+/* -- Device registry. ------------------------------------------------------ */
+
+/*
+ * SSAM device names follow the SSAM module alias, meaning they are prefixed
+ * with 'ssam:', followed by domain, category, target ID, instance ID, and
+ * function, each encoded as two-digit hexadecimal, separated by ':'. In other
+ * words, it follows the scheme
+ *
+ * ssam:dd:cc:tt:ii:ff
+ *
+ * where 'dd', 'cc', 'tt', 'ii', and 'ff' are the two-digit hexadecimal
+ * encodings of the fields listed above, respectively.
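+ *
+ * For example, "ssam:01:02:01:01:00" (the primary battery node below)
+ * decodes to domain 0x01, target category 0x02 (battery), target ID 0x01,
+ * instance ID 0x01, and function 0x00.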
+ */
+
+/* Root node. */
+static const struct software_node ssam_node_root = {
+ .name = "ssam_platform_hub",
+};
+
+/* Base device hub (devices attached to Surface Book 3 base). */
+static const struct software_node ssam_node_hub_base = {
+ .name = "ssam:00:00:02:00:00",
+ .parent = &ssam_node_root,
+};
+
+/* AC adapter. */
+static const struct software_node ssam_node_bat_ac = {
+ .name = "ssam:01:02:01:01:01",
+ .parent = &ssam_node_root,
+};
+
+/* Primary battery. */
+static const struct software_node ssam_node_bat_main = {
+ .name = "ssam:01:02:01:01:00",
+ .parent = &ssam_node_root,
+};
+
+/* Secondary battery (Surface Book 3). */
+static const struct software_node ssam_node_bat_sb3base = {
+ .name = "ssam:01:02:02:01:00",
+ .parent = &ssam_node_hub_base,
+};
+
+/* Platform profile / performance-mode device. */
+static const struct software_node ssam_node_tmp_pprof = {
+ .name = "ssam:01:03:01:00:01",
+ .parent = &ssam_node_root,
+};
+
+/* DTX / detachment-system device (Surface Book 3). */
+static const struct software_node ssam_node_bas_dtx = {
+ .name = "ssam:01:11:01:00:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID keyboard. */
+static const struct software_node ssam_node_hid_main_keyboard = {
+ .name = "ssam:01:15:02:01:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID touchpad. */
+static const struct software_node ssam_node_hid_main_touchpad = {
+ .name = "ssam:01:15:02:03:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID device instance 5 (unknown HID device). */
+static const struct software_node ssam_node_hid_main_iid5 = {
+ .name = "ssam:01:15:02:05:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID keyboard (base hub). */
+static const struct software_node ssam_node_hid_base_keyboard = {
+ .name = "ssam:01:15:02:01:00",
+ .parent = &ssam_node_hub_base,
+};
+
+/* HID touchpad (base hub). */
+static const struct software_node ssam_node_hid_base_touchpad = {
+ .name = "ssam:01:15:02:03:00",
+ .parent = &ssam_node_hub_base,
+};
+
+/* HID device instance 5 (unknown HID device, base hub). */
+static const struct software_node ssam_node_hid_base_iid5 = {
+ .name = "ssam:01:15:02:05:00",
+ .parent = &ssam_node_hub_base,
+};
+
+/* HID device instance 6 (unknown HID device, base hub). */
+static const struct software_node ssam_node_hid_base_iid6 = {
+ .name = "ssam:01:15:02:06:00",
+ .parent = &ssam_node_hub_base,
+};
+
+/* Devices for Surface Book 2. */
+static const struct software_node *ssam_node_group_sb2[] = {
+ &ssam_node_root,
+ &ssam_node_tmp_pprof,
+ NULL,
+};
+
+/* Devices for Surface Book 3. */
+static const struct software_node *ssam_node_group_sb3[] = {
+ &ssam_node_root,
+ &ssam_node_hub_base,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_bat_sb3base,
+ &ssam_node_tmp_pprof,
+ &ssam_node_bas_dtx,
+ &ssam_node_hid_base_keyboard,
+ &ssam_node_hid_base_touchpad,
+ &ssam_node_hid_base_iid5,
+ &ssam_node_hid_base_iid6,
+ NULL,
+};
+
+/* Devices for Surface Laptop 1. */
+static const struct software_node *ssam_node_group_sl1[] = {
+ &ssam_node_root,
+ &ssam_node_tmp_pprof,
+ NULL,
+};
+
+/* Devices for Surface Laptop 2. */
+static const struct software_node *ssam_node_group_sl2[] = {
+ &ssam_node_root,
+ &ssam_node_tmp_pprof,
+ NULL,
+};
+
+/* Devices for Surface Laptop 3. */
+static const struct software_node *ssam_node_group_sl3[] = {
+ &ssam_node_root,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_tmp_pprof,
+ &ssam_node_hid_main_keyboard,
+ &ssam_node_hid_main_touchpad,
+ &ssam_node_hid_main_iid5,
+ NULL,
+};
+
+/* Devices for Surface Laptop Go. */
+static const struct software_node *ssam_node_group_slg1[] = {
+ &ssam_node_root,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_tmp_pprof,
+ NULL,
+};
+
+/* Devices for Surface Pro 5. */
+static const struct software_node *ssam_node_group_sp5[] = {
+ &ssam_node_root,
+ &ssam_node_tmp_pprof,
+ NULL,
+};
+
+/* Devices for Surface Pro 6. */
+static const struct software_node *ssam_node_group_sp6[] = {
+ &ssam_node_root,
+ &ssam_node_tmp_pprof,
+ NULL,
+};
+
+/* Devices for Surface Pro 7 and Surface Pro 7+. */
+static const struct software_node *ssam_node_group_sp7[] = {
+ &ssam_node_root,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_tmp_pprof,
+ NULL,
+};
+
+
+/* -- Device registry helper functions. ------------------------------------- */
+
+static int ssam_uid_from_string(const char *str, struct ssam_device_uid *uid)
+{
+ u8 d, tc, tid, iid, fn;
+ int n;
+
+ n = sscanf(str, "ssam:%hhx:%hhx:%hhx:%hhx:%hhx", &d, &tc, &tid, &iid, &fn);
+ if (n != 5)
+ return -EINVAL;
+
+ uid->domain = d;
+ uid->category = tc;
+ uid->target = tid;
+ uid->instance = iid;
+ uid->function = fn;
+
+ return 0;
+}
+
+static int ssam_hub_remove_devices_fn(struct device *dev, void *data)
+{
+ if (!is_ssam_device(dev))
+ return 0;
+
+ ssam_device_remove(to_ssam_device(dev));
+ return 0;
+}
+
+static void ssam_hub_remove_devices(struct device *parent)
+{
+ device_for_each_child_reverse(parent, NULL, ssam_hub_remove_devices_fn);
+}
+
+static int ssam_hub_add_device(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node)
+{
+ struct ssam_device_uid uid;
+ struct ssam_device *sdev;
+ int status;
+
+ status = ssam_uid_from_string(fwnode_get_name(node), &uid);
+ if (status)
+ return status;
+
+ sdev = ssam_device_alloc(ctrl, uid);
+ if (!sdev)
+ return -ENOMEM;
+
+ sdev->dev.parent = parent;
+ sdev->dev.fwnode = node;
+
+ status = ssam_device_add(sdev);
+ if (status)
+ ssam_device_put(sdev);
+
+ return status;
+}
+
+static int ssam_hub_add_devices(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node)
+{
+ struct fwnode_handle *child;
+ int status;
+
+ fwnode_for_each_child_node(node, child) {
+ /*
+ * Try to add the device specified in the firmware node. If
+ * this fails with -EINVAL, the node does not specify any SSAM
+ * device, so ignore it and continue with the next one.
+ */
+
+ status = ssam_hub_add_device(parent, ctrl, child);
+ if (status && status != -EINVAL) {
+ fwnode_handle_put(child);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ ssam_hub_remove_devices(parent);
+ return status;
+}
+
+
+/* -- SSAM base-hub driver. ------------------------------------------------- */
+
+/*
+ * Some devices (especially battery) may need a bit of time to be fully usable
+ * after being (re-)connected. This delay has been determined via
+ * experimentation.
+ */
+#define SSAM_BASE_UPDATE_CONNECT_DELAY msecs_to_jiffies(2500)
+
+enum ssam_base_hub_state {
+ SSAM_BASE_HUB_UNINITIALIZED,
+ SSAM_BASE_HUB_CONNECTED,
+ SSAM_BASE_HUB_DISCONNECTED,
+};
+
+struct ssam_base_hub {
+ struct ssam_device *sdev;
+
+ enum ssam_base_hub_state state;
+ struct delayed_work update_work;
+
+ struct ssam_event_notifier notif;
+};
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x0d,
+ .instance_id = 0x00,
+});
+
+#define SSAM_BAS_OPMODE_TABLET 0x00
+#define SSAM_EVENT_BAS_CID_CONNECTION 0x0c
+
+static int ssam_base_hub_query_state(struct ssam_base_hub *hub, enum ssam_base_hub_state *state)
+{
+ u8 opmode;
+ int status;
+
+ status = ssam_retry(ssam_bas_query_opmode, hub->sdev->ctrl, &opmode);
+ if (status < 0) {
+ dev_err(&hub->sdev->dev, "failed to query base state: %d\n", status);
+ return status;
+ }
+
+ if (opmode != SSAM_BAS_OPMODE_TABLET)
+ *state = SSAM_BASE_HUB_CONNECTED;
+ else
+ *state = SSAM_BASE_HUB_DISCONNECTED;
+
+ return 0;
+}
+
+static ssize_t ssam_base_hub_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ssam_base_hub *hub = dev_get_drvdata(dev);
+ bool connected = hub->state == SSAM_BASE_HUB_CONNECTED;
+
+ return sysfs_emit(buf, "%d\n", connected);
+}
+
+static struct device_attribute ssam_base_hub_attr_state =
+ __ATTR(state, 0444, ssam_base_hub_state_show, NULL);
+
+static struct attribute *ssam_base_hub_attrs[] = {
+ &ssam_base_hub_attr_state.attr,
+ NULL,
+};
+
+static const struct attribute_group ssam_base_hub_group = {
+ .attrs = ssam_base_hub_attrs,
+};
+
+static void ssam_base_hub_update_workfn(struct work_struct *work)
+{
+ struct ssam_base_hub *hub = container_of(work, struct ssam_base_hub, update_work.work);
+ struct fwnode_handle *node = dev_fwnode(&hub->sdev->dev);
+ enum ssam_base_hub_state state;
+ int status = 0;
+
+ status = ssam_base_hub_query_state(hub, &state);
+ if (status)
+ return;
+
+ if (hub->state == state)
+ return;
+ hub->state = state;
+
+ if (hub->state == SSAM_BASE_HUB_CONNECTED)
+ status = ssam_hub_add_devices(&hub->sdev->dev, hub->sdev->ctrl, node);
+ else
+ ssam_hub_remove_devices(&hub->sdev->dev);
+
+ if (status)
+ dev_err(&hub->sdev->dev, "failed to update base-hub devices: %d\n", status);
+}
+
+static u32 ssam_base_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct ssam_base_hub *hub = container_of(nf, struct ssam_base_hub, notif);
+ unsigned long delay;
+
+ if (event->command_id != SSAM_EVENT_BAS_CID_CONNECTION)
+ return 0;
+
+ if (event->length < 1) {
+ dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length);
+ return 0;
+ }
+
+ /*
+ * Delay update when the base is being connected to give devices/EC
+ * some time to set up.
+ */
+ delay = event->data[0] ? SSAM_BASE_UPDATE_CONNECT_DELAY : 0;
+
+ schedule_delayed_work(&hub->update_work, delay);
+
+ /*
+ * Do not return SSAM_NOTIF_HANDLED: The event should be picked up and
+ * consumed by the detachment system driver. We're just a (more or less)
+ * silent observer.
+ */
+ return 0;
+}
+
+static int __maybe_unused ssam_base_hub_resume(struct device *dev)
+{
+ struct ssam_base_hub *hub = dev_get_drvdata(dev);
+
+ schedule_delayed_work(&hub->update_work, 0);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ssam_base_hub_pm_ops, NULL, ssam_base_hub_resume);
+
+static int ssam_base_hub_probe(struct ssam_device *sdev)
+{
+ struct ssam_base_hub *hub;
+ int status;
+
+ hub = devm_kzalloc(&sdev->dev, sizeof(*hub), GFP_KERNEL);
+ if (!hub)
+ return -ENOMEM;
+
+ hub->sdev = sdev;
+ hub->state = SSAM_BASE_HUB_UNINITIALIZED;
+
+ hub->notif.base.priority = INT_MAX; /* This notifier should run first. */
+ hub->notif.base.fn = ssam_base_hub_notif;
+ hub->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
+ hub->notif.event.id.target_category = SSAM_SSH_TC_BAS;
+ hub->notif.event.id.instance = 0;
+ hub->notif.event.mask = SSAM_EVENT_MASK_NONE;
+ hub->notif.event.flags = SSAM_EVENT_SEQUENCED;
+
+ INIT_DELAYED_WORK(&hub->update_work, ssam_base_hub_update_workfn);
+
+ ssam_device_set_drvdata(sdev, hub);
+
+ status = ssam_notifier_register(sdev->ctrl, &hub->notif);
+ if (status)
+ return status;
+
+ status = sysfs_create_group(&sdev->dev.kobj, &ssam_base_hub_group);
+ if (status)
+ goto err;
+
+ schedule_delayed_work(&hub->update_work, 0);
+ return 0;
+
+err:
+ ssam_notifier_unregister(sdev->ctrl, &hub->notif);
+ cancel_delayed_work_sync(&hub->update_work);
+ ssam_hub_remove_devices(&sdev->dev);
+ return status;
+}
+
+static void ssam_base_hub_remove(struct ssam_device *sdev)
+{
+ struct ssam_base_hub *hub = ssam_device_get_drvdata(sdev);
+
+ sysfs_remove_group(&sdev->dev.kobj, &ssam_base_hub_group);
+
+ ssam_notifier_unregister(sdev->ctrl, &hub->notif);
+ cancel_delayed_work_sync(&hub->update_work);
+ ssam_hub_remove_devices(&sdev->dev);
+}
+
+static const struct ssam_device_id ssam_base_hub_match[] = {
+ { SSAM_VDEV(HUB, 0x02, SSAM_ANY_IID, 0x00) },
+ { },
+};
+
+static struct ssam_device_driver ssam_base_hub_driver = {
+ .probe = ssam_base_hub_probe,
+ .remove = ssam_base_hub_remove,
+ .match_table = ssam_base_hub_match,
+ .driver = {
+ .name = "surface_aggregator_base_hub",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .pm = &ssam_base_hub_pm_ops,
+ },
+};
+
+
+/* -- SSAM platform/meta-hub driver. ---------------------------------------- */
+
+static const struct acpi_device_id ssam_platform_hub_match[] = {
+ /* Surface Pro 4, 5, and 6 (OMBR < 0x10) */
+ { "MSHW0081", (unsigned long)ssam_node_group_sp5 },
+
+ /* Surface Pro 6 (OMBR >= 0x10) */
+ { "MSHW0111", (unsigned long)ssam_node_group_sp6 },
+
+ /* Surface Pro 7 */
+ { "MSHW0116", (unsigned long)ssam_node_group_sp7 },
+
+ /* Surface Pro 7+ */
+ { "MSHW0119", (unsigned long)ssam_node_group_sp7 },
+
+ /* Surface Book 2 */
+ { "MSHW0107", (unsigned long)ssam_node_group_sb2 },
+
+ /* Surface Book 3 */
+ { "MSHW0117", (unsigned long)ssam_node_group_sb3 },
+
+ /* Surface Laptop 1 */
+ { "MSHW0086", (unsigned long)ssam_node_group_sl1 },
+
+ /* Surface Laptop 2 */
+ { "MSHW0112", (unsigned long)ssam_node_group_sl2 },
+
+ /* Surface Laptop 3 (13", Intel) */
+ { "MSHW0114", (unsigned long)ssam_node_group_sl3 },
+
+ /* Surface Laptop 3 (15", AMD) */
+ { "MSHW0110", (unsigned long)ssam_node_group_sl3 },
+
+ /* Surface Laptop Go 1 */
+ { "MSHW0118", (unsigned long)ssam_node_group_slg1 },
+
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, ssam_platform_hub_match);
+
+static int ssam_platform_hub_probe(struct platform_device *pdev)
+{
+ const struct software_node **nodes;
+ struct ssam_controller *ctrl;
+ struct fwnode_handle *root;
+ int status;
+
+ nodes = (const struct software_node **)acpi_device_get_match_data(&pdev->dev);
+ if (!nodes)
+ return -ENODEV;
+
+ /*
+ * As we're adding the SSAM client devices as children under this device
+ * and not the SSAM controller, we need to add a device link to the
+ * controller to ensure that we remove all of our devices before the
+ * controller is removed. This also guarantees proper ordering for
+ * suspend/resume of the devices on this hub.
+ */
+ ctrl = ssam_client_bind(&pdev->dev);
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
+
+ status = software_node_register_node_group(nodes);
+ if (status)
+ return status;
+
+ root = software_node_fwnode(&ssam_node_root);
+ if (!root) {
+ software_node_unregister_node_group(nodes);
+ return -ENOENT;
+ }
+
+ set_secondary_fwnode(&pdev->dev, root);
+
+ status = ssam_hub_add_devices(&pdev->dev, ctrl, root);
+ if (status) {
+ set_secondary_fwnode(&pdev->dev, NULL);
+ software_node_unregister_node_group(nodes);
+ }
+
+ platform_set_drvdata(pdev, nodes);
+ return status;
+}
+
+static int ssam_platform_hub_remove(struct platform_device *pdev)
+{
+ const struct software_node **nodes = platform_get_drvdata(pdev);
+
+ ssam_hub_remove_devices(&pdev->dev);
+ set_secondary_fwnode(&pdev->dev, NULL);
+ software_node_unregister_node_group(nodes);
+ return 0;
+}
+
+static struct platform_driver ssam_platform_hub_driver = {
+ .probe = ssam_platform_hub_probe,
+ .remove = ssam_platform_hub_remove,
+ .driver = {
+ .name = "surface_aggregator_platform_hub",
+ .acpi_match_table = ssam_platform_hub_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+
+/* -- Module initialization. ------------------------------------------------ */
+
+static int __init ssam_device_hub_init(void)
+{
+ int status;
+
+ status = platform_driver_register(&ssam_platform_hub_driver);
+ if (status)
+ return status;
+
+ status = ssam_device_driver_register(&ssam_base_hub_driver);
+ if (status)
+ platform_driver_unregister(&ssam_platform_hub_driver);
+
+ return status;
+}
+module_init(ssam_device_hub_init);
+
+static void __exit ssam_device_hub_exit(void)
+{
+ ssam_device_driver_unregister(&ssam_base_hub_driver);
+ platform_driver_unregister(&ssam_platform_hub_driver);
+}
+module_exit(ssam_device_hub_exit);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Device-registry for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_dtx.c b/drivers/platform/surface/surface_dtx.c
new file mode 100644
index 000000000000..63ce587e79e3
--- /dev/null
+++ b/drivers/platform/surface/surface_dtx.c
@@ -0,0 +1,1289 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface Book (gen. 2 and later) detachment system (DTX) driver.
+ *
+ * Provides a user-space interface to properly handle clipboard/tablet
+ * (containing screen and processor) detachment from the base of the device
+ * (containing the keyboard and optionally a discrete GPU). Allows
+ * user-space to acknowledge (to speed things up), abort (e.g. in case the
+ * dGPU is still in use), or request detachment.
+ *
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
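+
+/*
+ * Illustrative user-space usage (a sketch, not part of the driver): a
+ * client opens the misc device registered below, enables event reporting,
+ * and reads translated struct sdtx_event records:
+ *
+ *   int fd = open("/dev/surface/dtx", O_RDWR);
+ *   ioctl(fd, SDTX_IOCTL_EVENTS_ENABLE);
+ *   read(fd, buf, sizeof(buf));   // stream of struct sdtx_event records
+ */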
+
+#include <linux/fs.h>
+#include <linux/input.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/kref.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+#include <linux/surface_aggregator/dtx.h>
+
+
+/* -- SSAM interface. ------------------------------------------------------- */
+
+enum sam_event_cid_bas {
+ SAM_EVENT_CID_DTX_CONNECTION = 0x0c,
+ SAM_EVENT_CID_DTX_REQUEST = 0x0e,
+ SAM_EVENT_CID_DTX_CANCEL = 0x0f,
+ SAM_EVENT_CID_DTX_LATCH_STATUS = 0x11,
+};
+
+enum ssam_bas_base_state {
+ SSAM_BAS_BASE_STATE_DETACH_SUCCESS = 0x00,
+ SSAM_BAS_BASE_STATE_ATTACHED = 0x01,
+ SSAM_BAS_BASE_STATE_NOT_FEASIBLE = 0x02,
+};
+
+enum ssam_bas_latch_status {
+ SSAM_BAS_LATCH_STATUS_CLOSED = 0x00,
+ SSAM_BAS_LATCH_STATUS_OPENED = 0x01,
+ SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN = 0x02,
+ SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN = 0x03,
+ SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE = 0x04,
+};
+
+enum ssam_bas_cancel_reason {
+ SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE = 0x00, /* Low battery. */
+ SSAM_BAS_CANCEL_REASON_TIMEOUT = 0x02,
+ SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN = 0x03,
+ SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN = 0x04,
+ SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE = 0x05,
+};
+
+struct ssam_bas_base_info {
+ u8 state;
+ u8 base_id;
+} __packed;
+
+static_assert(sizeof(struct ssam_bas_base_info) == 2);
+
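+/*
+ * The SSAM_DEFINE_SYNC_REQUEST_N() invocations below each expand to an
+ * argument-less synchronous request function of the form name(ctrl),
+ * differing only in the EC command ID they issue.
+ */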
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x06,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x07,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_request, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x08,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_confirm, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x09,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_heartbeat, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x0a,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_cancel, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x0b,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_base, struct ssam_bas_base_info, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x0c,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_device_mode, u8, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x0d,
+ .instance_id = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_latch_status, u8, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x11,
+ .instance_id = 0x00,
+});
+
+
+/* -- Main structures. ------------------------------------------------------ */
+
+enum sdtx_device_state {
+ SDTX_DEVICE_SHUTDOWN_BIT = BIT(0),
+ SDTX_DEVICE_DIRTY_BASE_BIT = BIT(1),
+ SDTX_DEVICE_DIRTY_MODE_BIT = BIT(2),
+ SDTX_DEVICE_DIRTY_LATCH_BIT = BIT(3),
+};
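+/*
+ * Note: These enum values are used as bit numbers with set_bit() and
+ * test_bit(), i.e. they select bits 1, 2, 4, and 8 of the flags field.
+ * The values are distinct, so this works as intended.
+ */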
+
+struct sdtx_device {
+ struct kref kref;
+ struct rw_semaphore lock; /* Guards device and controller reference. */
+
+ struct device *dev;
+ struct ssam_controller *ctrl;
+ unsigned long flags;
+
+ struct miscdevice mdev;
+ wait_queue_head_t waitq;
+ struct mutex write_lock; /* Guards order of events/notifications. */
+ struct rw_semaphore client_lock; /* Guards client list. */
+ struct list_head client_list;
+
+ struct delayed_work state_work;
+ struct {
+ struct ssam_bas_base_info base;
+ u8 device_mode;
+ u8 latch_status;
+ } state;
+
+ struct delayed_work mode_work;
+ struct input_dev *mode_switch;
+
+ struct ssam_event_notifier notif;
+};
+
+enum sdtx_client_state {
+ SDTX_CLIENT_EVENTS_ENABLED_BIT = BIT(0),
+};
+
+struct sdtx_client {
+ struct sdtx_device *ddev;
+ struct list_head node;
+ unsigned long flags;
+
+ struct fasync_struct *fasync;
+
+ struct mutex read_lock; /* Guards FIFO buffer read access. */
+ DECLARE_KFIFO(buffer, u8, 512);
+};
+
+static void __sdtx_device_release(struct kref *kref)
+{
+ struct sdtx_device *ddev = container_of(kref, struct sdtx_device, kref);
+
+ mutex_destroy(&ddev->write_lock);
+ kfree(ddev);
+}
+
+static struct sdtx_device *sdtx_device_get(struct sdtx_device *ddev)
+{
+ if (ddev)
+ kref_get(&ddev->kref);
+
+ return ddev;
+}
+
+static void sdtx_device_put(struct sdtx_device *ddev)
+{
+ if (ddev)
+ kref_put(&ddev->kref, __sdtx_device_release);
+}
+
+
+/* -- Firmware value translations. ------------------------------------------ */
+
+static u16 sdtx_translate_base_state(struct sdtx_device *ddev, u8 state)
+{
+ switch (state) {
+ case SSAM_BAS_BASE_STATE_ATTACHED:
+ return SDTX_BASE_ATTACHED;
+
+ case SSAM_BAS_BASE_STATE_DETACH_SUCCESS:
+ return SDTX_BASE_DETACHED;
+
+ case SSAM_BAS_BASE_STATE_NOT_FEASIBLE:
+ return SDTX_DETACH_NOT_FEASIBLE;
+
+ default:
+ dev_err(ddev->dev, "unknown base state: %#04x\n", state);
+ return SDTX_UNKNOWN(state);
+ }
+}
+
+static u16 sdtx_translate_latch_status(struct sdtx_device *ddev, u8 status)
+{
+ switch (status) {
+ case SSAM_BAS_LATCH_STATUS_CLOSED:
+ return SDTX_LATCH_CLOSED;
+
+ case SSAM_BAS_LATCH_STATUS_OPENED:
+ return SDTX_LATCH_OPENED;
+
+ case SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN:
+ return SDTX_ERR_FAILED_TO_OPEN;
+
+ case SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN:
+ return SDTX_ERR_FAILED_TO_REMAIN_OPEN;
+
+ case SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE:
+ return SDTX_ERR_FAILED_TO_CLOSE;
+
+ default:
+ dev_err(ddev->dev, "unknown latch status: %#04x\n", status);
+ return SDTX_UNKNOWN(status);
+ }
+}
+
+static u16 sdtx_translate_cancel_reason(struct sdtx_device *ddev, u8 reason)
+{
+ switch (reason) {
+ case SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE:
+ return SDTX_DETACH_NOT_FEASIBLE;
+
+ case SSAM_BAS_CANCEL_REASON_TIMEOUT:
+ return SDTX_DETACH_TIMEDOUT;
+
+ case SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN:
+ return SDTX_ERR_FAILED_TO_OPEN;
+
+ case SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN:
+ return SDTX_ERR_FAILED_TO_REMAIN_OPEN;
+
+ case SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE:
+ return SDTX_ERR_FAILED_TO_CLOSE;
+
+ default:
+ dev_err(ddev->dev, "unknown cancel reason: %#04x\n", reason);
+ return SDTX_UNKNOWN(reason);
+ }
+}
+
+
+/* -- IOCTLs. --------------------------------------------------------------- */
+
+static int sdtx_ioctl_get_base_info(struct sdtx_device *ddev,
+ struct sdtx_base_info __user *buf)
+{
+ struct ssam_bas_base_info raw;
+ struct sdtx_base_info info;
+ int status;
+
+ lockdep_assert_held_read(&ddev->lock);
+
+ status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &raw);
+ if (status < 0)
+ return status;
+
+ info.state = sdtx_translate_base_state(ddev, raw.state);
+ info.base_id = SDTX_BASE_TYPE_SSH(raw.base_id);
+
+ if (copy_to_user(buf, &info, sizeof(info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int sdtx_ioctl_get_device_mode(struct sdtx_device *ddev, u16 __user *buf)
+{
+ u8 mode;
+ int status;
+
+ lockdep_assert_held_read(&ddev->lock);
+
+ status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
+ if (status < 0)
+ return status;
+
+ return put_user(mode, buf);
+}
+
+static int sdtx_ioctl_get_latch_status(struct sdtx_device *ddev, u16 __user *buf)
+{
+ u8 latch;
+ int status;
+
+ lockdep_assert_held_read(&ddev->lock);
+
+ status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
+ if (status < 0)
+ return status;
+
+ return put_user(sdtx_translate_latch_status(ddev, latch), buf);
+}
+
+static long __surface_dtx_ioctl(struct sdtx_client *client, unsigned int cmd, unsigned long arg)
+{
+ struct sdtx_device *ddev = client->ddev;
+
+ lockdep_assert_held_read(&ddev->lock);
+
+ switch (cmd) {
+ case SDTX_IOCTL_EVENTS_ENABLE:
+ set_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
+ return 0;
+
+ case SDTX_IOCTL_EVENTS_DISABLE:
+ clear_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
+ return 0;
+
+ case SDTX_IOCTL_LATCH_LOCK:
+ return ssam_retry(ssam_bas_latch_lock, ddev->ctrl);
+
+ case SDTX_IOCTL_LATCH_UNLOCK:
+ return ssam_retry(ssam_bas_latch_unlock, ddev->ctrl);
+
+ case SDTX_IOCTL_LATCH_REQUEST:
+ return ssam_retry(ssam_bas_latch_request, ddev->ctrl);
+
+ case SDTX_IOCTL_LATCH_CONFIRM:
+ return ssam_retry(ssam_bas_latch_confirm, ddev->ctrl);
+
+ case SDTX_IOCTL_LATCH_HEARTBEAT:
+ return ssam_retry(ssam_bas_latch_heartbeat, ddev->ctrl);
+
+ case SDTX_IOCTL_LATCH_CANCEL:
+ return ssam_retry(ssam_bas_latch_cancel, ddev->ctrl);
+
+ case SDTX_IOCTL_GET_BASE_INFO:
+ return sdtx_ioctl_get_base_info(ddev, (struct sdtx_base_info __user *)arg);
+
+ case SDTX_IOCTL_GET_DEVICE_MODE:
+ return sdtx_ioctl_get_device_mode(ddev, (u16 __user *)arg);
+
+ case SDTX_IOCTL_GET_LATCH_STATUS:
+ return sdtx_ioctl_get_latch_status(ddev, (u16 __user *)arg);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static long surface_dtx_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct sdtx_client *client = file->private_data;
+ long status;
+
+ if (down_read_killable(&client->ddev->lock))
+ return -ERESTARTSYS;
+
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
+ up_read(&client->ddev->lock);
+ return -ENODEV;
+ }
+
+ status = __surface_dtx_ioctl(client, cmd, arg);
+
+ up_read(&client->ddev->lock);
+ return status;
+}
+
+
+/* -- File operations. ------------------------------------------------------ */
+
+static int surface_dtx_open(struct inode *inode, struct file *file)
+{
+ struct sdtx_device *ddev = container_of(file->private_data, struct sdtx_device, mdev);
+ struct sdtx_client *client;
+
+ /* Initialize client. */
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->ddev = sdtx_device_get(ddev);
+
+ INIT_LIST_HEAD(&client->node);
+
+ mutex_init(&client->read_lock);
+ INIT_KFIFO(client->buffer);
+
+ file->private_data = client;
+
+ /* Attach client. */
+ down_write(&ddev->client_lock);
+
+ /*
+ * Do not add a new client if the device has been shut down. Note that
+ * it's enough to hold the client_lock here as, during shutdown, we
+ * only acquire that lock and remove clients after marking the device
+ * as shut down.
+ */
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
+ up_write(&ddev->client_lock);
+ sdtx_device_put(client->ddev);
+ kfree(client);
+ return -ENODEV;
+ }
+
+ list_add_tail(&client->node, &ddev->client_list);
+ up_write(&ddev->client_lock);
+
+ stream_open(inode, file);
+ return 0;
+}
+
+static int surface_dtx_release(struct inode *inode, struct file *file)
+{
+ struct sdtx_client *client = file->private_data;
+
+ /* Detach client. */
+ down_write(&client->ddev->client_lock);
+ list_del(&client->node);
+ up_write(&client->ddev->client_lock);
+
+ /* Free client. */
+ sdtx_device_put(client->ddev);
+ mutex_destroy(&client->read_lock);
+ kfree(client);
+
+ return 0;
+}
+
+static ssize_t surface_dtx_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
+{
+ struct sdtx_client *client = file->private_data;
+ struct sdtx_device *ddev = client->ddev;
+ unsigned int copied;
+ int status = 0;
+
+ if (down_read_killable(&ddev->lock))
+ return -ERESTARTSYS;
+
+ /* Make sure we're not shut down. */
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
+ up_read(&ddev->lock);
+ return -ENODEV;
+ }
+
+ do {
+ /* Check availability, wait if necessary. */
+ if (kfifo_is_empty(&client->buffer)) {
+ up_read(&ddev->lock);
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ status = wait_event_interruptible(ddev->waitq,
+ !kfifo_is_empty(&client->buffer) ||
+ test_bit(SDTX_DEVICE_SHUTDOWN_BIT,
+ &ddev->flags));
+ if (status < 0)
+ return status;
+
+ if (down_read_killable(&ddev->lock))
+ return -ERESTARTSYS;
+
+ /* Need to check that we're not shut down again. */
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
+ up_read(&ddev->lock);
+ return -ENODEV;
+ }
+ }
+
+ /* Try to read from FIFO. */
+ if (mutex_lock_interruptible(&client->read_lock)) {
+ up_read(&ddev->lock);
+ return -ERESTARTSYS;
+ }
+
+ status = kfifo_to_user(&client->buffer, buf, count, &copied);
+ mutex_unlock(&client->read_lock);
+
+ if (status < 0) {
+ up_read(&ddev->lock);
+ return status;
+ }
+
+ /* We might not have gotten anything, check this here. */
+ if (copied == 0 && (file->f_flags & O_NONBLOCK)) {
+ up_read(&ddev->lock);
+ return -EAGAIN;
+ }
+ } while (copied == 0);
+
+ up_read(&ddev->lock);
+ return copied;
+}
+
+static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt)
+{
+ struct sdtx_client *client = file->private_data;
+ __poll_t events = 0;
+
+ if (down_read_killable(&client->ddev->lock))
+ return EPOLLERR;
+
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
+ up_read(&client->ddev->lock);
+ return EPOLLHUP | EPOLLERR;
+ }
+
+ poll_wait(file, &client->ddev->waitq, pt);
+
+ if (!kfifo_is_empty(&client->buffer))
+ events |= EPOLLIN | EPOLLRDNORM;
+
+ up_read(&client->ddev->lock);
+ return events;
+}
+
+static int surface_dtx_fasync(int fd, struct file *file, int on)
+{
+ struct sdtx_client *client = file->private_data;
+
+ return fasync_helper(fd, file, on, &client->fasync);
+}
+
+static const struct file_operations surface_dtx_fops = {
+ .owner = THIS_MODULE,
+ .open = surface_dtx_open,
+ .release = surface_dtx_release,
+ .read = surface_dtx_read,
+ .poll = surface_dtx_poll,
+ .fasync = surface_dtx_fasync,
+ .unlocked_ioctl = surface_dtx_ioctl,
+ .compat_ioctl = surface_dtx_ioctl,
+ .llseek = no_llseek,
+};
+
+
+/* -- Event handling/forwarding. -------------------------------------------- */
+
+/*
+ * The device operation mode is not immediately updated on the EC when the
+ * base has been connected, i.e. querying the device mode inside the
+ * connection event callback yields an outdated value. Thus, we can only
+ * determine the new tablet-mode switch and device mode values after some
+ * time.
+ *
+ * These delays have been chosen experimentally. We first delay on connect
+ * events, then check and validate the device mode against the base state and
+ * if invalid delay again by the "recheck" delay.
+ */
+#define SDTX_DEVICE_MODE_DELAY_CONNECT msecs_to_jiffies(100)
+#define SDTX_DEVICE_MODE_DELAY_RECHECK msecs_to_jiffies(100)
+
+struct sdtx_status_event {
+ struct sdtx_event e;
+ __u16 v;
+} __packed;
+
+struct sdtx_base_info_event {
+ struct sdtx_event e;
+ struct sdtx_base_info v;
+} __packed;
+
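+/*
+ * Common event buffer: large enough to hold any of the event variants
+ * translated in sdtx_notifier(), so a single stack allocation suffices.
+ */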
+union sdtx_generic_event {
+ struct sdtx_event common;
+ struct sdtx_status_event status;
+ struct sdtx_base_info_event base;
+};
+
+static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay);
+
+/* Must be executed with ddev->write_lock held. */
+static void sdtx_push_event(struct sdtx_device *ddev, struct sdtx_event *evt)
+{
+ const size_t len = sizeof(struct sdtx_event) + evt->length;
+ struct sdtx_client *client;
+
+ lockdep_assert_held(&ddev->write_lock);
+
+ down_read(&ddev->client_lock);
+ list_for_each_entry(client, &ddev->client_list, node) {
+ if (!test_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags))
+ continue;
+
+ if (likely(kfifo_avail(&client->buffer) >= len))
+ kfifo_in(&client->buffer, (const u8 *)evt, len);
+ else
+ dev_warn(ddev->dev, "event buffer overrun\n");
+
+ kill_fasync(&client->fasync, SIGIO, POLL_IN);
+ }
+ up_read(&ddev->client_lock);
+
+ wake_up_interruptible(&ddev->waitq);
+}
+
+static u32 sdtx_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in)
+{
+ struct sdtx_device *ddev = container_of(nf, struct sdtx_device, notif);
+ union sdtx_generic_event event;
+ size_t len;
+
+ /* Validate event payload length. */
+ switch (in->command_id) {
+ case SAM_EVENT_CID_DTX_CONNECTION:
+ len = 2 * sizeof(u8);
+ break;
+
+ case SAM_EVENT_CID_DTX_REQUEST:
+ len = 0;
+ break;
+
+ case SAM_EVENT_CID_DTX_CANCEL:
+ len = sizeof(u8);
+ break;
+
+ case SAM_EVENT_CID_DTX_LATCH_STATUS:
+ len = sizeof(u8);
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (in->length != len) {
+ dev_err(ddev->dev,
+ "unexpected payload size for event %#04x: got %u, expected %zu\n",
+ in->command_id, in->length, len);
+ return 0;
+ }
+
+ mutex_lock(&ddev->write_lock);
+
+ /* Translate event. */
+ switch (in->command_id) {
+ case SAM_EVENT_CID_DTX_CONNECTION:
+ clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
+
+ /* If state has not changed: do not send new event. */
+ if (ddev->state.base.state == in->data[0] &&
+ ddev->state.base.base_id == in->data[1])
+ goto out;
+
+ ddev->state.base.state = in->data[0];
+ ddev->state.base.base_id = in->data[1];
+
+ event.base.e.length = sizeof(struct sdtx_base_info);
+ event.base.e.code = SDTX_EVENT_BASE_CONNECTION;
+ event.base.v.state = sdtx_translate_base_state(ddev, in->data[0]);
+ event.base.v.base_id = SDTX_BASE_TYPE_SSH(in->data[1]);
+ break;
+
+ case SAM_EVENT_CID_DTX_REQUEST:
+ event.common.code = SDTX_EVENT_REQUEST;
+ event.common.length = 0;
+ break;
+
+ case SAM_EVENT_CID_DTX_CANCEL:
+ event.status.e.length = sizeof(u16);
+ event.status.e.code = SDTX_EVENT_CANCEL;
+ event.status.v = sdtx_translate_cancel_reason(ddev, in->data[0]);
+ break;
+
+ case SAM_EVENT_CID_DTX_LATCH_STATUS:
+ clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);
+
+ /* If state has not changed: do not send new event. */
+ if (ddev->state.latch_status == in->data[0])
+ goto out;
+
+ ddev->state.latch_status = in->data[0];
+
+ event.status.e.length = sizeof(u16);
+ event.status.e.code = SDTX_EVENT_LATCH_STATUS;
+ event.status.v = sdtx_translate_latch_status(ddev, in->data[0]);
+ break;
+ }
+
+ sdtx_push_event(ddev, &event.common);
+
+ /* Update device mode on base connection change. */
+ if (in->command_id == SAM_EVENT_CID_DTX_CONNECTION) {
+ unsigned long delay;
+
+ delay = in->data[0] ? SDTX_DEVICE_MODE_DELAY_CONNECT : 0;
+ sdtx_update_device_mode(ddev, delay);
+ }
+
+out:
+ mutex_unlock(&ddev->write_lock);
+ return SSAM_NOTIF_HANDLED;
+}
+
+
+/* -- State update functions. ----------------------------------------------- */
+
+static bool sdtx_device_mode_invalid(u8 mode, u8 base_state)
+{
+ return ((base_state == SSAM_BAS_BASE_STATE_ATTACHED) &&
+ (mode == SDTX_DEVICE_MODE_TABLET)) ||
+ ((base_state == SSAM_BAS_BASE_STATE_DETACH_SUCCESS) &&
+ (mode != SDTX_DEVICE_MODE_TABLET));
+}
+
+static void sdtx_device_mode_workfn(struct work_struct *work)
+{
+ struct sdtx_device *ddev = container_of(work, struct sdtx_device, mode_work.work);
+ struct sdtx_status_event event;
+ struct ssam_bas_base_info base;
+ int status, tablet;
+ u8 mode;
+
+ /* Get operation mode. */
+ status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
+ if (status) {
+ dev_err(ddev->dev, "failed to get device mode: %d\n", status);
+ return;
+ }
+
+ /* Get base info. */
+ status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
+ if (status) {
+ dev_err(ddev->dev, "failed to get base info: %d\n", status);
+ return;
+ }
+
+ /*
+ * In some cases (specifically when attaching the base), the device
+ * mode isn't updated right away. Thus we check if the device mode
+ * makes sense for the given base state and try again later if it
+ * doesn't.
+ */
+ if (sdtx_device_mode_invalid(mode, base.state)) {
+ dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
+ sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
+ return;
+ }
+
+ mutex_lock(&ddev->write_lock);
+ clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
+
+ /* Avoid sending duplicate device-mode events. */
+ if (ddev->state.device_mode == mode) {
+ mutex_unlock(&ddev->write_lock);
+ return;
+ }
+
+ ddev->state.device_mode = mode;
+
+ event.e.length = sizeof(u16);
+ event.e.code = SDTX_EVENT_DEVICE_MODE;
+ event.v = mode;
+
+ sdtx_push_event(ddev, &event.e);
+
+ /* Send SW_TABLET_MODE event. */
+ tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
+ input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
+ input_sync(ddev->mode_switch);
+
+ mutex_unlock(&ddev->write_lock);
+}
+
+static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay)
+{
+ schedule_delayed_work(&ddev->mode_work, delay);
+}
+
+/* Must be executed with ddev->write_lock held. */
+static void __sdtx_device_state_update_base(struct sdtx_device *ddev,
+ struct ssam_bas_base_info info)
+{
+ struct sdtx_base_info_event event;
+
+ lockdep_assert_held(&ddev->write_lock);
+
+ /* Prevent duplicate events. */
+ if (ddev->state.base.state == info.state &&
+ ddev->state.base.base_id == info.base_id)
+ return;
+
+ ddev->state.base = info;
+
+ event.e.length = sizeof(struct sdtx_base_info);
+ event.e.code = SDTX_EVENT_BASE_CONNECTION;
+ event.v.state = sdtx_translate_base_state(ddev, info.state);
+ event.v.base_id = SDTX_BASE_TYPE_SSH(info.base_id);
+
+ sdtx_push_event(ddev, &event.e);
+}
+
+/* Must be executed with ddev->write_lock held. */
+static void __sdtx_device_state_update_mode(struct sdtx_device *ddev, u8 mode)
+{
+ struct sdtx_status_event event;
+ int tablet;
+
+ /*
+ * Note: This function must be called after updating the base state
+ * via __sdtx_device_state_update_base(), as we rely on the updated
+ * base state value in the validity check below.
+ */
+
+ lockdep_assert_held(&ddev->write_lock);
+
+ if (sdtx_device_mode_invalid(mode, ddev->state.base.state)) {
+ dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
+ sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
+ return;
+ }
+
+ /* Prevent duplicate events. */
+ if (ddev->state.device_mode == mode)
+ return;
+
+ ddev->state.device_mode = mode;
+
+ /* Send event. */
+ event.e.length = sizeof(u16);
+ event.e.code = SDTX_EVENT_DEVICE_MODE;
+ event.v = mode;
+
+ sdtx_push_event(ddev, &event.e);
+
+ /* Send SW_TABLET_MODE event. */
+ tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
+ input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
+ input_sync(ddev->mode_switch);
+}
+
+/* Must be executed with ddev->write_lock held. */
+static void __sdtx_device_state_update_latch(struct sdtx_device *ddev, u8 status)
+{
+ struct sdtx_status_event event;
+
+ lockdep_assert_held(&ddev->write_lock);
+
+ /* Prevent duplicate events. */
+ if (ddev->state.latch_status == status)
+ return;
+
+ ddev->state.latch_status = status;
+
+ event.e.length = sizeof(u16);
+ event.e.code = SDTX_EVENT_LATCH_STATUS;
+ event.v = sdtx_translate_latch_status(ddev, status);
+
+ sdtx_push_event(ddev, &event.e);
+}
+
+static void sdtx_device_state_workfn(struct work_struct *work)
+{
+ struct sdtx_device *ddev = container_of(work, struct sdtx_device, state_work.work);
+ struct ssam_bas_base_info base;
+ u8 mode, latch;
+ int status;
+
+ /* Mark everything as dirty. */
+ set_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
+ set_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
+ set_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);
+
+ /*
+ * Ensure that the state gets marked as dirty before continuing to
+ * query it. Necessary to ensure that clear_bit() calls in
+ * sdtx_notifier() and sdtx_device_mode_workfn() actually clear these
+ * bits if an event is received while updating the state here.
+ */
+ smp_mb__after_atomic();
+
+ status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
+ if (status) {
+ dev_err(ddev->dev, "failed to get base state: %d\n", status);
+ return;
+ }
+
+ status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
+ if (status) {
+ dev_err(ddev->dev, "failed to get device mode: %d\n", status);
+ return;
+ }
+
+ status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
+ if (status) {
+ dev_err(ddev->dev, "failed to get latch status: %d\n", status);
+ return;
+ }
+
+ mutex_lock(&ddev->write_lock);
+
+ /*
+ * If the respective dirty-bit has been cleared, an event has been
+ * received, updating this state. The queried state may thus be out of
+ * date. At this point, we can safely assume that the state provided
+ * by the event is either up to date, or we're about to receive
+ * another event updating it.
+ */
+
+ if (test_and_clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags))
+ __sdtx_device_state_update_base(ddev, base);
+
+ if (test_and_clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags))
+ __sdtx_device_state_update_mode(ddev, mode);
+
+ if (test_and_clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags))
+ __sdtx_device_state_update_latch(ddev, latch);
+
+ mutex_unlock(&ddev->write_lock);
+}
+
+static void sdtx_update_device_state(struct sdtx_device *ddev, unsigned long delay)
+{
+ schedule_delayed_work(&ddev->state_work, delay);
+}
+
+
+/* -- Common device initialization. ----------------------------------------- */
+
+static int sdtx_device_init(struct sdtx_device *ddev, struct device *dev,
+ struct ssam_controller *ctrl)
+{
+ int status, tablet_mode;
+
+ /* Basic initialization. */
+ kref_init(&ddev->kref);
+ init_rwsem(&ddev->lock);
+ ddev->dev = dev;
+ ddev->ctrl = ctrl;
+
+ ddev->mdev.minor = MISC_DYNAMIC_MINOR;
+ ddev->mdev.name = "surface_dtx";
+ ddev->mdev.nodename = "surface/dtx";
+ ddev->mdev.fops = &surface_dtx_fops;
+
+ ddev->notif.base.priority = 1;
+ ddev->notif.base.fn = sdtx_notifier;
+ ddev->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
+ ddev->notif.event.id.target_category = SSAM_SSH_TC_BAS;
+ ddev->notif.event.id.instance = 0;
+ ddev->notif.event.mask = SSAM_EVENT_MASK_NONE;
+ ddev->notif.event.flags = SSAM_EVENT_SEQUENCED;
+
+ init_waitqueue_head(&ddev->waitq);
+ mutex_init(&ddev->write_lock);
+ init_rwsem(&ddev->client_lock);
+ INIT_LIST_HEAD(&ddev->client_list);
+
+ INIT_DELAYED_WORK(&ddev->mode_work, sdtx_device_mode_workfn);
+ INIT_DELAYED_WORK(&ddev->state_work, sdtx_device_state_workfn);
+
+ /*
+ * Get current device state. We want to guarantee that events are only
+ * sent when state actually changes. Thus we cannot use special
+ * "uninitialized" values, as that would cause problems when manually
+ * querying the state in surface_dtx_pm_complete(). I.e. we would not
+ * be able to detect state changes there if no change event has been
+ * received between driver initialization and first device suspension.
+ *
+ * Note that we also need to do this before registering the event
+ * notifier, as that may access the state values.
+ */
+ status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &ddev->state.base);
+ if (status)
+ return status;
+
+ status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &ddev->state.device_mode);
+ if (status)
+ return status;
+
+ status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &ddev->state.latch_status);
+ if (status)
+ return status;
+
+ /* Set up tablet mode switch. */
+ ddev->mode_switch = input_allocate_device();
+ if (!ddev->mode_switch)
+ return -ENOMEM;
+
+ ddev->mode_switch->name = "Microsoft Surface DTX Device Mode Switch";
+ ddev->mode_switch->phys = "ssam/01:11:01:00:00/input0";
+ ddev->mode_switch->id.bustype = BUS_HOST;
+ ddev->mode_switch->dev.parent = ddev->dev;
+
+ tablet_mode = (ddev->state.device_mode != SDTX_DEVICE_MODE_LAPTOP);
+ input_set_capability(ddev->mode_switch, EV_SW, SW_TABLET_MODE);
+ input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet_mode);
+
+ status = input_register_device(ddev->mode_switch);
+ if (status) {
+ input_free_device(ddev->mode_switch);
+ return status;
+ }
+
+ /* Set up event notifier. */
+ status = ssam_notifier_register(ddev->ctrl, &ddev->notif);
+ if (status)
+ goto err_notif;
+
+ /* Register miscdevice. */
+ status = misc_register(&ddev->mdev);
+ if (status)
+ goto err_mdev;
+
+ /*
+ * Update device state in case it has changed between getting the
+ * initial mode and registering the event notifier.
+ */
+ sdtx_update_device_state(ddev, 0);
+ return 0;
+
+err_mdev:
+ ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
+ cancel_delayed_work_sync(&ddev->mode_work);
+err_notif:
+ input_unregister_device(ddev->mode_switch);
+ return status;
+}
+
+static struct sdtx_device *sdtx_device_create(struct device *dev, struct ssam_controller *ctrl)
+{
+ struct sdtx_device *ddev;
+ int status;
+
+ ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
+ if (!ddev)
+ return ERR_PTR(-ENOMEM);
+
+ status = sdtx_device_init(ddev, dev, ctrl);
+ if (status) {
+ sdtx_device_put(ddev);
+ return ERR_PTR(status);
+ }
+
+ return ddev;
+}
+
+static void sdtx_device_destroy(struct sdtx_device *ddev)
+{
+ struct sdtx_client *client;
+
+ /*
+ * Mark device as shut-down. Prevent new clients from being added and
+ * new operations from being executed.
+ */
+ set_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags);
+
+ /* Disable notifiers, prevent new events from arriving. */
+ ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
+
+ /* Stop mode_work, prevent access to mode_switch. */
+ cancel_delayed_work_sync(&ddev->mode_work);
+
+ /* Stop state_work. */
+ cancel_delayed_work_sync(&ddev->state_work);
+
+ /* With mode_work canceled, we can unregister the mode_switch. */
+ input_unregister_device(ddev->mode_switch);
+
+ /* Wake up async clients. */
+ down_write(&ddev->client_lock);
+ list_for_each_entry(client, &ddev->client_list, node) {
+ kill_fasync(&client->fasync, SIGIO, POLL_HUP);
+ }
+ up_write(&ddev->client_lock);
+
+ /* Wake up blocking clients. */
+ wake_up_interruptible(&ddev->waitq);
+
+ /*
+ * Wait for clients to finish their current operation. After this, the
+ * controller and device references are guaranteed to be no longer in
+ * use.
+ */
+ down_write(&ddev->lock);
+ ddev->dev = NULL;
+ ddev->ctrl = NULL;
+ up_write(&ddev->lock);
+
+ /* Finally remove the misc-device. */
+ misc_deregister(&ddev->mdev);
+
+ /*
+ * We're now guaranteed that surface_dtx_open() won't be called any
+ * more, so we can now drop our reference.
+ */
+ sdtx_device_put(ddev);
+}
+
+
+/* -- PM ops. --------------------------------------------------------------- */
+
+#ifdef CONFIG_PM_SLEEP
+
+static void surface_dtx_pm_complete(struct device *dev)
+{
+ struct sdtx_device *ddev = dev_get_drvdata(dev);
+
+ /*
+ * Normally, the EC will store events while suspended (i.e. in
+ * display-off state) and release them when resumed (i.e. transitioned
+ * to display-on state). During hibernation, however, the EC will be
+ * shut down and does not store events. Furthermore, events might be
+ * dropped during prolonged suspension (it is currently unknown how
+ * big this event buffer is and how it behaves on overruns).
+ *
+ * To prevent any problems, we update the device state here. We do
+ * this delayed to ensure that any events sent by the EC directly
+ * after resuming will be handled first. The delay below has been
+ * chosen (experimentally), so that there should be ample time for
+ * these events to be handled, before we check and, if necessary,
+ * update the state.
+ */
+ sdtx_update_device_state(ddev, msecs_to_jiffies(1000));
+}
+
+static const struct dev_pm_ops surface_dtx_pm_ops = {
+ .complete = surface_dtx_pm_complete,
+};
+
+#else /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops surface_dtx_pm_ops = {};
+
+#endif /* CONFIG_PM_SLEEP */
+
+
+/* -- Platform driver. ------------------------------------------------------ */
+
+static int surface_dtx_platform_probe(struct platform_device *pdev)
+{
+ struct ssam_controller *ctrl;
+ struct sdtx_device *ddev;
+
+ /* Link to EC. */
+ ctrl = ssam_client_bind(&pdev->dev);
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
+
+ ddev = sdtx_device_create(&pdev->dev, ctrl);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ platform_set_drvdata(pdev, ddev);
+ return 0;
+}
+
+static int surface_dtx_platform_remove(struct platform_device *pdev)
+{
+ sdtx_device_destroy(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static const struct acpi_device_id surface_dtx_acpi_match[] = {
+ { "MSHW0133", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, surface_dtx_acpi_match);
+
+static struct platform_driver surface_dtx_platform_driver = {
+ .probe = surface_dtx_platform_probe,
+ .remove = surface_dtx_platform_remove,
+ .driver = {
+ .name = "surface_dtx_pltf",
+ .acpi_match_table = surface_dtx_acpi_match,
+ .pm = &surface_dtx_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+
+/* -- SSAM device driver. --------------------------------------------------- */
+
+#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
+
+static int surface_dtx_ssam_probe(struct ssam_device *sdev)
+{
+ struct sdtx_device *ddev;
+
+ ddev = sdtx_device_create(&sdev->dev, sdev->ctrl);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ ssam_device_set_drvdata(sdev, ddev);
+ return 0;
+}
+
+static void surface_dtx_ssam_remove(struct ssam_device *sdev)
+{
+ sdtx_device_destroy(ssam_device_get_drvdata(sdev));
+}
+
+static const struct ssam_device_id surface_dtx_ssam_match[] = {
+ { SSAM_SDEV(BAS, 0x01, 0x00, 0x00) },
+ { },
+};
+MODULE_DEVICE_TABLE(ssam, surface_dtx_ssam_match);
+
+static struct ssam_device_driver surface_dtx_ssam_driver = {
+ .probe = surface_dtx_ssam_probe,
+ .remove = surface_dtx_ssam_remove,
+ .match_table = surface_dtx_ssam_match,
+ .driver = {
+ .name = "surface_dtx",
+ .pm = &surface_dtx_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+static int ssam_dtx_driver_register(void)
+{
+ return ssam_device_driver_register(&surface_dtx_ssam_driver);
+}
+
+static void ssam_dtx_driver_unregister(void)
+{
+ ssam_device_driver_unregister(&surface_dtx_ssam_driver);
+}
+
+#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+static int ssam_dtx_driver_register(void)
+{
+ return 0;
+}
+
+static void ssam_dtx_driver_unregister(void)
+{
+}
+
+#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+
+/* -- Module setup. --------------------------------------------------------- */
+
+static int __init surface_dtx_init(void)
+{
+ int status;
+
+ status = ssam_dtx_driver_register();
+ if (status)
+ return status;
+
+ status = platform_driver_register(&surface_dtx_platform_driver);
+ if (status)
+ ssam_dtx_driver_unregister();
+
+ return status;
+}
+module_init(surface_dtx_init);
+
+static void __exit surface_dtx_exit(void)
+{
+ platform_driver_unregister(&surface_dtx_platform_driver);
+ ssam_dtx_driver_unregister();
+}
+module_exit(surface_dtx_exit);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Detachment-system driver for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c
new file mode 100644
index 000000000000..6373d3b5eb7f
--- /dev/null
+++ b/drivers/platform/surface/surface_platform_profile.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface Platform Profile / Performance Mode driver for Surface System
+ * Aggregator Module (thermal subsystem).
+ *
+ * Copyright (C) 2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_profile.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/device.h>
+
+enum ssam_tmp_profile {
+ SSAM_TMP_PROFILE_NORMAL = 1,
+ SSAM_TMP_PROFILE_BATTERY_SAVER = 2,
+ SSAM_TMP_PROFILE_BETTER_PERFORMANCE = 3,
+ SSAM_TMP_PROFILE_BEST_PERFORMANCE = 4,
+};
+
+struct ssam_tmp_profile_info {
+ __le32 profile;
+ __le16 unknown1;
+ __le16 unknown2;
+} __packed;
+
+struct ssam_tmp_profile_device {
+ struct ssam_device *sdev;
+ struct platform_profile_handler handler;
+};
+
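+/*
+ * Note: The _CL_ macro variants below generate client-device-bound request
+ * functions, here roughly __ssam_tmp_profile_get(sdev, &info) and
+ * __ssam_tmp_profile_set(sdev, &profile_le), which derive target ID and
+ * instance from the given ssam_device.
+ */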
+SSAM_DEFINE_SYNC_REQUEST_CL_R(__ssam_tmp_profile_get, struct ssam_tmp_profile_info, {
+ .target_category = SSAM_SSH_TC_TMP,
+ .command_id = 0x02,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_CL_W(__ssam_tmp_profile_set, __le32, {
+ .target_category = SSAM_SSH_TC_TMP,
+ .command_id = 0x03,
+});
+
+static int ssam_tmp_profile_get(struct ssam_device *sdev, enum ssam_tmp_profile *p)
+{
+ struct ssam_tmp_profile_info info;
+ int status;
+
+ status = ssam_retry(__ssam_tmp_profile_get, sdev, &info);
+ if (status < 0)
+ return status;
+
+ *p = le32_to_cpu(info.profile);
+ return 0;
+}
+
+static int ssam_tmp_profile_set(struct ssam_device *sdev, enum ssam_tmp_profile p)
+{
+ __le32 profile_le = cpu_to_le32(p);
+
+ return ssam_retry(__ssam_tmp_profile_set, sdev, &profile_le);
+}
+
+static int convert_ssam_to_profile(struct ssam_device *sdev, enum ssam_tmp_profile p)
+{
+ switch (p) {
+ case SSAM_TMP_PROFILE_NORMAL:
+ return PLATFORM_PROFILE_BALANCED;
+
+ case SSAM_TMP_PROFILE_BATTERY_SAVER:
+ return PLATFORM_PROFILE_LOW_POWER;
+
+ case SSAM_TMP_PROFILE_BETTER_PERFORMANCE:
+ return PLATFORM_PROFILE_BALANCED_PERFORMANCE;
+
+ case SSAM_TMP_PROFILE_BEST_PERFORMANCE:
+ return PLATFORM_PROFILE_PERFORMANCE;
+
+ default:
+ dev_err(&sdev->dev, "invalid performance profile: %d", p);
+ return -EINVAL;
+ }
+}
+
+static int convert_profile_to_ssam(struct ssam_device *sdev, enum platform_profile_option p)
+{
+ switch (p) {
+ case PLATFORM_PROFILE_LOW_POWER:
+ return SSAM_TMP_PROFILE_BATTERY_SAVER;
+
+ case PLATFORM_PROFILE_BALANCED:
+ return SSAM_TMP_PROFILE_NORMAL;
+
+ case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
+ return SSAM_TMP_PROFILE_BETTER_PERFORMANCE;
+
+ case PLATFORM_PROFILE_PERFORMANCE:
+ return SSAM_TMP_PROFILE_BEST_PERFORMANCE;
+
+ default:
+ /* This should have already been caught by platform_profile_store(). */
+ WARN(true, "unsupported platform profile");
+ return -EOPNOTSUPP;
+ }
+}
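+
+/*
+ * The mappings above are exercised through the generic platform-profile
+ * sysfs interface, e.g. (illustrative):
+ *
+ *   # cat /sys/firmware/acpi/platform_profile_choices
+ *   low-power balanced balanced-performance performance
+ *   # echo balanced > /sys/firmware/acpi/platform_profile
+ */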
+
+static int ssam_platform_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ struct ssam_tmp_profile_device *tpd;
+ enum ssam_tmp_profile tp;
+ int status;
+
+ tpd = container_of(pprof, struct ssam_tmp_profile_device, handler);
+
+ status = ssam_tmp_profile_get(tpd->sdev, &tp);
+ if (status)
+ return status;
+
+ status = convert_ssam_to_profile(tpd->sdev, tp);
+ if (status < 0)
+ return status;
+
+ *profile = status;
+ return 0;
+}
+
+static int ssam_platform_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ struct ssam_tmp_profile_device *tpd;
+ int tp;
+
+ tpd = container_of(pprof, struct ssam_tmp_profile_device, handler);
+
+ tp = convert_profile_to_ssam(tpd->sdev, profile);
+ if (tp < 0)
+ return tp;
+
+ return ssam_tmp_profile_set(tpd->sdev, tp);
+}
+
+static int surface_platform_profile_probe(struct ssam_device *sdev)
+{
+ struct ssam_tmp_profile_device *tpd;
+
+ tpd = devm_kzalloc(&sdev->dev, sizeof(*tpd), GFP_KERNEL);
+ if (!tpd)
+ return -ENOMEM;
+
+ tpd->sdev = sdev;
+
+ tpd->handler.profile_get = ssam_platform_profile_get;
+ tpd->handler.profile_set = ssam_platform_profile_set;
+
+ set_bit(PLATFORM_PROFILE_LOW_POWER, tpd->handler.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, tpd->handler.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, tpd->handler.choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, tpd->handler.choices);
+
+ return platform_profile_register(&tpd->handler);
+}
+
+static void surface_platform_profile_remove(struct ssam_device *sdev)
+{
+ platform_profile_remove();
+}
+
+static const struct ssam_device_id ssam_platform_profile_match[] = {
+ { SSAM_SDEV(TMP, 0x01, 0x00, 0x01) },
+ { },
+};
+MODULE_DEVICE_TABLE(ssam, ssam_platform_profile_match);
+
+static struct ssam_device_driver surface_platform_profile = {
+ .probe = surface_platform_profile_probe,
+ .remove = surface_platform_profile_remove,
+ .match_table = ssam_platform_profile_match,
+ .driver = {
+ .name = "surface_platform_profile",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_ssam_device_driver(surface_platform_profile);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Platform Profile Support for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surfacepro3_button.c b/drivers/platform/surface/surfacepro3_button.c
index d8afed5db94c..242fb690dcaf 100644
--- a/drivers/platform/surface/surfacepro3_button.c
+++ b/drivers/platform/surface/surfacepro3_button.c
@@ -40,8 +40,6 @@ static const guid_t MSHW0040_DSM_UUID =
#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN 0xc2
#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN 0xc3
-ACPI_MODULE_NAME("surface pro 3 button");
-
MODULE_AUTHOR("Chen Yu");
MODULE_DESCRIPTION("Surface Pro3 Button Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 461ec61530eb..2714f7c3843e 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -123,6 +123,17 @@ config XIAOMI_WMI
To compile this driver as a module, choose M here: the module will
be called xiaomi-wmi.
+config GIGABYTE_WMI
+ tristate "Gigabyte WMI temperature driver"
+ depends on ACPI_WMI
+ depends on HWMON
+ help
+ Say Y here if you want to support WMI-based temperature reporting on
+ Gigabyte mainboards.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gigabyte-wmi.
+
config ACERHDF
tristate "Acer Aspire One temperature and fan driver"
depends on ACPI && THERMAL
@@ -193,6 +204,17 @@ config AMD_PMC
If you choose to compile this driver as a module the module will be
called amd-pmc.
+config ADV_SWBUTTON
+ tristate "Advantech ACPI Software Button Driver"
+ depends on ACPI && INPUT
+ help
+ Say Y here to enable support for the Advantech software-defined
+ button feature. More information can be found at
+ <http://www.advantech.com.tw/products/>
+
+ To compile this driver as a module, choose M here. The module will
+ be called adv_swbutton.
+
config APPLE_GMUX
tristate "Apple Gmux Driver"
depends on ACPI && PCI
@@ -410,6 +432,7 @@ config HP_WMI
depends on INPUT
depends on RFKILL || RFKILL = n
select INPUT_SPARSEKMAP
+ select ACPI_PLATFORM_PROFILE
help
Say Y here if you want to support WMI-based hotkeys on HP laptops and
to read data from WMI such as docking or ambient light sensor state.
@@ -1171,6 +1194,7 @@ config INTEL_MRFLD_PWRBTN
config INTEL_PMC_CORE
tristate "Intel PMC Core driver"
depends on PCI
+ depends on ACPI
help
The Intel Platform Controller Hub for Intel Core SoCs provides access
to Power Management Controller registers via various interfaces. This
@@ -1192,7 +1216,7 @@ config INTEL_PMT_CLASS
tristate
help
The Intel Platform Monitoring Technology (PMT) class driver provides
- the basic sysfs interface and file hierarchy uses by PMT devices.
+ the basic sysfs interface and file hierarchy used by PMT devices.
For more information, see:
<file:Documentation/ABI/testing/sysfs-class-intel_pmt>
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 60d554073749..dcc8cdb95b4d 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_INTEL_WMI_THUNDERBOLT) += intel-wmi-thunderbolt.o
obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
obj-$(CONFIG_PEAQ_WMI) += peaq-wmi.o
obj-$(CONFIG_XIAOMI_WMI) += xiaomi-wmi.o
+obj-$(CONFIG_GIGABYTE_WMI) += gigabyte-wmi.o
# Acer
obj-$(CONFIG_ACERHDF) += acerhdf.o
@@ -24,6 +25,9 @@ obj-$(CONFIG_ACER_WMI) += acer-wmi.o
# AMD
obj-$(CONFIG_AMD_PMC) += amd-pmc.o
+# Advantech
+obj-$(CONFIG_ADV_SWBUTTON) += adv_swbutton.o
+
# Apple
obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o
diff --git a/drivers/platform/x86/adv_swbutton.c b/drivers/platform/x86/adv_swbutton.c
new file mode 100644
index 000000000000..38693b735c87
--- /dev/null
+++ b/drivers/platform/x86/adv_swbutton.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * adv_swbutton.c - Software Button Interface Driver.
+ *
+ * (C) Copyright 2020 Advantech Corporation, Inc
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+
+#define ACPI_BUTTON_HID_SWBTN "AHC0310"
+
+#define ACPI_BUTTON_NOTIFY_SWBTN_RELEASE 0x86
+#define ACPI_BUTTON_NOTIFY_SWBTN_PRESSED 0x85
+
+struct adv_swbutton {
+ struct input_dev *input;
+ char phys[32];
+};
+
+/*-------------------------------------------------------------------------
+ * Driver Interface
+ *--------------------------------------------------------------------------
+ */
+static void adv_swbutton_notify(acpi_handle handle, u32 event, void *context)
+{
+ struct platform_device *device = context;
+ struct adv_swbutton *button = dev_get_drvdata(&device->dev);
+
+ switch (event) {
+ case ACPI_BUTTON_NOTIFY_SWBTN_RELEASE:
+ input_report_key(button->input, KEY_PROG1, 0);
+ input_sync(button->input);
+ break;
+ case ACPI_BUTTON_NOTIFY_SWBTN_PRESSED:
+ input_report_key(button->input, KEY_PROG1, 1);
+ input_sync(button->input);
+ break;
+ default:
+ dev_dbg(&device->dev, "Unsupported event [0x%x]\n", event);
+ }
+}
+
+static int adv_swbutton_probe(struct platform_device *device)
+{
+ struct adv_swbutton *button;
+ struct input_dev *input;
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+ acpi_status status;
+ int error;
+
+ button = devm_kzalloc(&device->dev, sizeof(*button), GFP_KERNEL);
+ if (!button)
+ return -ENOMEM;
+
+ dev_set_drvdata(&device->dev, button);
+
+ input = devm_input_allocate_device(&device->dev);
+ if (!input)
+ return -ENOMEM;
+
+ button->input = input;
+ snprintf(button->phys, sizeof(button->phys), "%s/button/input0", ACPI_BUTTON_HID_SWBTN);
+
+ input->name = "Advantech Software Button";
+ input->phys = button->phys;
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = &device->dev;
+ set_bit(EV_REP, input->evbit);
+ input_set_capability(input, EV_KEY, KEY_PROG1);
+
+ error = input_register_device(input);
+ if (error)
+ return error;
+
+ device_init_wakeup(&device->dev, true);
+
+ status = acpi_install_notify_handler(handle,
+ ACPI_DEVICE_NOTIFY,
+ adv_swbutton_notify,
+ device);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&device->dev, "Error installing notify handler\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int adv_swbutton_remove(struct platform_device *device)
+{
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+
+ acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY,
+ adv_swbutton_notify);
+
+ return 0;
+}
+
+static const struct acpi_device_id button_device_ids[] = {
+ {ACPI_BUTTON_HID_SWBTN, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, button_device_ids);
+
+static struct platform_driver adv_swbutton_driver = {
+ .driver = {
+ .name = "adv_swbutton",
+ .acpi_match_table = button_device_ids,
+ },
+ .probe = adv_swbutton_probe,
+ .remove = adv_swbutton_remove,
+};
+module_platform_driver(adv_swbutton_driver);
+
+MODULE_AUTHOR("Andrea Ho");
+MODULE_DESCRIPTION("Advantech ACPI SW Button Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index bfea656e910c..4d2d32bfbe2a 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -1569,7 +1569,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr,
int idx)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct asus_laptop *asus = dev_get_drvdata(dev);
acpi_handle handle = asus->handle;
bool supported;
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 9ca15f724343..ebaeb7bb80f5 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -47,6 +47,9 @@ MODULE_AUTHOR("Corentin Chary <corentin.chary@gmail.com>, "
MODULE_DESCRIPTION("Asus Generic WMI Driver");
MODULE_LICENSE("GPL");
+static bool fnlock_default = true;
+module_param(fnlock_default, bool, 0444);
+
#define to_asus_wmi_driver(pdrv) \
(container_of((pdrv), struct asus_wmi_driver, platform_driver))
@@ -2673,7 +2676,7 @@ static int asus_wmi_add(struct platform_device *pdev)
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
if (asus_wmi_has_fnlock_key(asus)) {
- asus->fnlock_locked = true;
+ asus->fnlock_locked = fnlock_default;
asus_wmi_fnlock_update(asus);
}
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 3e03e8d3a07f..9309ab5792cb 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -956,7 +956,7 @@ static int cmpc_ipml_add(struct acpi_device *acpi)
/*
* If RFKILL is disabled, rfkill_alloc will return ERR_PTR(-ENODEV).
* This is OK, however, since all other uses of the device will not
- * derefence it.
+ * dereference it.
*/
if (ipml->rf) {
retval = rfkill_register(ipml->rf);
diff --git a/drivers/platform/x86/dell/alienware-wmi.c b/drivers/platform/x86/dell/alienware-wmi.c
index 5bb2859c8285..f21248255529 100644
--- a/drivers/platform/x86/dell/alienware-wmi.c
+++ b/drivers/platform/x86/dell/alienware-wmi.c
@@ -2,7 +2,7 @@
/*
* Alienware AlienFX control
*
- * Copyright (C) 2014 Dell Inc <mario_limonciello@dell.com>
+ * Copyright (C) 2014 Dell Inc <Dell.Client.Kernel@dell.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -26,7 +26,7 @@
#define WMAX_METHOD_DEEP_SLEEP_CONTROL 0x0B
#define WMAX_METHOD_DEEP_SLEEP_STATUS 0x0C
-MODULE_AUTHOR("Mario Limonciello <mario_limonciello@dell.com>");
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
MODULE_DESCRIPTION("Alienware special feature control");
MODULE_LICENSE("GPL");
MODULE_ALIAS("wmi:" LEGACY_CONTROL_GUID);
diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c
index 3a1dbf199441..fc086b66f70b 100644
--- a/drivers/platform/x86/dell/dell-smbios-base.c
+++ b/drivers/platform/x86/dell/dell-smbios-base.c
@@ -647,6 +647,6 @@ module_exit(dell_smbios_exit);
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
MODULE_AUTHOR("Pali RohĂĄr <pali@kernel.org>");
-MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
MODULE_DESCRIPTION("Common functions for kernel modules using Dell SMBIOS");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell/dell-smbios-wmi.c b/drivers/platform/x86/dell/dell-smbios-wmi.c
index 27a298b7c541..a1753485159c 100644
--- a/drivers/platform/x86/dell/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell/dell-smbios-wmi.c
@@ -205,7 +205,7 @@ fail_register:
return ret;
}
-static int dell_smbios_wmi_remove(struct wmi_device *wdev)
+static void dell_smbios_wmi_remove(struct wmi_device *wdev)
{
struct wmi_smbios_priv *priv = dev_get_drvdata(&wdev->dev);
int count;
@@ -218,7 +218,6 @@ static int dell_smbios_wmi_remove(struct wmi_device *wdev)
count = get_order(priv->req_buf_size);
free_pages((unsigned long)priv->buf, count);
mutex_unlock(&call_mutex);
- return 0;
}
static const struct wmi_device_id dell_smbios_wmi_id_table[] = {
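This and the following hunks convert wmi_driver remove callbacks from int to void; the WMI bus core never acted on the return value, so drivers had nothing useful to report. A sketch of the resulting driver shape (all names hypothetical):

#include <linux/wmi.h>

static int demo_wmi_probe(struct wmi_device *wdev, const void *context)
{
	return 0;
}

/* After this series .remove returns void: the WMI core ignored any
 * error code, so there is nothing meaningful to return. */
static void demo_wmi_remove(struct wmi_device *wdev)
{
}

static struct wmi_driver demo_wmi_driver = {
	.driver = {
		.name = "demo-wmi",
	},
	.probe = demo_wmi_probe,
	.remove = demo_wmi_remove,
};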
diff --git a/drivers/platform/x86/dell/dell-wmi-descriptor.c b/drivers/platform/x86/dell/dell-wmi-descriptor.c
index a068900ae8a1..c2a180202719 100644
--- a/drivers/platform/x86/dell/dell-wmi-descriptor.c
+++ b/drivers/platform/x86/dell/dell-wmi-descriptor.c
@@ -174,14 +174,13 @@ out:
return ret;
}
-static int dell_wmi_descriptor_remove(struct wmi_device *wdev)
+static void dell_wmi_descriptor_remove(struct wmi_device *wdev)
{
struct descriptor_priv *priv = dev_get_drvdata(&wdev->dev);
mutex_lock(&list_mutex);
list_del(&priv->list);
mutex_unlock(&list_mutex);
- return 0;
}
static const struct wmi_device_id dell_wmi_descriptor_id_table[] = {
@@ -201,6 +200,6 @@ static struct wmi_driver dell_wmi_descriptor_driver = {
module_wmi_driver(dell_wmi_descriptor_driver);
MODULE_DEVICE_TABLE(wmi, dell_wmi_descriptor_id_table);
-MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
MODULE_DESCRIPTION("Dell WMI descriptor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c b/drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c
index f95d8ddace5a..c2dd2de6bc20 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c
@@ -152,12 +152,11 @@ static int bios_attr_set_interface_probe(struct wmi_device *wdev, const void *co
return 0;
}
-static int bios_attr_set_interface_remove(struct wmi_device *wdev)
+static void bios_attr_set_interface_remove(struct wmi_device *wdev)
{
mutex_lock(&wmi_priv.mutex);
wmi_priv.bios_attr_wdev = NULL;
mutex_unlock(&wmi_priv.mutex);
- return 0;
}
static const struct wmi_device_id bios_attr_set_interface_id_table[] = {
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c b/drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c
index 5780b4d94759..339a082d6c18 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c
@@ -119,12 +119,11 @@ static int bios_attr_pass_interface_probe(struct wmi_device *wdev, const void *c
return 0;
}
-static int bios_attr_pass_interface_remove(struct wmi_device *wdev)
+static void bios_attr_pass_interface_remove(struct wmi_device *wdev)
{
mutex_lock(&wmi_priv.mutex);
wmi_priv.password_attr_wdev = NULL;
mutex_unlock(&wmi_priv.mutex);
- return 0;
}
static const struct wmi_device_id bios_attr_pass_interface_id_table[] = {
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
index 7410ccae650c..c8d276d78e92 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
@@ -399,6 +399,7 @@ static int init_bios_attributes(int attr_type, const char *guid)
union acpi_object *obj = NULL;
union acpi_object *elements;
struct kset *tmp_set;
+ int min_elements;
/* instance_id needs to be reset for each type GUID
* also, instance IDs are unique within GUID but not across
@@ -409,14 +410,38 @@ static int init_bios_attributes(int attr_type, const char *guid)
retval = alloc_attributes_data(attr_type);
if (retval)
return retval;
+
+ switch (attr_type) {
+ case ENUM: min_elements = 8; break;
+ case INT: min_elements = 9; break;
+ case STR: min_elements = 8; break;
+ case PO: min_elements = 4; break;
+ default:
+ pr_err("Error: Unknown attr_type: %d\n", attr_type);
+ return -EINVAL;
+ }
+
/* need to use specific instance_id and guid combination to get right data */
obj = get_wmiobj_pointer(instance_id, guid);
- if (!obj || obj->type != ACPI_TYPE_PACKAGE)
+ if (!obj)
return -ENODEV;
- elements = obj->package.elements;
mutex_lock(&wmi_priv.mutex);
- while (elements) {
+ while (obj) {
+ if (obj->type != ACPI_TYPE_PACKAGE) {
+ pr_err("Error: Expected ACPI-package type, got: %d\n", obj->type);
+ retval = -EIO;
+ goto err_attr_init;
+ }
+
+ if (obj->package.count < min_elements) {
+ pr_err("Error: ACPI-package does not have enough elements: %d < %d\n",
+ obj->package.count, min_elements);
+ goto nextobj;
+ }
+
+ elements = obj->package.elements;
+
/* sanity checking */
if (elements[ATTR_NAME].type != ACPI_TYPE_STRING) {
pr_debug("incorrect element type\n");
@@ -481,7 +506,6 @@ nextobj:
kfree(obj);
instance_id++;
obj = get_wmiobj_pointer(instance_id, guid);
- elements = obj ? obj->package.elements : NULL;
}
mutex_unlock(&wmi_priv.mutex);
@@ -604,7 +628,7 @@ static void __exit sysman_exit(void)
module_init(sysman_init);
module_exit(sysman_exit);
-MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
MODULE_AUTHOR("Prasanth Ksr <prasanth.ksr@dell.com>");
MODULE_AUTHOR("Divya Bharathi <divya.bharathi@dell.com>");
MODULE_DESCRIPTION("Dell platform setting control interface");
diff --git a/drivers/platform/x86/dell/dell-wmi.c b/drivers/platform/x86/dell/dell-wmi.c
index bbdb3e860892..5e1b7f897df5 100644
--- a/drivers/platform/x86/dell/dell-wmi.c
+++ b/drivers/platform/x86/dell/dell-wmi.c
@@ -714,10 +714,9 @@ static int dell_wmi_probe(struct wmi_device *wdev, const void *context)
return dell_wmi_input_setup(wdev);
}
-static int dell_wmi_remove(struct wmi_device *wdev)
+static void dell_wmi_remove(struct wmi_device *wdev)
{
dell_wmi_input_destroy(wdev);
- return 0;
}
static const struct wmi_device_id dell_wmi_id_table[] = {
{ .guid_string = DELL_EVENT_GUID },
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
new file mode 100644
index 000000000000..13d57434e60f
--- /dev/null
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2021 Thomas Weißschuh <thomas@weissschuh.net>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/wmi.h>
+
+#define GIGABYTE_WMI_GUID "DEADBEEF-2001-0000-00A0-C90629100000"
+#define NUM_TEMPERATURE_SENSORS 6
+
+static bool force_load;
+module_param(force_load, bool, 0444);
+MODULE_PARM_DESC(force_load, "Force loading on unknown platform");
+
+static u8 usable_sensors_mask;
+
+enum gigabyte_wmi_commandtype {
+ GIGABYTE_WMI_BUILD_DATE_QUERY = 0x1,
+ GIGABYTE_WMI_MAINBOARD_TYPE_QUERY = 0x2,
+ GIGABYTE_WMI_FIRMWARE_VERSION_QUERY = 0x4,
+ GIGABYTE_WMI_MAINBOARD_NAME_QUERY = 0x5,
+ GIGABYTE_WMI_TEMPERATURE_QUERY = 0x125,
+};
+
+struct gigabyte_wmi_args {
+ u32 arg1;
+};
+
+static int gigabyte_wmi_perform_query(struct wmi_device *wdev,
+ enum gigabyte_wmi_commandtype command,
+ struct gigabyte_wmi_args *args, struct acpi_buffer *out)
+{
+ const struct acpi_buffer in = {
+ .length = sizeof(*args),
+ .pointer = args,
+ };
+
+ acpi_status ret = wmidev_evaluate_method(wdev, 0x0, command, &in, out);
+
+ if (ACPI_FAILURE(ret))
+ return -EIO;
+
+ return 0;
+}
+
+static int gigabyte_wmi_query_integer(struct wmi_device *wdev,
+ enum gigabyte_wmi_commandtype command,
+ struct gigabyte_wmi_args *args, u64 *res)
+{
+ union acpi_object *obj;
+ struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
+ int ret;
+
+ ret = gigabyte_wmi_perform_query(wdev, command, args, &result);
+ if (ret)
+ return ret;
+ obj = result.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ *res = obj->integer.value;
+ else
+ ret = -EIO;
+ kfree(result.pointer);
+ return ret;
+}
+
+static int gigabyte_wmi_temperature(struct wmi_device *wdev, u8 sensor, long *res)
+{
+ struct gigabyte_wmi_args args = {
+ .arg1 = sensor,
+ };
+ u64 temp;
+ int ret;
+
+ ret = gigabyte_wmi_query_integer(wdev, GIGABYTE_WMI_TEMPERATURE_QUERY, &args, &temp);
+ if (ret == 0) {
+ if (temp == 0)
+ return -ENODEV;
+ *res = (s8)temp * 1000; // value is a signed 8-bit integer
+ }
+ return ret;
+}
+
+static int gigabyte_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct wmi_device *wdev = dev_get_drvdata(dev);
+
+ return gigabyte_wmi_temperature(wdev, channel, val);
+}
+
+static umode_t gigabyte_wmi_hwmon_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return usable_sensors_mask & BIT(channel) ? 0444 : 0;
+}
+
+static const struct hwmon_channel_info *gigabyte_wmi_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops gigabyte_wmi_hwmon_ops = {
+ .read = gigabyte_wmi_hwmon_read,
+ .is_visible = gigabyte_wmi_hwmon_is_visible,
+};
+
+static const struct hwmon_chip_info gigabyte_wmi_hwmon_chip_info = {
+ .ops = &gigabyte_wmi_hwmon_ops,
+ .info = gigabyte_wmi_hwmon_info,
+};
+
+static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
+{
+ int i;
+ long temp;
+ u8 r = 0;
+
+ for (i = 0; i < NUM_TEMPERATURE_SENSORS; i++) {
+ if (!gigabyte_wmi_temperature(wdev, i, &temp))
+ r |= BIT(i);
+ }
+ return r;
+}
+
+static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+ { .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550 GAMING X V2"),
+ }},
+ { .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550M AORUS PRO-P"),
+ }},
+ { .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550M DS3H"),
+ }},
+ { .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Z390 I AORUS PRO WIFI-CF"),
+ }},
+ { .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X570 AORUS ELITE"),
+ }},
+ { .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X570 I AORUS PRO WIFI"),
+ }},
+ { }
+};
+
+static int gigabyte_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct device *hwmon_dev;
+
+ if (!dmi_check_system(gigabyte_wmi_known_working_platforms)) {
+ if (!force_load)
+ return -ENODEV;
+ dev_warn(&wdev->dev, "Forcing load on unknown platform");
+ }
+
+ usable_sensors_mask = gigabyte_wmi_detect_sensor_usability(wdev);
+ if (!usable_sensors_mask) {
+ dev_info(&wdev->dev, "No temperature sensors usable");
+ return -ENODEV;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(&wdev->dev, "gigabyte_wmi", wdev,
+ &gigabyte_wmi_hwmon_chip_info, NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct wmi_device_id gigabyte_wmi_id_table[] = {
+ { GIGABYTE_WMI_GUID, NULL },
+ { }
+};
+
+static struct wmi_driver gigabyte_wmi_driver = {
+ .driver = {
+ .name = "gigabyte-wmi",
+ },
+ .id_table = gigabyte_wmi_id_table,
+ .probe = gigabyte_wmi_probe,
+};
+module_wmi_driver(gigabyte_wmi_driver);
+
+MODULE_DEVICE_TABLE(wmi, gigabyte_wmi_id_table);
+MODULE_AUTHOR("Thomas Weißschuh <thomas@weissschuh.net>");
+MODULE_DESCRIPTION("Gigabyte WMI temperature driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index e94e59283ecb..027a1467d009 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -21,6 +21,7 @@
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/platform_device.h>
+#include <linux/platform_profile.h>
#include <linux/acpi.h>
#include <linux/rfkill.h>
#include <linux/string.h>
@@ -85,7 +86,7 @@ enum hp_wmi_commandtype {
HPWMI_FEATURE2_QUERY = 0x0d,
HPWMI_WIRELESS2_QUERY = 0x1b,
HPWMI_POSTCODEERROR_QUERY = 0x2a,
- HPWMI_THERMAL_POLICY_QUERY = 0x4c,
+ HPWMI_THERMAL_PROFILE_QUERY = 0x4c,
};
enum hp_wmi_command {
@@ -119,6 +120,12 @@ enum hp_wireless2_bits {
HPWMI_POWER_FW_OR_HW = HPWMI_POWER_BIOS | HPWMI_POWER_HARD,
};
+enum hp_thermal_profile {
+ HP_THERMAL_PROFILE_PERFORMANCE = 0x00,
+ HP_THERMAL_PROFILE_DEFAULT = 0x01,
+ HP_THERMAL_PROFILE_COOL = 0x02
+};
+
#define IS_HWBLOCKED(x) ((x & HPWMI_POWER_FW_OR_HW) != HPWMI_POWER_FW_OR_HW)
#define IS_SWBLOCKED(x) !(x & HPWMI_POWER_SOFT)
@@ -159,6 +166,8 @@ static const struct key_entry hp_wmi_keymap[] = {
static struct input_dev *hp_wmi_input_dev;
static struct platform_device *hp_wmi_platform_dev;
+static struct platform_profile_handler platform_profile_handler;
+static bool platform_profile_support;
static struct rfkill *wifi_rfkill;
static struct rfkill *bluetooth_rfkill;
@@ -869,23 +878,98 @@ fail:
return err;
}
-static int thermal_policy_setup(struct platform_device *device)
+static int thermal_profile_get(void)
+{
+ return hp_wmi_read_int(HPWMI_THERMAL_PROFILE_QUERY);
+}
+
+static int thermal_profile_set(int thermal_profile)
+{
+ return hp_wmi_perform_query(HPWMI_THERMAL_PROFILE_QUERY, HPWMI_WRITE, &thermal_profile,
+ sizeof(thermal_profile), 0);
+}
+
+static int platform_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ int tp;
+
+ tp = thermal_profile_get();
+ if (tp < 0)
+ return tp;
+
+ switch (tp) {
+ case HP_THERMAL_PROFILE_PERFORMANCE:
+ *profile = PLATFORM_PROFILE_PERFORMANCE;
+ break;
+ case HP_THERMAL_PROFILE_DEFAULT:
+ *profile = PLATFORM_PROFILE_BALANCED;
+ break;
+ case HP_THERMAL_PROFILE_COOL:
+ *profile = PLATFORM_PROFILE_COOL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int platform_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
{
int err, tp;
- tp = hp_wmi_read_int(HPWMI_THERMAL_POLICY_QUERY);
+ switch (profile) {
+ case PLATFORM_PROFILE_PERFORMANCE:
+ tp = HP_THERMAL_PROFILE_PERFORMANCE;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ tp = HP_THERMAL_PROFILE_DEFAULT;
+ break;
+ case PLATFORM_PROFILE_COOL:
+ tp = HP_THERMAL_PROFILE_COOL;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = thermal_profile_set(tp);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int thermal_profile_setup(void)
+{
+ int err, tp;
+
+ tp = thermal_profile_get();
if (tp < 0)
return tp;
/*
- * call thermal policy write command to ensure that the firmware correctly
+ * call thermal profile write command to ensure that the firmware correctly
* sets the OEM variables for the DPTF
*/
- err = hp_wmi_perform_query(HPWMI_THERMAL_POLICY_QUERY, HPWMI_WRITE, &tp,
- sizeof(tp), 0);
+ err = thermal_profile_set(tp);
if (err)
return err;
+ platform_profile_handler.profile_get = platform_profile_get;
+ platform_profile_handler.profile_set = platform_profile_set;
+
+ set_bit(PLATFORM_PROFILE_COOL, platform_profile_handler.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, platform_profile_handler.choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, platform_profile_handler.choices);
+
+ err = platform_profile_register(&platform_profile_handler);
+ if (err)
+ return err;
+
+ platform_profile_support = true;
+
return 0;
}
@@ -900,7 +984,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
if (hp_wmi_rfkill_setup(device))
hp_wmi_rfkill2_setup(device);
- thermal_policy_setup(device);
+ thermal_profile_setup();
return 0;
}
@@ -927,6 +1011,9 @@ static int __exit hp_wmi_bios_remove(struct platform_device *device)
rfkill_destroy(wwan_rfkill);
}
+ if (platform_profile_support)
+ platform_profile_remove();
+
return 0;
}
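hp-wmi now plugs its thermal modes into the common platform_profile framework. A minimal registration sketch, assuming firmware supporting the same three modes (the demo_* names are hypothetical):

#include <linux/bitops.h>
#include <linux/platform_profile.h>

static int demo_profile_get(struct platform_profile_handler *pprof,
			    enum platform_profile_option *profile)
{
	*profile = PLATFORM_PROFILE_BALANCED;	/* query firmware here */
	return 0;
}

static int demo_profile_set(struct platform_profile_handler *pprof,
			    enum platform_profile_option profile)
{
	return 0;	/* push the new mode to firmware here */
}

static struct platform_profile_handler demo_pprof = {
	.profile_get = demo_profile_get,
	.profile_set = demo_profile_set,
};

static int demo_profile_init(void)
{
	/* Advertise only the modes the firmware implements. */
	set_bit(PLATFORM_PROFILE_COOL, demo_pprof.choices);
	set_bit(PLATFORM_PROFILE_BALANCED, demo_pprof.choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, demo_pprof.choices);

	return platform_profile_register(&demo_pprof);
}

Once registered, the active profile is read and written through /sys/firmware/acpi/platform_profile.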
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 57cc92891a57..078648a9201b 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -483,11 +483,16 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
goto wakeup;
/*
- * Switch events will wake the device and report the new switch
- * position to the input subsystem.
+ * Some devices send (duplicate) tablet-mode events when moved
+ * around even though the mode has not changed; and they do this
+ * even when suspended.
+ * Update the switch state in case it changed and then return
+ * without waking up to avoid spurious wakeups.
*/
- if (priv->switches && (event == 0xcc || event == 0xcd))
- goto wakeup;
+ if (event == 0xcc || event == 0xcd) {
+ report_tablet_mode_event(priv->switches, event);
+ return;
+ }
/* Wake up on 5-button array events only. */
if (event == 0xc0 || !priv->array)
@@ -501,9 +506,6 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
wakeup:
pm_wakeup_hard_event(&device->dev);
- if (report_tablet_mode_event(priv->switches, event))
- return;
-
return;
}
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 3fdf4cbec9ad..888a764efad1 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -63,9 +63,6 @@ static const struct key_entry intel_vbtn_switchmap[] = {
{ KE_END }
};
-#define KEYMAP_LEN \
- (ARRAY_SIZE(intel_vbtn_keymap) + ARRAY_SIZE(intel_vbtn_switchmap) + 1)
-
struct intel_vbtn_priv {
struct input_dev *buttons_dev;
struct input_dev *switches_dev;
diff --git a/drivers/platform/x86/intel-wmi-sbl-fw-update.c b/drivers/platform/x86/intel-wmi-sbl-fw-update.c
index ea87fa0786e8..3c86e0108a24 100644
--- a/drivers/platform/x86/intel-wmi-sbl-fw-update.c
+++ b/drivers/platform/x86/intel-wmi-sbl-fw-update.c
@@ -117,10 +117,9 @@ static int intel_wmi_sbl_fw_update_probe(struct wmi_device *wdev,
return 0;
}
-static int intel_wmi_sbl_fw_update_remove(struct wmi_device *wdev)
+static void intel_wmi_sbl_fw_update_remove(struct wmi_device *wdev)
{
dev_info(&wdev->dev, "Slim Bootloader signaling driver removed\n");
- return 0;
}
static const struct wmi_device_id intel_wmi_sbl_id_table[] = {
diff --git a/drivers/platform/x86/intel-wmi-thunderbolt.c b/drivers/platform/x86/intel-wmi-thunderbolt.c
index 974c22a7ff61..4ae87060d18b 100644
--- a/drivers/platform/x86/intel-wmi-thunderbolt.c
+++ b/drivers/platform/x86/intel-wmi-thunderbolt.c
@@ -66,11 +66,10 @@ static int intel_wmi_thunderbolt_probe(struct wmi_device *wdev,
return ret;
}
-static int intel_wmi_thunderbolt_remove(struct wmi_device *wdev)
+static void intel_wmi_thunderbolt_remove(struct wmi_device *wdev)
{
sysfs_remove_group(&wdev->dev.kobj, &tbt_attribute_group);
kobject_uevent(&wdev->dev.kobj, KOBJ_CHANGE);
- return 0;
}
static const struct wmi_device_id intel_wmi_thunderbolt_id_table[] = {
diff --git a/drivers/platform/x86/intel_chtdc_ti_pwrbtn.c b/drivers/platform/x86/intel_chtdc_ti_pwrbtn.c
index 0df2e82dd249..9606a994af22 100644
--- a/drivers/platform/x86/intel_chtdc_ti_pwrbtn.c
+++ b/drivers/platform/x86/intel_chtdc_ti_pwrbtn.c
@@ -58,7 +58,7 @@ static int chtdc_ti_pwrbtn_probe(struct platform_device *pdev)
err = devm_request_threaded_irq(dev, irq, NULL,
chtdc_ti_pwrbtn_interrupt,
- 0, KBUILD_MODNAME, input);
+ IRQF_ONESHOT, KBUILD_MODNAME, input);
if (err)
return err;
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index b5888aeb4bcf..b0e486a6bdfb 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -23,7 +23,9 @@
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/uaccess.h>
+#include <linux/uuid.h>
+#include <acpi/acpi_bus.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>
@@ -31,7 +33,8 @@
#include "intel_pmc_core.h"
-static struct pmc_dev pmc;
+#define ACPI_S0IX_DSM_UUID "57a6512e-3979-4e9d-9708-ff13b2508972"
+#define ACPI_GET_LOW_MODE_REGISTERS 1
/* PKGC MSRs are common across Intel Core SoCs */
static const struct pmc_bit_map msr_map[] = {
@@ -380,6 +383,8 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = {
* a list of core SoCs using this.
*/
{"WIGIG", ICL_PMC_LTR_WIGIG},
+ {"THC0", TGL_PMC_LTR_THC0},
+ {"THC1", TGL_PMC_LTR_THC1},
/* Below two cannot be used for LTR_IGNORE */
{"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
{"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT},
@@ -401,6 +406,7 @@ static const struct pmc_reg_map cnp_reg_map = {
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
.ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED,
+ .etr3_offset = ETR3_OFFSET,
};
static const struct pmc_reg_map icl_reg_map = {
@@ -418,6 +424,7 @@ static const struct pmc_reg_map icl_reg_map = {
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
.ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
+ .etr3_offset = ETR3_OFFSET,
};
static const struct pmc_bit_map tgl_clocksource_status_map[] = {
@@ -579,14 +586,65 @@ static const struct pmc_reg_map tgl_reg_map = {
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
.ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED,
- .lpm_modes = tgl_lpm_modes,
+ .lpm_num_maps = TGL_LPM_NUM_MAPS,
+ .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
+ .lpm_sts_latch_en_offset = TGL_LPM_STS_LATCH_EN_OFFSET,
.lpm_en_offset = TGL_LPM_EN_OFFSET,
+ .lpm_priority_offset = TGL_LPM_PRI_OFFSET,
.lpm_residency_offset = TGL_LPM_RESIDENCY_OFFSET,
.lpm_sts = tgl_lpm_maps,
.lpm_status_offset = TGL_LPM_STATUS_OFFSET,
.lpm_live_status_offset = TGL_LPM_LIVE_STATUS_OFFSET,
+ .etr3_offset = ETR3_OFFSET,
};
+static void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev)
+{
+ struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
+ const int num_maps = pmcdev->map->lpm_num_maps;
+ u32 lpm_size = LPM_MAX_NUM_MODES * num_maps * 4;
+ union acpi_object *out_obj;
+ struct acpi_device *adev;
+ guid_t s0ix_dsm_guid;
+ u32 *lpm_req_regs, *addr;
+
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (!adev)
+ return;
+
+ guid_parse(ACPI_S0IX_DSM_UUID, &s0ix_dsm_guid);
+
+ out_obj = acpi_evaluate_dsm(adev->handle, &s0ix_dsm_guid, 0,
+ ACPI_GET_LOW_MODE_REGISTERS, NULL);
+ if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
+ u32 size = out_obj->buffer.length;
+
+ if (size != lpm_size) {
+ acpi_handle_debug(adev->handle,
+ "_DSM returned unexpected buffer size, have %u, expect %u\n",
+ size, lpm_size);
+ goto free_acpi_obj;
+ }
+ } else {
+ acpi_handle_debug(adev->handle,
+ "_DSM function 0 evaluation failed\n");
+ goto free_acpi_obj;
+ }
+
+ addr = (u32 *)out_obj->buffer.pointer;
+
+ lpm_req_regs = devm_kzalloc(&pdev->dev, lpm_size, GFP_KERNEL);
+ if (!lpm_req_regs)
+ goto free_acpi_obj;
+
+ memcpy(lpm_req_regs, addr, lpm_size);
+ pmcdev->lpm_req_regs = lpm_req_regs;
+
+free_acpi_obj:
+ ACPI_FREE(out_obj);
+}
+
static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
{
return readl(pmcdev->regbase + reg_offset);
@@ -603,6 +661,115 @@ static inline u64 pmc_core_adjust_slp_s0_step(struct pmc_dev *pmcdev, u32 value)
return (u64)value * pmcdev->map->slp_s0_res_counter_step;
}
+static int set_etr3(struct pmc_dev *pmcdev)
+{
+ const struct pmc_reg_map *map = pmcdev->map;
+ u32 reg;
+ int err;
+
+ if (!map->etr3_offset)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&pmcdev->lock);
+
+ /* check if CF9 is locked */
+ reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
+ if (reg & ETR3_CF9LOCK) {
+ err = -EACCES;
+ goto out_unlock;
+ }
+
+ /* write CF9 global reset bit */
+ reg |= ETR3_CF9GR;
+ pmc_core_reg_write(pmcdev, map->etr3_offset, reg);
+
+ reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
+ if (!(reg & ETR3_CF9GR)) {
+ err = -EIO;
+ goto out_unlock;
+ }
+
+ err = 0;
+
+out_unlock:
+ mutex_unlock(&pmcdev->lock);
+ return err;
+}
+static umode_t etr3_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int idx)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ const struct pmc_reg_map *map = pmcdev->map;
+ u32 reg;
+
+ mutex_lock(&pmcdev->lock);
+ reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
+ mutex_unlock(&pmcdev->lock);
+
+ return reg & ETR3_CF9LOCK ? attr->mode & (SYSFS_PREALLOC | 0444) : attr->mode;
+}
+
+static ssize_t etr3_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ const struct pmc_reg_map *map = pmcdev->map;
+ u32 reg;
+
+ if (!map->etr3_offset)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&pmcdev->lock);
+
+ reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
+ reg &= ETR3_CF9GR | ETR3_CF9LOCK;
+
+ mutex_unlock(&pmcdev->lock);
+
+ return sysfs_emit(buf, "0x%08x\n", reg);
+}
+
+static ssize_t etr3_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ int err;
+ u32 reg;
+
+ err = kstrtouint(buf, 16, &reg);
+ if (err)
+ return err;
+
+ /* allow only CF9 writes */
+ if (reg != ETR3_CF9GR)
+ return -EINVAL;
+
+ err = set_etr3(pmcdev);
+ if (err)
+ return err;
+
+ return len;
+}
+static DEVICE_ATTR_RW(etr3);
+
+static struct attribute *pmc_attrs[] = {
+ &dev_attr_etr3.attr,
+ NULL
+};
+
+static const struct attribute_group pmc_attr_group = {
+ .attrs = pmc_attrs,
+ .is_visible = etr3_is_visible,
+};
+
+static const struct attribute_group *pmc_dev_groups[] = {
+ &pmc_attr_group,
+ NULL
+};
+
static int pmc_core_dev_state_get(void *data, u64 *val)
{
struct pmc_dev *pmcdev = data;
@@ -617,9 +784,8 @@ static int pmc_core_dev_state_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");
-static int pmc_core_check_read_lock_bit(void)
+static int pmc_core_check_read_lock_bit(struct pmc_dev *pmcdev)
{
- struct pmc_dev *pmcdev = &pmc;
u32 value;
value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_cfg_offset);
@@ -744,28 +910,26 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear);
/* This function should return link status, 0 means ready */
-static int pmc_core_mtpmc_link_status(void)
+static int pmc_core_mtpmc_link_status(struct pmc_dev *pmcdev)
{
- struct pmc_dev *pmcdev = &pmc;
u32 value;
value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_STS_OFFSET);
return value & BIT(SPT_PMC_MSG_FULL_STS_BIT);
}
-static int pmc_core_send_msg(u32 *addr_xram)
+static int pmc_core_send_msg(struct pmc_dev *pmcdev, u32 *addr_xram)
{
- struct pmc_dev *pmcdev = &pmc;
u32 dest;
int timeout;
for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
- if (pmc_core_mtpmc_link_status() == 0)
+ if (pmc_core_mtpmc_link_status(pmcdev) == 0)
break;
msleep(5);
}
- if (timeout <= 0 && pmc_core_mtpmc_link_status())
+ if (timeout <= 0 && pmc_core_mtpmc_link_status(pmcdev))
return -EBUSY;
dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
@@ -791,7 +955,7 @@ static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
mutex_lock(&pmcdev->lock);
- if (pmc_core_send_msg(&mphy_core_reg_low) != 0) {
+ if (pmc_core_send_msg(pmcdev, &mphy_core_reg_low) != 0) {
err = -EBUSY;
goto out_unlock;
}
@@ -799,7 +963,7 @@ static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
msleep(10);
val_low = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
- if (pmc_core_send_msg(&mphy_core_reg_high) != 0) {
+ if (pmc_core_send_msg(pmcdev, &mphy_core_reg_high) != 0) {
err = -EBUSY;
goto out_unlock;
}
@@ -842,7 +1006,7 @@ static int pmc_core_pll_show(struct seq_file *s, void *unused)
mphy_common_reg = (SPT_PMC_MPHY_COM_STS_0 << 16);
mutex_lock(&pmcdev->lock);
- if (pmc_core_send_msg(&mphy_common_reg) != 0) {
+ if (pmc_core_send_msg(pmcdev, &mphy_common_reg) != 0) {
err = -EBUSY;
goto out_unlock;
}
@@ -863,9 +1027,8 @@ out_unlock:
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
-static int pmc_core_send_ltr_ignore(u32 value)
+static int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value)
{
- struct pmc_dev *pmcdev = &pmc;
const struct pmc_reg_map *map = pmcdev->map;
u32 reg;
int err = 0;
@@ -891,6 +1054,8 @@ static ssize_t pmc_core_ltr_ignore_write(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
+ struct seq_file *s = file->private_data;
+ struct pmc_dev *pmcdev = s->private;
u32 buf_size, value;
int err;
@@ -900,7 +1065,7 @@ static ssize_t pmc_core_ltr_ignore_write(struct file *file,
if (err)
return err;
- err = pmc_core_send_ltr_ignore(value);
+ err = pmc_core_send_ltr_ignore(pmcdev, value);
return err == 0 ? count : err;
}
@@ -1029,21 +1194,26 @@ static int pmc_core_ltr_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);
+static inline u64 adjust_lpm_residency(struct pmc_dev *pmcdev, u32 offset,
+ const int lpm_adj_x2)
+{
+ u64 lpm_res = pmc_core_reg_read(pmcdev, offset);
+
+ return GET_X2_COUNTER((u64)lpm_adj_x2 * lpm_res);
+}
+
static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- const char **lpm_modes = pmcdev->map->lpm_modes;
+ const int lpm_adj_x2 = pmcdev->map->lpm_res_counter_step_x2;
u32 offset = pmcdev->map->lpm_residency_offset;
- u32 lpm_en;
- int index;
+ int i, mode;
- lpm_en = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_en_offset);
- seq_printf(s, "status substate residency\n");
- for (index = 0; lpm_modes[index]; index++) {
- seq_printf(s, "%7s %7s %-15u\n",
- BIT(index) & lpm_en ? "Enabled" : " ",
- lpm_modes[index], pmc_core_reg_read(pmcdev, offset));
- offset += 4;
+ seq_printf(s, "%-10s %-15s\n", "Substate", "Residency");
+
+ pmc_for_each_mode(i, mode, pmcdev) {
+ seq_printf(s, "%-10s %-15llu\n", pmc_lpm_modes[mode],
+ adjust_lpm_residency(pmcdev, offset + (4 * mode), lpm_adj_x2));
}
return 0;
@@ -1074,6 +1244,190 @@ static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);
+static void pmc_core_substate_req_header_show(struct seq_file *s)
+{
+ struct pmc_dev *pmcdev = s->private;
+ int i, mode;
+
+ seq_printf(s, "%30s |", "Element");
+ pmc_for_each_mode(i, mode, pmcdev)
+ seq_printf(s, " %9s |", pmc_lpm_modes[mode]);
+
+ seq_printf(s, " %9s |\n", "Status");
+}
+
+static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
+ const struct pmc_bit_map *map;
+ const int num_maps = pmcdev->map->lpm_num_maps;
+ u32 sts_offset = pmcdev->map->lpm_status_offset;
+ u32 *lpm_req_regs = pmcdev->lpm_req_regs;
+ int mp;
+
+ /* Display the header */
+ pmc_core_substate_req_header_show(s);
+
+ /* Loop over maps */
+ for (mp = 0; mp < num_maps; mp++) {
+ u32 req_mask = 0;
+ u32 lpm_status;
+ int mode, idx, i, len = 32;
+
+ /*
+ * Capture the requirements and create a mask so that we only
+ * show an element if it's required for at least one of the
+ * enabled low power modes
+ */
+ pmc_for_each_mode(idx, mode, pmcdev)
+ req_mask |= lpm_req_regs[mp + (mode * num_maps)];
+
+ /* Get the last latched status for this map */
+ lpm_status = pmc_core_reg_read(pmcdev, sts_offset + (mp * 4));
+
+ /* Loop over elements in this map */
+ map = maps[mp];
+ for (i = 0; i < len && map[i].name; i++) {
+ u32 bit_mask = map[i].bit_mask;
+
+ if (!(bit_mask & req_mask))
+ /*
+ * Not required for any enabled states
+ * so don't display
+ */
+ continue;
+
+ /* Display the element name in the first column */
+ seq_printf(s, "%30s |", map[i].name);
+
+ /* Loop over the enabled states and display if required */
+ pmc_for_each_mode(idx, mode, pmcdev) {
+ if (lpm_req_regs[mp + (mode * num_maps)] & bit_mask)
+ seq_printf(s, " %9s |",
+ "Required");
+ else
+ seq_printf(s, " %9s |", " ");
+ }
+
+ /* In Status column, show the last captured state of this agent */
+ if (lpm_status & bit_mask)
+ seq_printf(s, " %9s |", "Yes");
+ else
+ seq_printf(s, " %9s |", " ");
+
+ seq_puts(s, "\n");
+ }
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs);
+
+static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ bool c10;
+ u32 reg;
+ int idx, mode;
+
+ reg = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_sts_latch_en_offset);
+ if (reg & LPM_STS_LATCH_MODE) {
+ seq_puts(s, "c10");
+ c10 = false;
+ } else {
+ seq_puts(s, "[c10]");
+ c10 = true;
+ }
+
+ pmc_for_each_mode(idx, mode, pmcdev) {
+ if ((BIT(mode) & reg) && !c10)
+ seq_printf(s, " [%s]", pmc_lpm_modes[mode]);
+ else
+ seq_printf(s, " %s", pmc_lpm_modes[mode]);
+ }
+
+ seq_puts(s, " clear\n");
+
+ return 0;
+}
+
+static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct pmc_dev *pmcdev = s->private;
+ bool clear = false, c10 = false;
+ unsigned char buf[8];
+ int idx, m, mode;
+ u32 reg;
+
+ if (count > sizeof(buf) - 1)
+ return -EINVAL;
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+ buf[count] = '\0';
+
+ /*
+ * Allowed strings are:
+ * Any enabled substate, e.g. 'S0i2.0'
+ * 'c10'
+ * 'clear'
+ */
+ mode = sysfs_match_string(pmc_lpm_modes, buf);
+
+ /* Check string matches enabled mode */
+ pmc_for_each_mode(idx, m, pmcdev)
+ if (mode == m)
+ break;
+
+ if (mode != m || mode < 0) {
+ if (sysfs_streq(buf, "clear"))
+ clear = true;
+ else if (sysfs_streq(buf, "c10"))
+ c10 = true;
+ else
+ return -EINVAL;
+ }
+
+ if (clear) {
+ mutex_lock(&pmcdev->lock);
+
+ reg = pmc_core_reg_read(pmcdev, pmcdev->map->etr3_offset);
+ reg |= ETR3_CLEAR_LPM_EVENTS;
+ pmc_core_reg_write(pmcdev, pmcdev->map->etr3_offset, reg);
+
+ mutex_unlock(&pmcdev->lock);
+
+ return count;
+ }
+
+ if (c10) {
+ mutex_lock(&pmcdev->lock);
+
+ reg = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_sts_latch_en_offset);
+ reg &= ~LPM_STS_LATCH_MODE;
+ pmc_core_reg_write(pmcdev, pmcdev->map->lpm_sts_latch_en_offset, reg);
+
+ mutex_unlock(&pmcdev->lock);
+
+ return count;
+ }
+
+ /*
+ * For LPM mode latching we set the latch enable bit and selected mode
+ * and clear everything else.
+ */
+ reg = LPM_STS_LATCH_MODE | BIT(mode);
+ mutex_lock(&pmcdev->lock);
+ pmc_core_reg_write(pmcdev, pmcdev->map->lpm_sts_latch_en_offset, reg);
+ mutex_unlock(&pmcdev->lock);
+
+ return count;
+}
+DEFINE_PMC_CORE_ATTR_WRITE(pmc_core_lpm_latch_mode);
+
static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
@@ -1095,6 +1449,45 @@ static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);
+static void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev)
+{
+ u8 lpm_priority[LPM_MAX_NUM_MODES];
+ u32 lpm_en;
+ int mode, i, p;
+
+ /* Use LPM Maps to indicate support for substates */
+ if (!pmcdev->map->lpm_num_maps)
+ return;
+
+ lpm_en = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_en_offset);
+ pmcdev->num_lpm_modes = hweight32(lpm_en);
+
+ /* Each byte contains information for 2 modes (7:4 and 3:0) */
+ for (mode = 0; mode < LPM_MAX_NUM_MODES; mode += 2) {
+ u8 priority = pmc_core_reg_read_byte(pmcdev,
+ pmcdev->map->lpm_priority_offset + (mode / 2));
+ int pri0 = GENMASK(3, 0) & priority;
+ int pri1 = (GENMASK(7, 4) & priority) >> 4;
+
+ if (pri0 < LPM_MAX_NUM_MODES)
+ lpm_priority[pri0] = mode;
+ if (pri1 < LPM_MAX_NUM_MODES)
+ lpm_priority[pri1] = mode + 1;
+ }
+
+ /*
+ * Loop though all modes from lowest to highest priority,
+ * and capture all enabled modes in order
+ */
+ i = 0;
+ for (p = LPM_MAX_NUM_MODES - 1; p >= 0; p--) {
+ int mode = lpm_priority[p];
+
+ if (!(BIT(mode) & lpm_en))
+ continue;
+
+ pmcdev->lpm_en_modes[i++] = mode;
+ }
+}
+
static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
debugfs_remove_recursive(pmcdev->dbgfs_dir);
@@ -1153,6 +1546,15 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
debugfs_create_file("substate_live_status_registers", 0444,
pmcdev->dbgfs_dir, pmcdev,
&pmc_core_substate_l_sts_regs_fops);
+ debugfs_create_file("lpm_latch_mode", 0644,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_lpm_latch_mode_fops);
+ }
+
+ if (pmcdev->lpm_req_regs) {
+ debugfs_create_file("substate_requirements", 0444,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_substate_req_regs_fops);
}
}
@@ -1171,6 +1573,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &tgl_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &icl_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &tgl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &tgl_reg_map),
{}
};
@@ -1186,9 +1589,15 @@ static const struct pci_device_id pmc_pci_ids[] = {
* the platform BIOS enforces 24Mhz crystal to shutdown
* before PMC can assert SLP_S0#.
*/
+static bool xtal_ignore;
static int quirk_xtal_ignore(const struct dmi_system_id *id)
{
- struct pmc_dev *pmcdev = &pmc;
+ xtal_ignore = true;
+ return 0;
+}
+
+static void pmc_core_xtal_ignore(struct pmc_dev *pmcdev)
+{
u32 value;
value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
@@ -1197,7 +1606,6 @@ static int quirk_xtal_ignore(const struct dmi_system_id *id)
/* Low Voltage Mode Enable */
value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
- return 0;
}
static const struct dmi_system_id pmc_core_dmi_table[] = {
@@ -1212,16 +1620,30 @@ static const struct dmi_system_id pmc_core_dmi_table[] = {
{}
};
+static void pmc_core_do_dmi_quirks(struct pmc_dev *pmcdev)
+{
+ dmi_check_system(pmc_core_dmi_table);
+
+ if (xtal_ignore)
+ pmc_core_xtal_ignore(pmcdev);
+}
+
static int pmc_core_probe(struct platform_device *pdev)
{
static bool device_initialized;
- struct pmc_dev *pmcdev = &pmc;
+ struct pmc_dev *pmcdev;
const struct x86_cpu_id *cpu_id;
u64 slp_s0_addr;
if (device_initialized)
return -ENODEV;
+ pmcdev = devm_kzalloc(&pdev->dev, sizeof(*pmcdev), GFP_KERNEL);
+ if (!pmcdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pmcdev);
+
cpu_id = x86_match_cpu(intel_pmc_core_ids);
if (!cpu_id)
return -ENODEV;
@@ -1251,9 +1673,13 @@ static int pmc_core_probe(struct platform_device *pdev)
return -ENOMEM;
mutex_init(&pmcdev->lock);
- platform_set_drvdata(pdev, pmcdev);
- pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
- dmi_check_system(pmc_core_dmi_table);
+
+ pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(pmcdev);
+ pmc_core_get_low_power_modes(pmcdev);
+ pmc_core_do_dmi_quirks(pmcdev);
+
+ if (pmcdev->map == &tgl_reg_map)
+ pmc_core_get_tgl_lpm_reqs(pdev);
/*
* On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when
@@ -1261,7 +1687,7 @@ static int pmc_core_probe(struct platform_device *pdev)
*/
if (pmcdev->map == &tgl_reg_map) {
dev_dbg(&pdev->dev, "ignoring GBE LTR\n");
- pmc_core_send_ltr_ignore(3);
+ pmc_core_send_ltr_ignore(pmcdev, 3);
}
pmc_core_dbgfs_register(pmcdev);
@@ -1384,6 +1810,7 @@ static struct platform_driver pmc_core_driver = {
.name = "intel_pmc_core",
.acpi_match_table = ACPI_PTR(pmc_core_acpi_ids),
.pm = &pmc_core_pm_ops,
+ .dev_groups = pmc_dev_groups,
},
.probe = pmc_core_probe,
.remove = pmc_core_remove,
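The etr3 attribute above is wired in through driver.dev_groups rather than explicit sysfs_create_group() calls, so the driver core creates and removes the files and the manual error paths disappear. A minimal sketch of that pattern with a hypothetical read-only attribute:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sysfs_emit(buf, "%d\n", 0);
}
static DEVICE_ATTR_RO(demo);

static struct attribute *demo_attrs[] = {
	&dev_attr_demo.attr,
	NULL
};

static const struct attribute_group demo_group = {
	.attrs = demo_attrs,
};

static const struct attribute_group *demo_groups[] = {
	&demo_group,
	NULL
};

static struct platform_driver demo_driver = {
	.driver = {
		.name = "demo",
		.dev_groups = demo_groups,	/* core creates/removes files */
	},
};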
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
index f33cd2c34835..e8dae9c6c45f 100644
--- a/drivers/platform/x86/intel_pmc_core.h
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -187,20 +187,38 @@ enum ppfear_regs {
#define ICL_PMC_LTR_WIGIG 0x1BFC
#define ICL_PMC_SLP_S0_RES_COUNTER_STEP 0x64
-#define TGL_NUM_IP_IGN_ALLOWED 22
+#define LPM_MAX_NUM_MODES 8
+#define GET_X2_COUNTER(v) ((v) >> 1)
+#define LPM_STS_LATCH_MODE BIT(31)
+
#define TGL_PMC_SLP_S0_RES_COUNTER_STEP 0x7A
+#define TGL_PMC_LTR_THC0 0x1C04
+#define TGL_PMC_LTR_THC1 0x1C08
+#define TGL_NUM_IP_IGN_ALLOWED 23
+#define TGL_PMC_LPM_RES_COUNTER_STEP_X2 61 /* 30.5us * 2 */
/*
* Tigerlake Power Management Controller register offsets
*/
+#define TGL_LPM_STS_LATCH_EN_OFFSET 0x1C34
#define TGL_LPM_EN_OFFSET 0x1C78
#define TGL_LPM_RESIDENCY_OFFSET 0x1C80
/* Tigerlake Low Power Mode debug registers */
#define TGL_LPM_STATUS_OFFSET 0x1C3C
#define TGL_LPM_LIVE_STATUS_OFFSET 0x1C5C
+#define TGL_LPM_PRI_OFFSET 0x1C7C
+#define TGL_LPM_NUM_MAPS 6
+
+/* Extended Test Mode Register 3 (CNL and later) */
+#define ETR3_OFFSET 0x1048
+#define ETR3_CF9GR BIT(20)
+#define ETR3_CF9LOCK BIT(31)
+
+/* Extended Test Mode Register LPM bits (TGL and later) */
+#define ETR3_CLEAR_LPM_EVENTS BIT(28)
-const char *tgl_lpm_modes[] = {
+const char *pmc_lpm_modes[] = {
"S0i2.0",
"S0i2.1",
"S0i2.2",
@@ -258,11 +276,15 @@ struct pmc_reg_map {
const u32 ltr_ignore_max;
const u32 pm_vric1_offset;
/* Low Power Mode registers */
- const char **lpm_modes;
+ const int lpm_num_maps;
+ const int lpm_res_counter_step_x2;
+ const u32 lpm_sts_latch_en_offset;
const u32 lpm_en_offset;
+ const u32 lpm_priority_offset;
const u32 lpm_residency_offset;
const u32 lpm_status_offset;
const u32 lpm_live_status_offset;
+ const u32 etr3_offset;
};
/**
@@ -278,6 +300,9 @@ struct pmc_reg_map {
* @check_counters: On resume, check if counters are getting incremented
* @pc10_counter: PC10 residency counter
* @s0ix_counter: S0ix residency (step adjusted)
+ * @num_lpm_modes: Count of enabled modes
+ * @lpm_en_modes: Array of enabled modes from lowest to highest priority
+ * @lpm_req_regs: List of substate requirements
*
* pmc_dev contains info about power management controller device.
*/
@@ -292,6 +317,28 @@ struct pmc_dev {
bool check_counters; /* Check for counter increments on resume */
u64 pc10_counter;
u64 s0ix_counter;
+ int num_lpm_modes;
+ int lpm_en_modes[LPM_MAX_NUM_MODES];
+ u32 *lpm_req_regs;
};
+#define pmc_for_each_mode(i, mode, pmcdev) \
+ for (i = 0, mode = pmcdev->lpm_en_modes[i]; \
+ i < pmcdev->num_lpm_modes; \
+ i++, mode = pmcdev->lpm_en_modes[i])
+
+#define DEFINE_PMC_CORE_ATTR_WRITE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .write = __name ## _write, \
+ .release = single_release, \
+}
+
#endif /* PMC_CORE_H */
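A hypothetical user of the pmc_for_each_mode() iterator added above, walking the enabled low-power modes in priority order (demo_list_modes is illustrative only):

static void demo_list_modes(struct pmc_dev *pmcdev)
{
	int i, mode;

	pmc_for_each_mode(i, mode, pmcdev)
		pr_info("mode %d: %s\n", mode, pmc_lpm_modes[mode]);
}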
diff --git a/drivers/platform/x86/intel_pmt_class.c b/drivers/platform/x86/intel_pmt_class.c
index ee2b3bbeb83d..c86ff15b1ed5 100644
--- a/drivers/platform/x86/intel_pmt_class.c
+++ b/drivers/platform/x86/intel_pmt_class.c
@@ -20,6 +20,28 @@
#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
/*
+ * Early implementations of PMT on client platforms have some
+ * differences from the server platforms (which use the Out Of Band
+ * Management Services Module OOBMSM). This list tracks those
+ * platforms as needed to handle those differences. Newer client
+ * platforms are expected to be fully compatible with server.
+ */
+static const struct pci_device_id pmt_telem_early_client_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x467d) }, /* ADL */
+ { PCI_VDEVICE(INTEL, 0x490e) }, /* DG1 */
+ { PCI_VDEVICE(INTEL, 0x9a0d) }, /* TGL */
+ { }
+};
+
+bool intel_pmt_is_early_client_hw(struct device *dev)
+{
+ struct pci_dev *parent = to_pci_dev(dev->parent);
+
+ return !!pci_match_id(pmt_telem_early_client_pci_ids, parent);
+}
+EXPORT_SYMBOL_GPL(intel_pmt_is_early_client_hw);
+
+/*
* sysfs
*/
static ssize_t
@@ -147,6 +169,30 @@ static int intel_pmt_populate_entry(struct intel_pmt_entry *entry,
* base address = end of discovery region + base offset
*/
entry->base_addr = disc_res->end + 1 + header->base_offset;
+
+ /*
+ * Some hardware uses a different calculation for the base address
+ * when access_type == ACCESS_LOCAL. On these systems
+ * ACCESS_LOCAL refers to an address in the same BAR as the
+ * header but at a fixed offset. But as the header address was
+ * supplied to the driver, we don't know which BAR it was in.
+ * So search for the BAR whose range includes the header address.
+ */
+ if (intel_pmt_is_early_client_hw(dev)) {
+ int i;
+
+ entry->base_addr = 0;
+ for (i = 0; i < 6; i++)
+ if (disc_res->start >= pci_resource_start(pci_dev, i) &&
+ (disc_res->start <= pci_resource_end(pci_dev, i))) {
+ entry->base_addr = pci_resource_start(pci_dev, i) +
+ header->base_offset;
+ break;
+ }
+ if (!entry->base_addr)
+ return -EINVAL;
+ }
+
break;
case ACCESS_BARID:
/*
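The ACCESS_LOCAL fixup above has to discover which BAR holds the discovery header. A standalone sketch of that search, assuming a standard six-BAR endpoint (demo_find_bar is hypothetical):

#include <linux/pci.h>

/* Return the index of the BAR of pdev whose range contains addr,
 * or -1 if no standard BAR does. */
static int demo_find_bar(struct pci_dev *pdev, resource_size_t addr)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (addr >= pci_resource_start(pdev, i) &&
		    addr <= pci_resource_end(pdev, i))
			return i;
	}

	return -1;
}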
diff --git a/drivers/platform/x86/intel_pmt_class.h b/drivers/platform/x86/intel_pmt_class.h
index de8f8139ba31..1337019c2873 100644
--- a/drivers/platform/x86/intel_pmt_class.h
+++ b/drivers/platform/x86/intel_pmt_class.h
@@ -44,6 +44,7 @@ struct intel_pmt_namespace {
struct device *dev);
};
+bool intel_pmt_is_early_client_hw(struct device *dev);
int intel_pmt_dev_create(struct intel_pmt_entry *entry,
struct intel_pmt_namespace *ns,
struct platform_device *pdev, int idx);
diff --git a/drivers/platform/x86/intel_pmt_telemetry.c b/drivers/platform/x86/intel_pmt_telemetry.c
index f8a87614efa4..9b95ef050457 100644
--- a/drivers/platform/x86/intel_pmt_telemetry.c
+++ b/drivers/platform/x86/intel_pmt_telemetry.c
@@ -34,26 +34,6 @@ struct pmt_telem_priv {
struct intel_pmt_entry entry[];
};
-/*
- * Early implementations of PMT on client platforms have some
- * differences from the server platforms (which use the Out Of Band
- * Management Services Module OOBMSM). This list tracks those
- * platforms as needed to handle those differences. Newer client
- * platforms are expected to be fully compatible with server.
- */
-static const struct pci_device_id pmt_telem_early_client_pci_ids[] = {
- { PCI_VDEVICE(INTEL, 0x9a0d) }, /* TGL */
- { PCI_VDEVICE(INTEL, 0x467d) }, /* ADL */
- { }
-};
-
-static bool intel_pmt_is_early_client_hw(struct device *dev)
-{
- struct pci_dev *parent = to_pci_dev(dev->parent);
-
- return !!pci_match_id(pmt_telem_early_client_pci_ids, parent);
-}
-
static bool pmt_telem_region_overlaps(struct intel_pmt_entry *entry,
struct device *dev)
{
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
index a2a2d923e60c..df1fc6c719f3 100644
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
@@ -21,12 +21,16 @@
#define PUNIT_MAILBOX_BUSY_BIT 31
/*
- * The average time to complete some commands is about 40us. The current
- * count is enough to satisfy 40us. But when the firmware is very busy, this
- * causes timeout occasionally. So increase to deal with some worst case
- * scenarios. Most of the command still complete in few us.
+ * The average time to complete mailbox commands is less than 40us. Most of
+ * the commands complete in a few microseconds. But the same firmware handles
+ * requests from all power management features, so flooding it with requests
+ * can delay a mailbox response by hundreds of microseconds. Hence define
+ * two timeouts: one for the average case and one for the worst case.
+ * If the firmware is taking longer than average, just call cond_resched().
*/
-#define OS_MAILBOX_RETRY_COUNT 100
+#define OS_MAILBOX_TIMEOUT_AVG_US 40
+#define OS_MAILBOX_TIMEOUT_MAX_US 1000
struct isst_if_device {
struct mutex mutex;
@@ -35,11 +39,13 @@ struct isst_if_device {
static int isst_if_mbox_cmd(struct pci_dev *pdev,
struct isst_if_mbox_cmd *mbox_cmd)
{
- u32 retries, data;
+ s64 tm_delta = 0;
+ ktime_t tm;
+ u32 data;
int ret;
/* Poll for rb bit == 0 */
- retries = OS_MAILBOX_RETRY_COUNT;
+ tm = ktime_get();
do {
ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
&data);
@@ -48,11 +54,14 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
ret = -EBUSY;
+ tm_delta = ktime_us_delta(ktime_get(), tm);
+ if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
+ cond_resched();
continue;
}
ret = 0;
break;
- } while (--retries);
+ } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
if (ret)
return ret;
@@ -74,7 +83,8 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
return ret;
/* Poll for rb bit == 0 */
- retries = OS_MAILBOX_RETRY_COUNT;
+ tm_delta = 0;
+ tm = ktime_get();
do {
ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
&data);
@@ -83,6 +93,9 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
ret = -EBUSY;
+ tm_delta = ktime_us_delta(ktime_get(), tm);
+ if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
+ cond_resched();
continue;
}
@@ -96,7 +109,7 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
mbox_cmd->resp_data = data;
ret = 0;
break;
- } while (--retries);
+ } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
return ret;
}
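The retry-count loops are replaced by elapsed-time polling: spin until the busy bit clears or a hard 1000us limit is reached, yielding the CPU once the 40us average is exceeded. A condensed sketch of that two-stage loop, where done() stands in for the busy-bit read:

#include <linux/errno.h>
#include <linux/ktime.h>
#include <linux/sched.h>

#define DEMO_TIMEOUT_AVG_US	40
#define DEMO_TIMEOUT_MAX_US	1000

static int demo_poll(bool (*done)(void))
{
	s64 tm_delta = 0;
	ktime_t tm = ktime_get();

	do {
		if (done())
			return 0;

		/* Past the average case: be nice and yield the CPU. */
		tm_delta = ktime_us_delta(ktime_get(), tm);
		if (tm_delta > DEMO_TIMEOUT_AVG_US)
			cond_resched();
	} while (tm_delta < DEMO_TIMEOUT_MAX_US);

	return -EBUSY;
}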
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index dd900a76d8de..20145b539335 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -678,7 +678,7 @@ static int __init acpi_init(void)
result = acpi_bus_register_driver(&acpi_driver);
if (result < 0) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error registering driver\n"));
+ pr_debug("Error registering driver\n");
return -ENODEV;
}
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 6388c3c705a6..d4f444401496 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -973,7 +973,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
pcc->mute = pcc->sinf[SINF_MUTE];
pcc->ac_brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
pcc->dc_brightness = pcc->sinf[SINF_DC_CUR_BRIGHT];
- result = pcc->current_brightness = pcc->sinf[SINF_CUR_BRIGHT];
+ pcc->current_brightness = pcc->sinf[SINF_CUR_BRIGHT];
/* add sysfs attributes */
result = sysfs_create_group(&device->dev.kobj, &pcc_attr_group);
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index ca684ed760d1..a9d2a4b98e57 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -393,34 +393,10 @@ static const struct dmi_system_id critclk_systems[] = {
},
{
/* pmc_plt_clk* - are used for ethernet controllers */
- .ident = "Beckhoff CB3163",
+ .ident = "Beckhoff Baytrail",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
- DMI_MATCH(DMI_BOARD_NAME, "CB3163"),
- },
- },
- {
- /* pmc_plt_clk* - are used for ethernet controllers */
- .ident = "Beckhoff CB4063",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
- DMI_MATCH(DMI_BOARD_NAME, "CB4063"),
- },
- },
- {
- /* pmc_plt_clk* - are used for ethernet controllers */
- .ident = "Beckhoff CB6263",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
- DMI_MATCH(DMI_BOARD_NAME, "CB6263"),
- },
- },
- {
- /* pmc_plt_clk* - are used for ethernet controllers */
- .ident = "Beckhoff CB6363",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
- DMI_MATCH(DMI_BOARD_NAME, "CB6363"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "CBxx63"),
},
},
{
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 0d9e2ddbf904..dd60c9397d35 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -175,6 +175,12 @@ enum tpacpi_hkey_event_t {
or port replicator */
TP_HKEY_EV_HOTPLUG_UNDOCK = 0x4011, /* undocked from hotplug
dock or port replicator */
+ /*
+ * Thinkpad X1 Tablet series devices emit 0x4012 and 0x4013
+ * when keyboard cover is attached, detached or folded onto the back
+ */
+ TP_HKEY_EV_KBD_COVER_ATTACH = 0x4012, /* keyboard cover attached */
+ TP_HKEY_EV_KBD_COVER_DETACH = 0x4013, /* keyboard cover detached or folded back */
/* User-interface events */
TP_HKEY_EV_LID_CLOSE = 0x5001, /* laptop lid closed */
@@ -3991,6 +3997,23 @@ static bool hotkey_notify_dockevent(const u32 hkey,
pr_info("undocked from hotplug port replicator\n");
return true;
+ /*
+ * Deliberately ignore attaching and detaching the keyboard cover to avoid
+ * duplicates from intel-vbtn, which already emits SW_TABLET_MODE events
+ * to userspace.
+ *
+ * Please refer to the following thread for more information and a preliminary
+ * implementation using the GTOP ("Get Tablet OPtions") interface that could be
+ * extended to other attachment options of the ThinkPad X1 Tablet series, such as
+ * the Pico cartridge dock module:
+ * https://lore.kernel.org/platform-driver-x86/38cb8265-1e30-d547-9e12-b4ae290be737@a-kobel.de/
+ */
+ case TP_HKEY_EV_KBD_COVER_ATTACH:
+ case TP_HKEY_EV_KBD_COVER_DETACH:
+ *send_acpi_ev = false;
+ *ignore_acpi_ev = true;
+ return true;
+
default:
return false;
}
@@ -4088,7 +4111,7 @@ static bool hotkey_notify_6xxx(const u32 hkey,
return true;
case TP_HKEY_EV_KEY_FN_ESC:
- /* Get the media key status to foce the status LED to update */
+ /* Get the media key status to force the status LED to update */
acpi_evalf(hkey_handle, NULL, "GMKS", "v");
*send_acpi_ev = false;
*ignore_acpi_ev = true;
@@ -6260,6 +6283,7 @@ enum thermal_access_mode {
enum { /* TPACPI_THERMAL_TPEC_* */
TP_EC_THERMAL_TMP0 = 0x78, /* ACPI EC regs TMP 0..7 */
TP_EC_THERMAL_TMP8 = 0xC0, /* ACPI EC regs TMP 8..15 */
+ TP_EC_FUNCREV = 0xEF, /* ACPI EC Functional revision */
TP_EC_THERMAL_TMP_NA = -128, /* ACPI EC sensor not available */
TPACPI_THERMAL_SENSOR_NA = -128000, /* Sensor not available */
@@ -6272,6 +6296,8 @@ struct ibm_thermal_sensors_struct {
};
static enum thermal_access_mode thermal_read_mode;
+static const struct attribute_group *thermal_attr_group;
+static bool thermal_use_labels;
/* idx is zero-based */
static int thermal_get_sensor(int idx, s32 *value)
@@ -6454,11 +6480,33 @@ static const struct attribute_group thermal_temp_input8_group = {
#undef THERMAL_SENSOR_ATTR_TEMP
#undef THERMAL_ATTRS
+static ssize_t temp1_label_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "CPU\n");
+}
+static DEVICE_ATTR_RO(temp1_label);
+
+static ssize_t temp2_label_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "GPU\n");
+}
+static DEVICE_ATTR_RO(temp2_label);
+
+static struct attribute *temp_label_attributes[] = {
+ &dev_attr_temp1_label.attr,
+ &dev_attr_temp2_label.attr,
+ NULL
+};
+
+static const struct attribute_group temp_label_attr_group = {
+ .attrs = temp_label_attributes,
+};
+
/* --------------------------------------------------------------------- */
static int __init thermal_init(struct ibm_init_struct *iibm)
{
- u8 t, ta1, ta2;
+ u8 t, ta1, ta2, ver = 0;
int i;
int acpi_tmp7;
int res;
@@ -6473,7 +6521,14 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
* 0x78-0x7F, 0xC0-0xC7. Registers return 0x00 for
* non-implemented, thermal sensors return 0x80 when
* not available
+ * The above rule is unfortunately flawed. This has been seen with
+ * 0xC2 (power supply ID) causing thermal control problems.
+ * The EC version can be determined by offset 0xEF and at least for
+ * version 3 the Lenovo firmware team confirmed that registers 0xC0-0xC7
+ * are not thermal registers.
*/
+ if (!acpi_ec_read(TP_EC_FUNCREV, &ver))
+ pr_warn("Thinkpad ACPI EC unable to access EC version\n");
ta1 = ta2 = 0;
for (i = 0; i < 8; i++) {
@@ -6483,11 +6538,13 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
ta1 = 0;
break;
}
- if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
- ta2 |= t;
- } else {
- ta1 = 0;
- break;
+ if (ver < 3) {
+ if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
+ ta2 |= t;
+ } else {
+ ta1 = 0;
+ break;
+ }
}
}
if (ta1 == 0) {
@@ -6500,9 +6557,14 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
thermal_read_mode = TPACPI_THERMAL_NONE;
}
} else {
- thermal_read_mode =
- (ta2 != 0) ?
- TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
+ if (ver >= 3) {
+ thermal_read_mode = TPACPI_THERMAL_TPEC_8;
+ thermal_use_labels = true;
+ } else {
+ thermal_read_mode =
+ (ta2 != 0) ?
+ TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
+ }
}
} else if (acpi_tmp7) {
if (tpacpi_is_ibm() &&
@@ -6524,44 +6586,40 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
switch (thermal_read_mode) {
case TPACPI_THERMAL_TPEC_16:
- res = sysfs_create_group(&tpacpi_hwmon->kobj,
- &thermal_temp_input16_group);
- if (res)
- return res;
+ thermal_attr_group = &thermal_temp_input16_group;
break;
case TPACPI_THERMAL_TPEC_8:
case TPACPI_THERMAL_ACPI_TMP07:
case TPACPI_THERMAL_ACPI_UPDT:
- res = sysfs_create_group(&tpacpi_hwmon->kobj,
- &thermal_temp_input8_group);
- if (res)
- return res;
+ thermal_attr_group = &thermal_temp_input8_group;
break;
case TPACPI_THERMAL_NONE:
default:
return 1;
}
+ res = sysfs_create_group(&tpacpi_hwmon->kobj, thermal_attr_group);
+ if (res)
+ return res;
+
+ if (thermal_use_labels) {
+ res = sysfs_create_group(&tpacpi_hwmon->kobj, &temp_label_attr_group);
+ if (res) {
+ sysfs_remove_group(&tpacpi_hwmon->kobj, thermal_attr_group);
+ return res;
+ }
+ }
+
return 0;
}
static void thermal_exit(void)
{
- switch (thermal_read_mode) {
- case TPACPI_THERMAL_TPEC_16:
- sysfs_remove_group(&tpacpi_hwmon->kobj,
- &thermal_temp_input16_group);
- break;
- case TPACPI_THERMAL_TPEC_8:
- case TPACPI_THERMAL_ACPI_TMP07:
- case TPACPI_THERMAL_ACPI_UPDT:
- sysfs_remove_group(&tpacpi_hwmon->kobj,
- &thermal_temp_input8_group);
- break;
- case TPACPI_THERMAL_NONE:
- default:
- break;
- }
+ if (thermal_attr_group)
+ sysfs_remove_group(&tpacpi_hwmon->kobj, thermal_attr_group);
+
+ if (thermal_use_labels)
+ sysfs_remove_group(&tpacpi_hwmon->kobj, &temp_label_attr_group);
}
static int thermal_read(struct seq_file *m)
@@ -10050,6 +10108,7 @@ static struct ibm_struct proxsensor_driver_data = {
*/
#define DYTC_CMD_SET 1 /* To enable/disable IC function mode */
+#define DYTC_CMD_MMC_GET 8 /* To get current MMC function and mode */
#define DYTC_CMD_RESET 0x1ff /* To reset back to default */
#define DYTC_GET_FUNCTION_BIT 8 /* Bits 8-11 - function setting */
@@ -10066,6 +10125,10 @@ static struct ibm_struct proxsensor_driver_data = {
#define DYTC_MODE_PERFORM 2 /* High power mode aka performance */
#define DYTC_MODE_LOWPOWER 3 /* Low power mode */
#define DYTC_MODE_BALANCE 0xF /* Default mode aka balanced */
+#define DYTC_MODE_MMC_BALANCE 0 /* Default mode from MMC_GET, aka balanced */
+
+#define DYTC_ERR_MASK 0xF /* Bits 0-3 in cmd result carry the error code */
+#define DYTC_ERR_SUCCESS 1 /* CMD completed successfully */
#define DYTC_SET_COMMAND(function, mode, on) \
(DYTC_CMD_SET | (function) << DYTC_SET_FUNCTION_BIT | \
@@ -10080,6 +10143,7 @@ static bool dytc_profile_available;
static enum platform_profile_option dytc_current_profile;
static atomic_t dytc_ignore_event = ATOMIC_INIT(0);
static DEFINE_MUTEX(dytc_mutex);
+static bool dytc_mmc_get_available;
static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *profile)
{
@@ -10088,6 +10152,7 @@ static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *p
*profile = PLATFORM_PROFILE_LOW_POWER;
break;
case DYTC_MODE_BALANCE:
+ case DYTC_MODE_MMC_BALANCE:
*profile = PLATFORM_PROFILE_BALANCED;
break;
case DYTC_MODE_PERFORM:
@@ -10165,7 +10230,6 @@ static int dytc_cql_command(int command, int *output)
if (err)
return err;
}
-
return cmd_err;
}
@@ -10222,7 +10286,10 @@ static void dytc_profile_refresh(void)
int perfmode;
mutex_lock(&dytc_mutex);
- err = dytc_cql_command(DYTC_CMD_GET, &output);
+ if (dytc_mmc_get_available)
+ err = dytc_command(DYTC_CMD_MMC_GET, &output);
+ else
+ err = dytc_cql_command(DYTC_CMD_GET, &output);
mutex_unlock(&dytc_mutex);
if (err)
return;
@@ -10271,6 +10338,16 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
if (dytc_version >= 5) {
dbg_printk(TPACPI_DBG_INIT,
"DYTC version %d: thermal mode available\n", dytc_version);
+ /*
+ * Check if the MMC_GET functionality is available:
+ * version >= 6 and a successful MMC_GET command.
+ */
+ dytc_mmc_get_available = false;
+ if (dytc_version >= 6) {
+ err = dytc_command(DYTC_CMD_MMC_GET, &output);
+ if (!err && ((output & DYTC_ERR_MASK) == DYTC_ERR_SUCCESS))
+ dytc_mmc_get_available = true;
+ }
/* Create platform_profile structure and register */
err = platform_profile_register(&dytc_profile);
/*
@@ -10473,6 +10550,111 @@ static struct ibm_struct kbdlang_driver_data = {
.exit = kbdlang_exit,
};
+/*************************************************************************
+ * DPRC (Dynamic Power Reduction Control) subdriver, for the Lenovo WWAN
+ * and WLAN feature.
+ */
+#define DPRC_GET_WWAN_ANTENNA_TYPE 0x40000
+#define DPRC_WWAN_ANTENNA_TYPE_A_BIT BIT(4)
+#define DPRC_WWAN_ANTENNA_TYPE_B_BIT BIT(8)
+static bool has_antennatype;
+static int wwan_antennatype;
+
+static int dprc_command(int command, int *output)
+{
+ acpi_handle dprc_handle;
+
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "DPRC", &dprc_handle))) {
+ /* Platform doesn't support DPRC */
+ return -ENODEV;
+ }
+
+ if (!acpi_evalf(dprc_handle, output, NULL, "dd", command))
+ return -EIO;
+
+ /*
+ * METHOD_ERR gets returned on devices where some commands are not
+ * supported, e.g. the command to get the WWAN antenna type is not
+ * supported on some devices.
+ */
+ if (*output & METHOD_ERR)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int get_wwan_antenna(int *wwan_antennatype)
+{
+ int output, err;
+
+ /* Get current Antenna type */
+ err = dprc_command(DPRC_GET_WWAN_ANTENNA_TYPE, &output);
+ if (err)
+ return err;
+
+ if (output & DPRC_WWAN_ANTENNA_TYPE_A_BIT)
+ *wwan_antennatype = 1;
+ else if (output & DPRC_WWAN_ANTENNA_TYPE_B_BIT)
+ *wwan_antennatype = 2;
+ else
+ return -ENODEV;
+
+ return 0;
+}
+
+/* sysfs wwan antenna type entry */
+static ssize_t wwan_antenna_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ switch (wwan_antennatype) {
+ case 1:
+ return sysfs_emit(buf, "type a\n");
+ case 2:
+ return sysfs_emit(buf, "type b\n");
+ default:
+ return -ENODATA;
+ }
+}
+static DEVICE_ATTR_RO(wwan_antenna_type);
+
+static int tpacpi_dprc_init(struct ibm_init_struct *iibm)
+{
+ int wwanantenna_err, err;
+
+ wwanantenna_err = get_wwan_antenna(&wwan_antennatype);
+ /*
+ * If support isn't available (ENODEV) then quit, but don't
+ * return an error.
+ */
+ if (wwanantenna_err == -ENODEV)
+ return 0;
+
+ /* If there was an error, return it */
+ if (wwanantenna_err && (wwanantenna_err != -ENODEV))
+ return wwanantenna_err;
+ else if (!wwanantenna_err)
+ has_antennatype = true;
+
+ if (has_antennatype) {
+ err = sysfs_create_file(&tpacpi_pdev->dev.kobj, &dev_attr_wwan_antenna_type.attr);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static void dprc_exit(void)
+{
+ if (has_antennatype)
+ sysfs_remove_file(&tpacpi_pdev->dev.kobj, &dev_attr_wwan_antenna_type.attr);
+}
+
+static struct ibm_struct dprc_driver_data = {
+ .name = "dprc",
+ .exit = dprc_exit,
+};
+
/****************************************************************************
****************************************************************************
*
@@ -10977,6 +11159,10 @@ static struct ibm_init_struct ibms_init[] __initdata = {
.init = tpacpi_kbdlang_init,
.data = &kbdlang_driver_data,
},
+ {
+ .init = tpacpi_dprc_init,
+ .data = &dprc_driver_data,
+ },
};
static int __init set_ibm_param(const char *val, const struct kernel_param *kp)
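
The MMC_GET availability probe above keys off the low nibble of the command result. A minimal sketch of that decoding, assuming only the DYTC_ERR_* constants introduced in this patch (the helper name is hypothetical):

/* Hypothetical helper: bits 0-3 of a DYTC command result carry the
 * error code, and DYTC_ERR_SUCCESS (1) means the command completed. */
static bool dytc_cmd_ok(int output)
{
	return (output & DYTC_ERR_MASK) == DYTC_ERR_SUCCESS;
}
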
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index c44a6e8dceb8..90fe4f8f3c2c 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -715,6 +715,32 @@ static const struct ts_dmi_data techbite_arc_11_6_data = {
.properties = techbite_arc_11_6_props,
};
+static const struct property_entry teclast_tbook11_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 14),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1916),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-tbook11.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data teclast_tbook11_data = {
+ .embedded_fw = {
+ .name = "silead/gsl3692-teclast-tbook11.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 43560,
+ .sha256 = { 0x9d, 0xb0, 0x3d, 0xf1, 0x00, 0x3c, 0xb5, 0x25,
+ 0x62, 0x8a, 0xa0, 0x93, 0x4b, 0xe0, 0x4e, 0x75,
+ 0xd1, 0x27, 0xb1, 0x65, 0x3c, 0xba, 0xa5, 0x0f,
+ 0xcd, 0xb4, 0xbe, 0x00, 0xbb, 0xf6, 0x43, 0x29 },
+ },
+ .acpi_name = "MSSL1680:00",
+ .properties = teclast_tbook11_props,
+};
+
static const struct property_entry teclast_x3_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@@ -1244,6 +1270,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Teclast Tbook 11 */
+ .driver_data = (void *)&teclast_tbook11_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TbooK 11"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "E5A6_A1"),
+ },
+ },
+ {
/* Teclast X3 Plus */
.driver_data = (void *)&teclast_x3_plus_data,
.matches = {
@@ -1355,7 +1390,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
if (has_acpi_companion(dev) &&
!strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
- error = device_add_properties(dev, ts_data->properties);
+ error = device_create_managed_software_node(dev, ts_data->properties, NULL);
if (error)
dev_err(dev, "failed to add properties: %d\n", error);
}
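
The device_create_managed_software_node() switch above makes the property set device-managed: the driver core drops it automatically when the device goes away, so no removal call is needed on the unbind path. A hedged sketch of the same pattern in isolation (the property table and helper are illustrative):

static const struct property_entry example_props[] = {
	PROPERTY_ENTRY_U32("touchscreen-size-x", 1916),
	{ }
};

static int example_attach_props(struct device *dev)
{
	/* Managed: lifetime is tied to dev, no explicit teardown. */
	return device_create_managed_software_node(dev, example_props, NULL);
}
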
diff --git a/drivers/platform/x86/wmi-bmof.c b/drivers/platform/x86/wmi-bmof.c
index 66b434d6307f..80137afb9753 100644
--- a/drivers/platform/x86/wmi-bmof.c
+++ b/drivers/platform/x86/wmi-bmof.c
@@ -86,13 +86,12 @@ static int wmi_bmof_probe(struct wmi_device *wdev, const void *context)
return ret;
}
-static int wmi_bmof_remove(struct wmi_device *wdev)
+static void wmi_bmof_remove(struct wmi_device *wdev)
{
struct bmof_priv *priv = dev_get_drvdata(&wdev->dev);
sysfs_remove_bin_file(&wdev->dev.kobj, &priv->bmof_bin_attr);
kfree(priv->bmofdata);
- return 0;
}
static const struct wmi_device_id wmi_bmof_id_table[] = {
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index c669676ea8e8..62e0d56a3332 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -32,7 +32,6 @@
#include <linux/fs.h>
#include <uapi/linux/wmi.h>
-ACPI_MODULE_NAME("wmi");
MODULE_AUTHOR("Carlos Corbacho");
MODULE_DESCRIPTION("ACPI-WMI Mapping Driver");
MODULE_LICENSE("GPL");
@@ -986,7 +985,6 @@ static int wmi_dev_remove(struct device *dev)
struct wmi_block *wblock = dev_to_wblock(dev);
struct wmi_driver *wdriver =
container_of(dev->driver, struct wmi_driver, driver);
- int ret = 0;
if (wdriver->filter_callback) {
misc_deregister(&wblock->char_dev);
@@ -995,12 +993,12 @@ static int wmi_dev_remove(struct device *dev)
}
if (wdriver->remove)
- ret = wdriver->remove(dev_to_wdev(dev));
+ wdriver->remove(dev_to_wdev(dev));
if (ACPI_FAILURE(wmi_method_enable(wblock, 0)))
dev_warn(dev, "failed to disable device\n");
- return ret;
+ return 0;
}
static struct class wmi_bus_class = {
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index 8337c99d2ce2..97440462aa25 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -26,8 +26,6 @@
#define XO15_EBOOK_HID "XO15EBK"
#define XO15_EBOOK_DEVICE_NAME "EBook Switch"
-ACPI_MODULE_NAME(MODULE_NAME);
-
MODULE_DESCRIPTION("OLPC XO-1.5 ebook switch driver");
MODULE_LICENSE("GPL");
@@ -66,8 +64,8 @@ static void ebook_switch_notify(struct acpi_device *device, u32 event)
ebook_send_state(device);
break;
default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Unsupported event [0x%x]\n", event));
+ acpi_handle_debug(device->handle,
+ "Unsupported event [0x%x]\n", event);
break;
}
}
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index ddecf25b5dd4..d7894f178bd4 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -309,11 +309,20 @@ static bool sanity_check(struct ce_array *ca)
return ret;
}
+/**
+ * cec_add_elem - Add an element to the CEC array.
+ * @pfn: page frame number to insert
+ *
+ * Return values:
+ * - <0: on error
+ * - 0: on success
+ * - >0: when the inserted pfn was offlined
+ */
static int cec_add_elem(u64 pfn)
{
struct ce_array *ca = &ce_arr;
+ int count, err, ret = 0;
unsigned int to = 0;
- int count, ret = 0;
/*
* We can be called very early on the identify_cpu() path where we are
@@ -330,8 +339,8 @@ static int cec_add_elem(u64 pfn)
if (ca->n == MAX_ELEMS)
WARN_ON(!del_lru_elem_unlocked(ca));
- ret = find_elem(ca, pfn, &to);
- if (ret < 0) {
+ err = find_elem(ca, pfn, &to);
+ if (err < 0) {
/*
* Shift range [to-end] to make room for one more element.
*/
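
Given the return convention documented above, a caller would discriminate the three outcomes roughly as below; cec_add_elem() is static to cec.c, so this sketch only illustrates the contract:

static void example_record_ce(u64 pfn)
{
	int ret = cec_add_elem(pfn);

	if (ret < 0)
		pr_err("CEC: insert failed: %d\n", ret);
	else if (ret > 0)
		pr_info("CEC: pfn 0x%llx was offlined\n", pfn);
	/* ret == 0: recorded, page still online */
}
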
diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c
index 7b0cd08db446..ba020a45f238 100644
--- a/drivers/regulator/bd9571mwv-regulator.c
+++ b/drivers/regulator/bd9571mwv-regulator.c
@@ -125,7 +125,7 @@ static const struct regulator_ops vid_ops = {
static const struct regulator_desc regulators[] = {
BD9571MWV_REG("VD09", "vd09", VD09, avs_ops, 0, 0x7f,
- 0x80, 600000, 10000, 0x3c),
+ 0x6f, 600000, 10000, 0x3c),
BD9571MWV_REG("VD18", "vd18", VD18, vid_ops, BD9571MWV_VD18_VID, 0xf,
16, 1625000, 25000, 0),
BD9571MWV_REG("VD25", "vd25", VD25, vid_ops, BD9571MWV_VD25_VID, 0xf,
@@ -134,7 +134,7 @@ static const struct regulator_desc regulators[] = {
11, 2800000, 100000, 0),
BD9571MWV_REG("DVFS", "dvfs", DVFS, reg_ops,
BD9571MWV_DVFS_MONIVDAC, 0x7f,
- 0x80, 600000, 10000, 0x3c),
+ 0x6f, 600000, 10000, 0x3c),
};
#ifdef CONFIG_PM_SLEEP
@@ -174,7 +174,7 @@ static ssize_t backup_mode_show(struct device *dev,
{
struct bd9571mwv_reg *bdreg = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", bdreg->bkup_mode_enabled ? "on" : "off");
+ return sysfs_emit(buf, "%s\n", bdreg->bkup_mode_enabled ? "on" : "off");
}
static ssize_t backup_mode_store(struct device *dev,
@@ -301,7 +301,7 @@ static int bd9571mwv_regulator_probe(struct platform_device *pdev)
&config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register %s regulator\n",
- pdev->name);
+ regulators[i].name);
return PTR_ERR(rdev);
}
}
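
The sprintf()-to-sysfs_emit() change above follows the now-preferred pattern for show() callbacks: sysfs_emit() verifies that buf is a page-aligned sysfs buffer and bounds the output to PAGE_SIZE. A generic sketch:

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* Bounded to PAGE_SIZE and buffer-checked, unlike raw sprintf(). */
	return sysfs_emit(buf, "%s\n", "on");
}
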
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
index 2667919d76b3..dcb380e868df 100644
--- a/drivers/remoteproc/pru_rproc.c
+++ b/drivers/remoteproc/pru_rproc.c
@@ -450,6 +450,24 @@ static void *pru_i_da_to_va(struct pru_rproc *pru, u32 da, size_t len)
if (len == 0)
return NULL;
+ /*
+ * GNU binutils do not support multiple address spaces. The GNU
+ * linker's default linker script places IRAM at an arbitrary high
+ * offset, in order to differentiate it from DRAM. Hence we need to
+ * strip the artificial offset in the IRAM addresses coming from the
+ * ELF file.
+ *
+ * The TI proprietary linker would never set those higher IRAM address
+ * bits anyway. PRU architecture limits the program counter to 16-bit
+ * word-address range. This in turn corresponds to 18-bit IRAM
+ * byte-address range for ELF.
+ *
+ * Two more bits are added just in case to make the final 20-bit mask.
+ * The idea is to have a safeguard in case TI decides to add banking
+ * in future SoCs.
+ */
+ da &= 0xfffff;
+
if (da >= PRU_IRAM_DA &&
da + len <= PRU_IRAM_DA + pru->mem_regions[PRU_IOMEM_IRAM].size) {
offset = da - PRU_IRAM_DA;
@@ -585,7 +603,7 @@ pru_rproc_load_elf_segments(struct rproc *rproc, const struct firmware *fw)
break;
}
- if (pru->data->is_k3 && is_iram) {
+ if (pru->data->is_k3) {
ret = pru_rproc_memcpy(ptr, elf_data + phdr->p_offset,
filesz);
if (ret) {
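
A worked example of the masking added above, under the layout the comment describes: if the GNU linker places an IRAM symbol at 0x20000100, the load path computes 0x20000100 & 0xfffff = 0x00100, recovering the real IRAM byte offset before the PRU_IRAM_DA range check below it runs.
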
diff --git a/drivers/remoteproc/qcom_pil_info.c b/drivers/remoteproc/qcom_pil_info.c
index 5521c4437ffa..7c007dd7b200 100644
--- a/drivers/remoteproc/qcom_pil_info.c
+++ b/drivers/remoteproc/qcom_pil_info.c
@@ -56,7 +56,7 @@ static int qcom_pil_info_init(void)
memset_io(base, 0, resource_size(&imem));
_reloc.base = base;
- _reloc.num_entries = resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
+ _reloc.num_entries = (u32)resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
return 0;
}
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index d126bb877250..ba6a3aa8d954 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -20,6 +20,11 @@
#ifndef HPSA_CMD_H
#define HPSA_CMD_H
+#include <linux/compiler.h>
+
+#include <linux/build_bug.h> /* static_assert */
+#include <linux/stddef.h> /* offsetof */
+
/* general boundary definitions */
#define SENSEINFOBYTES 32 /* may vary between hbas */
#define SG_ENTRIES_IN_CMD 32 /* Max SG entries excluding chain blocks */
@@ -200,12 +205,10 @@ union u64bit {
MAX_EXT_TARGETS + 1) /* + 1 is for the controller itself */
/* SCSI-3 Commands */
-#pragma pack(1)
-
#define HPSA_INQUIRY 0x12
struct InquiryData {
u8 data_byte[36];
-};
+} __packed;
#define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */
#define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
@@ -221,7 +224,7 @@ struct raid_map_disk_data {
u8 xor_mult[2]; /**< XOR multipliers for this position,
* valid for data disks only */
u8 reserved[2];
-};
+} __packed;
struct raid_map_data {
__le32 structure_size; /* Size of entire structure in bytes */
@@ -247,14 +250,14 @@ struct raid_map_data {
__le16 dekindex; /* Data encryption key index. */
u8 reserved[16];
struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES];
-};
+} __packed;
struct ReportLUNdata {
u8 LUNListLength[4];
u8 extended_response_flag;
u8 reserved[3];
u8 LUN[HPSA_MAX_LUN][8];
-};
+} __packed;
struct ext_report_lun_entry {
u8 lunid[8];
@@ -269,20 +272,20 @@ struct ext_report_lun_entry {
u8 lun_count; /* multi-lun device, how many luns */
u8 redundant_paths;
u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */
-};
+} __packed;
struct ReportExtendedLUNdata {
u8 LUNListLength[4];
u8 extended_response_flag;
u8 reserved[3];
struct ext_report_lun_entry LUN[HPSA_MAX_PHYS_LUN];
-};
+} __packed;
struct SenseSubsystem_info {
u8 reserved[36];
u8 portname[8];
u8 reserved1[1108];
-};
+} __packed;
/* BMIC commands */
#define BMIC_READ 0x26
@@ -317,7 +320,7 @@ union SCSI3Addr {
u8 Targ:6;
u8 Mode:2; /* b10 */
} LogUnit;
-};
+} __packed;
struct PhysDevAddr {
u32 TargetId:24;
@@ -325,20 +328,20 @@ struct PhysDevAddr {
u32 Mode:2;
/* 2 level target device addr */
union SCSI3Addr Target[2];
-};
+} __packed;
struct LogDevAddr {
u32 VolId:30;
u32 Mode:2;
u8 reserved[4];
-};
+} __packed;
union LUNAddr {
u8 LunAddrBytes[8];
union SCSI3Addr SCSI3Lun[4];
struct PhysDevAddr PhysDev;
struct LogDevAddr LogDev;
-};
+} __packed;
struct CommandListHeader {
u8 ReplyQueue;
@@ -346,7 +349,7 @@ struct CommandListHeader {
__le16 SGTotal;
__le64 tag;
union LUNAddr LUN;
-};
+} __packed;
struct RequestBlock {
u8 CDBLen;
@@ -365,18 +368,18 @@ struct RequestBlock {
#define GET_DIR(tad) (((tad) >> 6) & 0x03)
u16 Timeout;
u8 CDB[16];
-};
+} __packed;
struct ErrDescriptor {
__le64 Addr;
__le32 Len;
-};
+} __packed;
struct SGDescriptor {
__le64 Addr;
__le32 Len;
__le32 Ext;
-};
+} __packed;
union MoreErrInfo {
struct {
@@ -390,7 +393,8 @@ union MoreErrInfo {
u8 offense_num; /* byte # of offense 0-base */
u32 offense_value;
} Invalid_Cmd;
-};
+} __packed;
+
struct ErrorInfo {
u8 ScsiStatus;
u8 SenseLen;
@@ -398,7 +402,7 @@ struct ErrorInfo {
u32 ResidualCnt;
union MoreErrInfo MoreErrInfo;
u8 SenseInfo[SENSEINFOBYTES];
-};
+} __packed;
/* Command types */
#define CMD_IOCTL_PEND 0x01
#define CMD_SCSI 0x03
@@ -453,6 +457,15 @@ struct CommandList {
atomic_t refcount; /* Must be last to avoid memset in hpsa_cmd_init() */
} __aligned(COMMANDLIST_ALIGNMENT);
+/*
+ * Make sure our embedded atomic variable is aligned. Otherwise we break atomic
+ * operations on architectures that don't support unaligned atomics like IA64.
+ *
+ * The assert guards against reintroducing an unwanted __packed
+ * attribute on struct CommandList.
+ */
+static_assert(offsetof(struct CommandList, refcount) % __alignof__(atomic_t) == 0);
+
/* Max S/G elements in I/O accelerator command */
#define IOACCEL1_MAXSGENTRIES 24
#define IOACCEL2_MAXSGENTRIES 28
@@ -489,7 +502,7 @@ struct io_accel1_cmd {
__le64 host_addr; /* 0x70 - 0x77 */
u8 CISS_LUN[8]; /* 0x78 - 0x7F */
struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
-} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
+} __packed __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
#define IOACCEL1_FUNCTION_SCSIIO 0x00
#define IOACCEL1_SGLOFFSET 32
@@ -519,7 +532,7 @@ struct ioaccel2_sg_element {
u8 chain_indicator;
#define IOACCEL2_CHAIN 0x80
#define IOACCEL2_LAST_SG 0x40
-};
+} __packed;
/*
* SCSI Response Format structure for IO Accelerator Mode 2
@@ -559,7 +572,7 @@ struct io_accel2_scsi_response {
u8 sense_data_len; /* sense/response data length */
u8 resid_cnt[4]; /* residual count */
u8 sense_data_buff[32]; /* sense/response data buffer */
-};
+} __packed;
/*
* Structure for I/O accelerator (mode 2 or m2) commands.
@@ -592,7 +605,7 @@ struct io_accel2_cmd {
__le32 tweak_upper; /* Encryption tweak, upper 4 bytes */
struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
struct io_accel2_scsi_response error_data;
-} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
+} __packed __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
/*
* defines for Mode 2 command struct
@@ -618,7 +631,7 @@ struct hpsa_tmf_struct {
__le64 abort_tag; /* cciss tag of SCSI cmd or TMF to abort */
__le64 error_ptr; /* Error Pointer */
__le32 error_len; /* Error Length */
-} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
+} __packed __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
/* Configuration Table Structure */
struct HostWrite {
@@ -626,7 +639,7 @@ struct HostWrite {
__le32 command_pool_addr_hi;
__le32 CoalIntDelay;
__le32 CoalIntCount;
-};
+} __packed;
#define SIMPLE_MODE 0x02
#define PERFORMANT_MODE 0x04
@@ -675,7 +688,7 @@ struct CfgTable {
#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30)
#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31)
__le32 clear_event_notify;
-};
+} __packed;
#define NUM_BLOCKFETCH_ENTRIES 8
struct TransTable_struct {
@@ -686,14 +699,14 @@ struct TransTable_struct {
__le32 RepQCtrAddrHigh32;
#define MAX_REPLY_QUEUES 64
struct vals32 RepQAddr[MAX_REPLY_QUEUES];
-};
+} __packed;
struct hpsa_pci_info {
unsigned char bus;
unsigned char dev_fn;
unsigned short domain;
u32 board_id;
-};
+} __packed;
struct bmic_identify_controller {
u8 configured_logical_drive_count; /* offset 0 */
@@ -702,7 +715,7 @@ struct bmic_identify_controller {
u8 pad2[136];
u8 controller_mode; /* offset 292 */
u8 pad3[32];
-};
+} __packed;
struct bmic_identify_physical_device {
@@ -845,7 +858,7 @@ struct bmic_identify_physical_device {
u8 max_link_rate[256];
u8 neg_phys_link_rate[256];
u8 box_conn_name[8];
-} __attribute((aligned(512)));
+} __packed __attribute((aligned(512)));
struct bmic_sense_subsystem_info {
u8 primary_slot_number;
@@ -858,7 +871,7 @@ struct bmic_sense_subsystem_info {
u8 secondary_array_serial_number[32];
u8 secondary_cache_serial_number[32];
u8 pad[332];
-};
+} __packed;
struct bmic_sense_storage_box_params {
u8 reserved[36];
@@ -870,7 +883,6 @@ struct bmic_sense_storage_box_params {
u8 reserver_3[84];
u8 phys_connector[2];
u8 reserved_4[296];
-};
+} __packed;
-#pragma pack()
#endif /* HPSA_CMD_H */
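
The #pragma pack(1) removal above trades one file-wide pragma for per-struct __packed annotations plus a build-time guard. A standalone sketch of the technique with an illustrative struct (not from the driver):

#include <linux/build_bug.h>
#include <linux/types.h>

struct example_hdr {
	u8  tag;
	u32 len;	/* packing moves this to offset 1 */
} __packed;

/* Fail the build if the packed layout ever changes unexpectedly,
 * mirroring the static_assert() that protects CommandList above. */
static_assert(sizeof(struct example_hdr) == 5);
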
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 04633e5157e9..4834219497ee 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -3179,9 +3179,10 @@ fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
}
}
-static void iscsi_start_session_recovery(struct iscsi_session *session,
- struct iscsi_conn *conn, int flag)
+void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
int old_stop_stage;
mutex_lock(&session->eh_mutex);
@@ -3239,27 +3240,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
}
-
-void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
-{
- struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_session *session = conn->session;
-
- switch (flag) {
- case STOP_CONN_RECOVER:
- cls_conn->state = ISCSI_CONN_FAILED;
- break;
- case STOP_CONN_TERM:
- cls_conn->state = ISCSI_CONN_DOWN;
- break;
- default:
- iscsi_conn_printk(KERN_ERR, conn,
- "invalid stop flag %d\n", flag);
- return;
- }
-
- iscsi_start_session_recovery(session, conn, flag);
-}
EXPORT_SYMBOL_GPL(iscsi_conn_stop);
int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 024e5a550759..8b9a39077dba 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -201,18 +201,17 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
task->total_xfer_len = qc->nbytes;
task->num_scatter = qc->n_elem;
+ task->data_dir = qc->dma_dir;
+ } else if (qc->tf.protocol == ATA_PROT_NODATA) {
+ task->data_dir = DMA_NONE;
} else {
for_each_sg(qc->sg, sg, qc->n_elem, si)
xfer += sg_dma_len(sg);
task->total_xfer_len = xfer;
task->num_scatter = si;
- }
-
- if (qc->tf.protocol == ATA_PROT_NODATA)
- task->data_dir = DMA_NONE;
- else
task->data_dir = qc->dma_dir;
+ }
task->scatter = qc->sg;
task->ata_task.retry_count = 1;
task->task_state_flags = SAS_TASK_STATE_PENDING;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 49bf2f70a470..31e5455d280c 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -223,7 +223,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
PM8001_EVENT_LOG_SIZE;
pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option = 0x01;
pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01;
- for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
+ for (i = 0; i < pm8001_ha->max_q_num; i++) {
pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
@@ -249,7 +249,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;
}
- for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
+ for (i = 0; i < pm8001_ha->max_q_num; i++) {
pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
@@ -671,9 +671,9 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
read_outbnd_queue_table(pm8001_ha);
/* update main config table ,inbound table and outbound table */
update_main_config_table(pm8001_ha);
- for (i = 0; i < PM8001_MAX_INB_NUM; i++)
+ for (i = 0; i < pm8001_ha->max_q_num; i++)
update_inbnd_queue_table(pm8001_ha, i);
- for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
+ for (i = 0; i < pm8001_ha->max_q_num; i++)
update_outbnd_queue_table(pm8001_ha, i);
/* 8081 controller does not require these operations */
if (deviceid != 0x8081 && deviceid != 0x0042) {
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index f4bf62b007a0..441f0152193f 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2474,10 +2474,22 @@ static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
* it works.
*/
mutex_lock(&conn_mutex);
+ switch (flag) {
+ case STOP_CONN_RECOVER:
+ conn->state = ISCSI_CONN_FAILED;
+ break;
+ case STOP_CONN_TERM:
+ conn->state = ISCSI_CONN_DOWN;
+ break;
+ default:
+ iscsi_cls_conn_printk(KERN_ERR, conn,
+ "invalid stop flag %d\n", flag);
+ goto unlock;
+ }
+
conn->transport->stop_conn(conn, flag);
- conn->state = ISCSI_CONN_DOWN;
+unlock:
mutex_unlock(&conn_mutex);
-
}
static void stop_conn_work_fn(struct work_struct *work)
@@ -2968,7 +2980,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
mutex_lock(&conn->ep_mutex);
conn->ep = NULL;
mutex_unlock(&conn->ep_mutex);
- conn->state = ISCSI_CONN_DOWN;
+ conn->state = ISCSI_CONN_FAILED;
}
transport->ep_disconnect(ep);
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 1e939a2a387f..98a34ed10f1a 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -541,7 +541,7 @@ int srp_reconnect_rport(struct srp_rport *rport)
res = mutex_lock_interruptible(&rport->mutex);
if (res)
goto out;
- if (rport->state != SRP_RPORT_FAIL_FAST)
+ if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST)
/*
* sdev state must be SDEV_TRANSPORT_OFFLINE, transition
* to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index c86760788c72..d3d05e997c13 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -6386,37 +6386,34 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
DECLARE_COMPLETION_ONSTACK(wait);
struct request *req;
unsigned long flags;
- int free_slot, task_tag, err;
+ int task_tag, err;
/*
- * Get free slot, sleep if slots are unavailable.
- * Even though we use wait_event() which sleeps indefinitely,
- * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
+ * blk_get_request() is used here only to get a free tag.
*/
req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req))
return PTR_ERR(req);
req->end_io_data = &wait;
- free_slot = req->tag;
- WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
ufshcd_hold(hba, false);
spin_lock_irqsave(host->host_lock, flags);
- task_tag = hba->nutrs + free_slot;
+ blk_mq_start_request(req);
+ task_tag = req->tag;
treq->req_header.dword_0 |= cpu_to_be32(task_tag);
- memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
- ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
+ memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
+ ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
/* send command to the controller */
- __set_bit(free_slot, &hba->outstanding_tasks);
+ __set_bit(task_tag, &hba->outstanding_tasks);
/* Make sure descriptors are ready before ringing the task doorbell */
wmb();
- ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+ ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
/* Make sure that doorbell is committed immediately */
wmb();
@@ -6436,24 +6433,24 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
__func__, tm_function);
- if (ufshcd_clear_tm_cmd(hba, free_slot))
- dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
- __func__, free_slot);
+ if (ufshcd_clear_tm_cmd(hba, task_tag))
+ dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
+ __func__, task_tag);
err = -ETIMEDOUT;
} else {
err = 0;
- memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
+ memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
}
spin_lock_irqsave(hba->host->host_lock, flags);
- __clear_bit(free_slot, &hba->outstanding_tasks);
+ __clear_bit(task_tag, &hba->outstanding_tasks);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_release(hba);
blk_put_request(req);
- ufshcd_release(hba);
return err;
}
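
The rework above uses the block layer purely as a tag allocator: the request's tag doubles as the UTP task-management slot, replacing the old free_slot bookkeeping. The essential shape of that pattern, as a sketch with error handling trimmed:

static int example_get_tm_tag(struct request_queue *q)
{
	struct request *req;
	int task_tag;

	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);
	blk_mq_start_request(req);	/* mark the tag in flight */
	task_tag = req->tag;		/* hardware slot == blk-mq tag */
	/* ... issue the TM command using task_tag, wait, then ... */
	blk_put_request(req);		/* return the tag to the pool */
	return task_tag;		/* illustrative only */
}
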
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index a14684ffe4c1..ca4f4ca413f1 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -179,6 +179,21 @@ static unsigned int __init save_reg(struct intc_desc_int *d,
return 0;
}
+static bool __init intc_map(struct irq_domain *domain, int irq)
+{
+ if (!irq_to_desc(irq) && irq_alloc_desc_at(irq, NUMA_NO_NODE) != irq) {
+ pr_err("uname to allocate IRQ %d\n", irq);
+ return false;
+ }
+
+ if (irq_domain_associate(domain, irq, irq)) {
+ pr_err("domain association failure\n");
+ return false;
+ }
+
+ return true;
+}
+
int __init register_intc_controller(struct intc_desc *desc)
{
unsigned int i, k, smp;
@@ -311,24 +326,12 @@ int __init register_intc_controller(struct intc_desc *desc)
for (i = 0; i < hw->nr_vectors; i++) {
struct intc_vect *vect = hw->vectors + i;
unsigned int irq = evt2irq(vect->vect);
- int res;
if (!vect->enum_id)
continue;
- res = irq_create_identity_mapping(d->domain, irq);
- if (unlikely(res)) {
- if (res == -EEXIST) {
- res = irq_domain_associate(d->domain, irq, irq);
- if (unlikely(res)) {
- pr_err("domain association failure\n");
- continue;
- }
- } else {
- pr_err("can't identity map IRQ %d\n", irq);
- continue;
- }
- }
+ if (!intc_map(d->domain, irq))
+ continue;
intc_irq_xlate_set(irq, vect->enum_id, d);
intc_register_irq(desc, d, vect->enum_id, irq);
@@ -345,22 +348,8 @@ int __init register_intc_controller(struct intc_desc *desc)
* IRQ support, each vector still needs to have
* its own backing irq_desc.
*/
- res = irq_create_identity_mapping(d->domain, irq2);
- if (unlikely(res)) {
- if (res == -EEXIST) {
- res = irq_domain_associate(d->domain,
- irq2, irq2);
- if (unlikely(res)) {
- pr_err("domain association "
- "failure\n");
- continue;
- }
- } else {
- pr_err("can't identity map IRQ %d\n",
- irq);
- continue;
- }
- }
+ if (!intc_map(d->domain, irq2))
+ continue;
vect2->enum_id = 0;
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index a1b9be1d105a..fde4edd83c14 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -186,7 +186,7 @@ struct qm_eqcr_entry {
__be32 tag;
struct qm_fd fd;
u8 __reserved3[32];
-} __packed;
+} __packed __aligned(8);
#define QM_EQCR_VERB_VBIT 0x80
#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index 1fd29f93ff6d..5bdfb1565c14 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -756,6 +756,9 @@ int geni_icc_get(struct geni_se *se, const char *icc_ddr)
int i, err;
const char *icc_names[] = {"qup-core", "qup-config", icc_ddr};
+ if (has_acpi_companion(se->dev))
+ return 0;
+
for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
if (!icc_names[i])
continue;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index d0e7ed8f28cc..e5c443bfbdf9 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1166,6 +1166,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
target_get_sess_cmd(&cmd->se_cmd, true);
+ cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb);
if (cmd->sense_reason) {
if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
@@ -1180,8 +1181,6 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (cmd->sense_reason)
goto attach_cmd;
- /* only used for printks or comparing with ->ref_task_tag */
- cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd);
if (cmd->sense_reason)
goto attach_cmd;
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index 620bcf586ee2..c44fad2b9fbb 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -347,7 +347,7 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
ret = tb_retimer_nvm_add(rt);
if (ret) {
dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
- device_del(&rt->dev);
+ device_unregister(&rt->dev);
return ret;
}
@@ -406,7 +406,7 @@ static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
*/
int tb_retimer_scan(struct tb_port *port)
{
- u32 status[TB_MAX_RETIMER_INDEX] = {};
+ u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
int ret, i, last_idx = 0;
if (!port->cap_usb4)
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index f2ebbacd932e..d7d4bdd57f46 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -1128,6 +1128,10 @@ static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
return -ESHUTDOWN;
}
+ /* Requests have been dequeued while disabling the endpoint. */
+ if (!(pep->ep_state & EP_ENABLED))
+ return 0;
+
spin_lock_irqsave(&pdev->lock, flags);
ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request));
spin_unlock_irqrestore(&pdev->lock, flags);
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index 8f1de1fbbeed..d8d3892e5a69 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -63,6 +63,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
dev_info(dev, "stub up\n");
+ mutex_lock(&sdev->ud.sysfs_lock);
spin_lock_irq(&sdev->ud.lock);
if (sdev->ud.status != SDEV_ST_AVAILABLE) {
@@ -87,13 +88,13 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx");
if (IS_ERR(tcp_rx)) {
sockfd_put(socket);
- return -EINVAL;
+ goto unlock_mutex;
}
tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx");
if (IS_ERR(tcp_tx)) {
kthread_stop(tcp_rx);
sockfd_put(socket);
- return -EINVAL;
+ goto unlock_mutex;
}
/* get task structs now */
@@ -112,6 +113,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
wake_up_process(sdev->ud.tcp_rx);
wake_up_process(sdev->ud.tcp_tx);
+ mutex_unlock(&sdev->ud.sysfs_lock);
+
} else {
dev_info(dev, "stub down\n");
@@ -122,6 +125,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
spin_unlock_irq(&sdev->ud.lock);
usbip_event_add(&sdev->ud, SDEV_EVENT_DOWN);
+ mutex_unlock(&sdev->ud.sysfs_lock);
}
return count;
@@ -130,6 +134,8 @@ sock_err:
sockfd_put(socket);
err:
spin_unlock_irq(&sdev->ud.lock);
+unlock_mutex:
+ mutex_unlock(&sdev->ud.sysfs_lock);
return -EINVAL;
}
static DEVICE_ATTR_WO(usbip_sockfd);
@@ -270,6 +276,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev)
sdev->ud.side = USBIP_STUB;
sdev->ud.status = SDEV_ST_AVAILABLE;
spin_lock_init(&sdev->ud.lock);
+ mutex_init(&sdev->ud.sysfs_lock);
sdev->ud.tcp_socket = NULL;
sdev->ud.sockfd = -1;
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index d60ce17d3dd2..ea2a20e6d27d 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -263,6 +263,9 @@ struct usbip_device {
/* lock for status */
spinlock_t lock;
+ /* mutex for synchronizing sysfs store paths */
+ struct mutex sysfs_lock;
+
int sockfd;
struct socket *tcp_socket;
diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
index 5d88917c9631..086ca76dd053 100644
--- a/drivers/usb/usbip/usbip_event.c
+++ b/drivers/usb/usbip/usbip_event.c
@@ -70,6 +70,7 @@ static void event_handler(struct work_struct *work)
while ((ud = get_event()) != NULL) {
usbip_dbg_eh("pending event %lx\n", ud->event);
+ mutex_lock(&ud->sysfs_lock);
/*
* NOTE: shutdown must come first.
* Shutdown the device.
@@ -90,6 +91,7 @@ static void event_handler(struct work_struct *work)
ud->eh_ops.unusable(ud);
unset_event(ud, USBIP_EH_UNUSABLE);
}
+ mutex_unlock(&ud->sysfs_lock);
wake_up(&ud->eh_waitq);
}
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index a20a8380ca0c..4ba6bcdaa8e9 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -1101,6 +1101,7 @@ static void vhci_device_init(struct vhci_device *vdev)
vdev->ud.side = USBIP_VHCI;
vdev->ud.status = VDEV_ST_NULL;
spin_lock_init(&vdev->ud.lock);
+ mutex_init(&vdev->ud.sysfs_lock);
INIT_LIST_HEAD(&vdev->priv_rx);
INIT_LIST_HEAD(&vdev->priv_tx);
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index c4b4256e5dad..e2847cd3e6e3 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -185,6 +185,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
usbip_dbg_vhci_sysfs("enter\n");
+ mutex_lock(&vdev->ud.sysfs_lock);
+
/* lock */
spin_lock_irqsave(&vhci->lock, flags);
spin_lock(&vdev->ud.lock);
@@ -195,6 +197,7 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
/* unlock */
spin_unlock(&vdev->ud.lock);
spin_unlock_irqrestore(&vhci->lock, flags);
+ mutex_unlock(&vdev->ud.sysfs_lock);
return -EINVAL;
}
@@ -205,6 +208,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);
+ mutex_unlock(&vdev->ud.sysfs_lock);
+
return 0;
}
@@ -349,30 +354,36 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
else
vdev = &vhci->vhci_hcd_hs->vdev[rhport];
+ mutex_lock(&vdev->ud.sysfs_lock);
+
/* Extract socket from fd. */
socket = sockfd_lookup(sockfd, &err);
if (!socket) {
dev_err(dev, "failed to lookup sock");
- return -EINVAL;
+ err = -EINVAL;
+ goto unlock_mutex;
}
if (socket->type != SOCK_STREAM) {
dev_err(dev, "Expecting SOCK_STREAM - found %d",
socket->type);
sockfd_put(socket);
- return -EINVAL;
+ err = -EINVAL;
+ goto unlock_mutex;
}
/* create threads before locking */
tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx");
if (IS_ERR(tcp_rx)) {
sockfd_put(socket);
- return -EINVAL;
+ err = -EINVAL;
+ goto unlock_mutex;
}
tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx");
if (IS_ERR(tcp_tx)) {
kthread_stop(tcp_rx);
sockfd_put(socket);
- return -EINVAL;
+ err = -EINVAL;
+ goto unlock_mutex;
}
/* get task structs now */
@@ -397,7 +408,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
* Will be retried from userspace
* if there's another free port.
*/
- return -EBUSY;
+ err = -EBUSY;
+ goto unlock_mutex;
}
dev_info(dev, "pdev(%u) rhport(%u) sockfd(%d)\n",
@@ -423,7 +435,15 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
rh_port_connect(vdev, speed);
+ dev_info(dev, "Device attached\n");
+
+ mutex_unlock(&vdev->ud.sysfs_lock);
+
return count;
+
+unlock_mutex:
+ mutex_unlock(&vdev->ud.sysfs_lock);
+ return err;
}
static DEVICE_ATTR_WO(attach);
diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c
index c8eeabdd9b56..2bc428f2e261 100644
--- a/drivers/usb/usbip/vudc_dev.c
+++ b/drivers/usb/usbip/vudc_dev.c
@@ -572,6 +572,7 @@ static int init_vudc_hw(struct vudc *udc)
init_waitqueue_head(&udc->tx_waitq);
spin_lock_init(&ud->lock);
+ mutex_init(&ud->sysfs_lock);
ud->status = SDEV_ST_AVAILABLE;
ud->side = USBIP_VUDC;
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
index 7383a543c6d1..f7633ee655a1 100644
--- a/drivers/usb/usbip/vudc_sysfs.c
+++ b/drivers/usb/usbip/vudc_sysfs.c
@@ -112,6 +112,7 @@ static ssize_t usbip_sockfd_store(struct device *dev,
dev_err(dev, "no device");
return -ENODEV;
}
+ mutex_lock(&udc->ud.sysfs_lock);
spin_lock_irqsave(&udc->lock, flags);
/* Don't export what we don't have */
if (!udc->driver || !udc->pullup) {
@@ -187,6 +188,8 @@ static ssize_t usbip_sockfd_store(struct device *dev,
wake_up_process(udc->ud.tcp_rx);
wake_up_process(udc->ud.tcp_tx);
+
+ mutex_unlock(&udc->ud.sysfs_lock);
return count;
} else {
@@ -207,6 +210,7 @@ static ssize_t usbip_sockfd_store(struct device *dev,
}
spin_unlock_irqrestore(&udc->lock, flags);
+ mutex_unlock(&udc->ud.sysfs_lock);
return count;
@@ -216,6 +220,7 @@ unlock_ud:
spin_unlock_irq(&udc->ud.lock);
unlock:
spin_unlock_irqrestore(&udc->lock, flags);
+ mutex_unlock(&udc->ud.sysfs_lock);
return ret;
}
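
All of the usbip changes above apply one locking recipe: the sysfs store paths and the event handler now serialize on ud->sysfs_lock, and the mutex is always taken outside the short ud->lock spinlock. A condensed sketch of the ordering (the status check stands in for the real work):

static int example_store(struct usbip_device *ud)
{
	int err = -EINVAL;

	mutex_lock(&ud->sysfs_lock);	/* serialize vs. event handler */
	spin_lock_irq(&ud->lock);	/* then the short status lock */
	if (ud->status == SDEV_ST_AVAILABLE)
		err = 0;
	spin_unlock_irq(&ud->lock);
	mutex_unlock(&ud->sysfs_lock);
	return err;
}
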
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 08f742fd2409..b6cc53ba980c 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -4,9 +4,13 @@
#ifndef __MLX5_VDPA_H__
#define __MLX5_VDPA_H__
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
#include <linux/vdpa.h>
#include <linux/mlx5/driver.h>
+#define MLX5V_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+
struct mlx5_vdpa_direct_mr {
u64 start;
u64 end;
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index d300f799efcd..800cfd1967ad 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -219,6 +219,11 @@ static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_m
mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey);
}
+static struct device *get_dma_device(struct mlx5_vdpa_dev *mvdev)
+{
+ return &mvdev->mdev->pdev->dev;
+}
+
static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
struct vhost_iotlb *iotlb)
{
@@ -234,7 +239,7 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
u64 pa;
u64 paend;
struct scatterlist *sg;
- struct device *dma = mvdev->mdev->device;
+ struct device *dma = get_dma_device(mvdev);
for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
@@ -273,8 +278,10 @@ done:
mr->log_size = log_entity_size;
mr->nsg = nsg;
mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
- if (!mr->nent)
+ if (!mr->nent) {
+ err = -ENOMEM;
goto err_map;
+ }
err = create_direct_mr(mvdev, mr);
if (err)
@@ -291,7 +298,7 @@ err_map:
static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
- struct device *dma = mvdev->mdev->device;
+ struct device *dma = get_dma_device(mvdev);
destroy_direct_mr(mvdev, mr);
dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c
index 96e6421c5d1c..6521cbd0f5c2 100644
--- a/drivers/vdpa/mlx5/core/resources.c
+++ b/drivers/vdpa/mlx5/core/resources.c
@@ -246,7 +246,8 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
if (err)
goto err_key;
- kick_addr = pci_resource_start(mdev->pdev, 0) + offset;
+ kick_addr = mdev->bar_addr + offset;
+
res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
if (!res->kick_addr) {
err = -ENOMEM;
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 71397fdafa6a..4d2809c7d4e3 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -820,7 +820,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn);
MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent);
MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0,
- !!(ndev->mvdev.actual_features & VIRTIO_F_VERSION_1));
+ !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1)));
MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
@@ -1169,6 +1169,7 @@ static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
return;
}
mvq->avail_idx = attr.available_index;
+ mvq->used_idx = attr.used_index;
}
static void suspend_vqs(struct mlx5_vdpa_net *ndev)
@@ -1426,6 +1427,7 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
return -EINVAL;
}
+ mvq->used_idx = state->avail_index;
mvq->avail_idx = state->avail_index;
return 0;
}
@@ -1443,7 +1445,11 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
* that cares about emulating the index after vq is stopped.
*/
if (!mvq->initialized) {
- state->avail_index = mvq->avail_idx;
+ /* Firmware returns a wrong value for the available index.
+ * Since both values should be identical, we take the value of
+ * used_idx, which is reported correctly.
+ */
+ state->avail_index = mvq->used_idx;
return 0;
}
@@ -1452,7 +1458,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
return err;
}
- state->avail_index = attr.available_index;
+ state->avail_index = attr.used_index;
return 0;
}
@@ -1540,21 +1546,11 @@ static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
}
}
-static void clear_virtqueues(struct mlx5_vdpa_net *ndev)
-{
- int i;
-
- for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
- ndev->vqs[i].avail_idx = 0;
- ndev->vqs[i].used_idx = 0;
- }
-}
-
/* TODO: cross-endian support */
static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
{
return virtio_legacy_is_little_endian() ||
- (mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1));
+ (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
}
static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
@@ -1785,7 +1781,6 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
if (!status) {
mlx5_vdpa_info(mvdev, "performing device reset\n");
teardown_driver(ndev);
- clear_virtqueues(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
ndev->mvdev.mlx_features = 0;
@@ -1907,6 +1902,19 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.free = mlx5_vdpa_free,
};
+static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
+{
+ u16 hw_mtu;
+ int err;
+
+ err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
+ if (err)
+ return err;
+
+ *mtu = hw_mtu - MLX5V_ETH_HARD_MTU;
+ return 0;
+}
+
static int alloc_resources(struct mlx5_vdpa_net *ndev)
{
struct mlx5_vdpa_net_resources *res = &ndev->res;
@@ -1992,7 +2000,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
init_mvqs(ndev);
mutex_init(&ndev->reslock);
config = &ndev->config;
- err = mlx5_query_nic_vport_mtu(mdev, &ndev->mtu);
+ err = query_mtu(mdev, &ndev->mtu);
if (err)
goto err_mtu;
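
Two of the mlx5_vnet fixes above are worth spelling out. VIRTIO_F_VERSION_1 is feature bit 32, so masking actual_features with the raw constant tested bit 5 (0x20); BIT_ULL(32) builds the intended 64-bit mask 0x100000000. And query_mtu() subtracts MLX5V_ETH_HARD_MTU = ETH_HLEN (14) + VLAN_HLEN (4) + ETH_FCS_LEN (4) = 22 bytes, so a firmware hw_mtu of 1522 is reported to virtio as the familiar 1500.
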
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 65e7e6b44578..5023e23db3bc 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1656,6 +1656,8 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+ return -EINVAL;
if (vma->vm_end < vma->vm_start)
return -EINVAL;
if ((vma->vm_flags & VM_SHARED) == 0)
@@ -1664,7 +1666,7 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
int regnum = index - VFIO_PCI_NUM_REGIONS;
struct vfio_pci_region *region = vdev->region + regnum;
- if (region && region->ops && region->ops->mmap &&
+ if (region->ops && region->ops->mmap &&
(region->flags & VFIO_REGION_INFO_FLAG_MMAP))
return region->ops->mmap(vdev, region, vma);
return -EINVAL;
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index e0a27e336293..bfa4c6ef554e 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -745,9 +745,11 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
const struct vdpa_config_ops *ops = vdpa->config;
int r = 0;
+ mutex_lock(&dev->mutex);
+
r = vhost_dev_check_owner(dev);
if (r)
- return r;
+ goto unlock;
switch (msg->type) {
case VHOST_IOTLB_UPDATE:
@@ -768,6 +770,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
r = -EINVAL;
break;
}
+unlock:
+ mutex_unlock(&dev->mutex);
return r;
}
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 4dc9077dd2ac..a7e6eea2c4a1 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -308,7 +308,7 @@ static inline int synthvid_send(struct hv_device *hdev,
VM_PKT_DATA_INBAND, 0);
if (ret)
- pr_err("Unable to send packet via vmbus\n");
+ pr_err_ratelimited("Unable to send packet via vmbus; error %d\n", ret);
return ret;
}
diff --git a/drivers/watchdog/armada_37xx_wdt.c b/drivers/watchdog/armada_37xx_wdt.c
index e5dcb26d85f0..1635f421ef2c 100644
--- a/drivers/watchdog/armada_37xx_wdt.c
+++ b/drivers/watchdog/armada_37xx_wdt.c
@@ -2,7 +2,7 @@
/*
* Watchdog driver for Marvell Armada 37xx SoCs
*
- * Author: Marek Behun <marek.behun@nic.cz>
+ * Author: Marek Behún <kabel@kernel.org>
*/
#include <linux/clk.h>
@@ -366,7 +366,7 @@ static struct platform_driver armada_37xx_wdt_driver = {
module_platform_driver(armada_37xx_wdt_driver);
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
MODULE_DESCRIPTION("Armada 37xx CPU Watchdog");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index ea0efd290c37..5f1ce59b44b9 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -238,37 +238,6 @@ config XEN_PRIVCMD
depends on XEN
default m
-config XEN_STUB
- bool "Xen stub drivers"
- depends on XEN && X86_64 && BROKEN
- help
- Allow kernel to install stub drivers, to reserve space for Xen drivers,
- i.e. memory hotplug and cpu hotplug, and to block native drivers loaded,
- so that real Xen drivers can be modular.
-
- To enable Xen features like cpu and memory hotplug, select Y here.
-
-config XEN_ACPI_HOTPLUG_MEMORY
- tristate "Xen ACPI memory hotplug"
- depends on XEN_DOM0 && XEN_STUB && ACPI
- help
- This is Xen ACPI memory hotplug.
-
- Currently Xen only support ACPI memory hot-add. If you want
- to hot-add memory at runtime (the hot-added memory cannot be
- removed until machine stop), select Y/M here, otherwise select N.
-
-config XEN_ACPI_HOTPLUG_CPU
- tristate "Xen ACPI cpu hotplug"
- depends on XEN_DOM0 && XEN_STUB && ACPI
- select ACPI_CONTAINER
- help
- Xen ACPI cpu enumerating and hotplugging
-
- For hotplugging, currently Xen only support ACPI cpu hotadd.
- If you want to hotadd cpu at runtime (the hotadded cpu cannot
- be removed until machine stop), select Y/M here.
-
config XEN_ACPI_PROCESSOR
tristate "Xen ACPI processor"
depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index c3621b9f4012..3434593455b2 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -26,9 +26,6 @@ obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_MCE_LOG) += mcelog.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
-obj-$(CONFIG_XEN_STUB) += xen-stub.o
-obj-$(CONFIG_XEN_ACPI_HOTPLUG_MEMORY) += xen-acpi-memhotplug.o
-obj-$(CONFIG_XEN_ACPI_HOTPLUG_CPU) += xen-acpi-cpuhotplug.o
obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
obj-$(CONFIG_XEN_EFI) += efi.o
obj-$(CONFIG_XEN_SCSI_BACKEND) += xen-scsiback.o
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 8236e2364eeb..7bbfd58958bc 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -110,7 +110,7 @@ struct irq_info {
unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */
unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
u64 eoi_time; /* Time in jiffies when to EOI. */
- spinlock_t lock;
+ raw_spinlock_t lock;
union {
unsigned short virq;
@@ -312,7 +312,7 @@ static int xen_irq_info_common_setup(struct irq_info *info,
info->evtchn = evtchn;
info->cpu = cpu;
info->mask_reason = EVT_MASK_REASON_EXPLICIT;
- spin_lock_init(&info->lock);
+ raw_spin_lock_init(&info->lock);
ret = set_evtchn_to_irq(evtchn, irq);
if (ret < 0)
@@ -472,28 +472,28 @@ static void do_mask(struct irq_info *info, u8 reason)
{
unsigned long flags;
- spin_lock_irqsave(&info->lock, flags);
+ raw_spin_lock_irqsave(&info->lock, flags);
if (!info->mask_reason)
mask_evtchn(info->evtchn);
info->mask_reason |= reason;
- spin_unlock_irqrestore(&info->lock, flags);
+ raw_spin_unlock_irqrestore(&info->lock, flags);
}
static void do_unmask(struct irq_info *info, u8 reason)
{
unsigned long flags;
- spin_lock_irqsave(&info->lock, flags);
+ raw_spin_lock_irqsave(&info->lock, flags);
info->mask_reason &= ~reason;
if (!info->mask_reason)
unmask_evtchn(info->evtchn);
- spin_unlock_irqrestore(&info->lock, flags);
+ raw_spin_unlock_irqrestore(&info->lock, flags);
}
#ifdef CONFIG_X86
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index cdc6daa7a9f6..1bcdd5227771 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -345,41 +345,6 @@ static irqreturn_t xen_pcpu_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Sync with Xen hypervisor after cpu hotadded */
-void xen_pcpu_hotplug_sync(void)
-{
- schedule_work(&xen_pcpu_work);
-}
-EXPORT_SYMBOL_GPL(xen_pcpu_hotplug_sync);
-
-/*
- * For hypervisor presented cpu, return logic cpu id;
- * For hypervisor non-presented cpu, return -ENODEV.
- */
-int xen_pcpu_id(uint32_t acpi_id)
-{
- int cpu_id = 0, max_id = 0;
- struct xen_platform_op op;
-
- op.cmd = XENPF_get_cpuinfo;
- while (cpu_id <= max_id) {
- op.u.pcpu_info.xen_cpuid = cpu_id;
- if (HYPERVISOR_platform_op(&op)) {
- cpu_id++;
- continue;
- }
-
- if (acpi_id == op.u.pcpu_info.acpi_id)
- return cpu_id;
- if (op.u.pcpu_info.max_present > max_id)
- max_id = op.u.pcpu_info.max_present;
- cpu_id++;
- }
-
- return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(xen_pcpu_id);
-
static int __init xen_pcpu_init(void)
{
int irq, ret;
diff --git a/drivers/xen/time.c b/drivers/xen/time.c
index 108edbcbc040..152dd33bb223 100644
--- a/drivers/xen/time.c
+++ b/drivers/xen/time.c
@@ -7,6 +7,7 @@
#include <linux/math64.h>
#include <linux/gfp.h>
#include <linux/slab.h>
+#include <linux/static_call.h>
#include <asm/paravirt.h>
#include <asm/xen/hypervisor.h>
@@ -175,7 +176,7 @@ void __init xen_time_setup_guest(void)
xen_runstate_remote = !HYPERVISOR_vm_assist(VMASST_CMD_enable,
VMASST_TYPE_runstate_update_flag);
- pv_ops.time.steal_clock = xen_steal_clock;
+ static_call_update(pv_steal_clock, xen_steal_clock);
static_key_slow_inc(&paravirt_steal_enabled);
if (xen_runstate_remote)
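
The steal-clock hookup above moves from an indirect pv_ops function pointer to a static call, which on x86 patches the call sites directly. A hedged sketch of the declare/update pair (the native default shown is an assumption of this sketch):

DECLARE_STATIC_CALL(pv_steal_clock, native_steal_clock);

static void example_enable_xen_steal_clock(void)
{
	/* Re-point every pv_steal_clock call site at xen_steal_clock. */
	static_call_update(pv_steal_clock, xen_steal_clock);
}
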
diff --git a/drivers/xen/xen-acpi-cpuhotplug.c b/drivers/xen/xen-acpi-cpuhotplug.c
deleted file mode 100644
index 00ab1ece02e5..000000000000
--- a/drivers/xen/xen-acpi-cpuhotplug.c
+++ /dev/null
@@ -1,446 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2012 Intel Corporation
- * Author: Liu Jinsong <jinsong.liu@intel.com>
- * Author: Jiang Yunhong <yunhong.jiang@intel.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/cpu.h>
-#include <linux/acpi.h>
-#include <linux/uaccess.h>
-#include <acpi/processor.h>
-#include <xen/acpi.h>
-#include <xen/interface/platform.h>
-#include <asm/xen/hypercall.h>
-
-#define PREFIX "ACPI:xen_cpu_hotplug:"
-
-#define INSTALL_NOTIFY_HANDLER 0
-#define UNINSTALL_NOTIFY_HANDLER 1
-
-static acpi_status xen_acpi_cpu_hotadd(struct acpi_processor *pr);
-
-/* --------------------------------------------------------------------------
- Driver Interface
--------------------------------------------------------------------------- */
-
-static int xen_acpi_processor_enable(struct acpi_device *device)
-{
- acpi_status status = 0;
- unsigned long long value;
- union acpi_object object = { 0 };
- struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
- struct acpi_processor *pr = acpi_driver_data(device);
-
- if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
- /* Declared with "Processor" statement; match ProcessorID */
- status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- pr_err(PREFIX "Evaluating processor object\n");
- return -ENODEV;
- }
-
- pr->acpi_id = object.processor.proc_id;
- } else {
- /* Declared with "Device" statement; match _UID */
- status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
- NULL, &value);
- if (ACPI_FAILURE(status)) {
- pr_err(PREFIX "Evaluating processor _UID\n");
- return -ENODEV;
- }
-
- pr->acpi_id = value;
- }
-
- pr->id = xen_pcpu_id(pr->acpi_id);
-
- if (invalid_logical_cpuid(pr->id))
- /* This CPU is not present in the hypervisor; try to hot-add it */
- if (ACPI_FAILURE(xen_acpi_cpu_hotadd(pr))) {
- pr_err(PREFIX "Hotadd CPU (acpi_id = %d) failed.\n",
- pr->acpi_id);
- return -ENODEV;
- }
-
- return 0;
-}
-
-static int xen_acpi_processor_add(struct acpi_device *device)
-{
- int ret;
- struct acpi_processor *pr;
-
- if (!device)
- return -EINVAL;
-
- pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
- if (!pr)
- return -ENOMEM;
-
- pr->handle = device->handle;
- strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
- device->driver_data = pr;
-
- ret = xen_acpi_processor_enable(device);
- if (ret)
- pr_err(PREFIX "Error when enabling Xen processor\n");
-
- return ret;
-}
-
-static int xen_acpi_processor_remove(struct acpi_device *device)
-{
- struct acpi_processor *pr;
-
- if (!device)
- return -EINVAL;
-
- pr = acpi_driver_data(device);
- if (!pr)
- return -EINVAL;
-
- kfree(pr);
- return 0;
-}
-
-/*--------------------------------------------------------------
- ACPI processor hotplug support
---------------------------------------------------------------*/
-
-static int is_processor_present(acpi_handle handle)
-{
- acpi_status status;
- unsigned long long sta = 0;
-
-
- status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
-
- if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
- return 1;
-
- /*
- * _STA is mandatory for a processor that supports hot plug
- */
- if (status == AE_NOT_FOUND)
- pr_info(PREFIX "Processor does not support hot plug\n");
- else
- pr_info(PREFIX "Processor Device is not present");
- return 0;
-}
-
-static int xen_apic_id(acpi_handle handle)
-{
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- struct acpi_madt_local_apic *lapic;
- int apic_id;
-
- if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
- return -EINVAL;
-
- if (!buffer.length || !buffer.pointer)
- return -EINVAL;
-
- obj = buffer.pointer;
- if (obj->type != ACPI_TYPE_BUFFER ||
- obj->buffer.length < sizeof(*lapic)) {
- kfree(buffer.pointer);
- return -EINVAL;
- }
-
- lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
-
- if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
- !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
- kfree(buffer.pointer);
- return -EINVAL;
- }
-
- apic_id = (uint32_t)lapic->id;
- kfree(buffer.pointer);
- buffer.length = ACPI_ALLOCATE_BUFFER;
- buffer.pointer = NULL;
-
- return apic_id;
-}
-
-static int xen_hotadd_cpu(struct acpi_processor *pr)
-{
- int cpu_id, apic_id, pxm;
- struct xen_platform_op op;
-
- apic_id = xen_apic_id(pr->handle);
- if (apic_id < 0) {
- pr_err(PREFIX "Failed to get apic_id for acpi_id %d\n",
- pr->acpi_id);
- return -ENODEV;
- }
-
- pxm = xen_acpi_get_pxm(pr->handle);
- if (pxm < 0) {
- pr_err(PREFIX "Failed to get _PXM for acpi_id %d\n",
- pr->acpi_id);
- return pxm;
- }
-
- op.cmd = XENPF_cpu_hotadd;
- op.u.cpu_add.apic_id = apic_id;
- op.u.cpu_add.acpi_id = pr->acpi_id;
- op.u.cpu_add.pxm = pxm;
-
- cpu_id = HYPERVISOR_platform_op(&op);
- if (cpu_id < 0)
- pr_err(PREFIX "Failed to hotadd CPU for acpi_id %d\n",
- pr->acpi_id);
-
- return cpu_id;
-}
-
-static acpi_status xen_acpi_cpu_hotadd(struct acpi_processor *pr)
-{
- if (!is_processor_present(pr->handle))
- return AE_ERROR;
-
- pr->id = xen_hotadd_cpu(pr);
- if (invalid_logical_cpuid(pr->id))
- return AE_ERROR;
-
- /*
- * Sync with Xen hypervisor, providing new /sys/.../xen_cpuX
- * interface after cpu hotadded.
- */
- xen_pcpu_hotplug_sync();
-
- return AE_OK;
-}
-
-static int acpi_processor_device_remove(struct acpi_device *device)
-{
- pr_debug(PREFIX "Xen does not support CPU hotremove\n");
-
- return -ENOSYS;
-}
-
-static void acpi_processor_hotplug_notify(acpi_handle handle,
- u32 event, void *data)
-{
- struct acpi_processor *pr;
- struct acpi_device *device = NULL;
- u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
- int result;
-
- acpi_scan_lock_acquire();
-
- switch (event) {
- case ACPI_NOTIFY_BUS_CHECK:
- case ACPI_NOTIFY_DEVICE_CHECK:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Processor driver received %s event\n",
- (event == ACPI_NOTIFY_BUS_CHECK) ?
- "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"));
-
- if (!is_processor_present(handle))
- break;
-
- acpi_bus_get_device(handle, &device);
- if (acpi_device_enumerated(device))
- break;
-
- result = acpi_bus_scan(handle);
- if (result) {
- pr_err(PREFIX "Unable to add the device\n");
- break;
- }
- device = NULL;
- acpi_bus_get_device(handle, &device);
- if (!acpi_device_enumerated(device)) {
- pr_err(PREFIX "Missing device object\n");
- break;
- }
- ost_code = ACPI_OST_SC_SUCCESS;
- break;
-
- case ACPI_NOTIFY_EJECT_REQUEST:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "received ACPI_NOTIFY_EJECT_REQUEST\n"));
-
- if (acpi_bus_get_device(handle, &device)) {
- pr_err(PREFIX "Device don't exist, dropping EJECT\n");
- break;
- }
- pr = acpi_driver_data(device);
- if (!pr) {
- pr_err(PREFIX "Driver data is NULL, dropping EJECT\n");
- break;
- }
-
- /*
- * TBD: implement acpi_processor_device_remove if Xen supports
- * CPU hot-remove in the future.
- */
- acpi_processor_device_remove(device);
- break;
-
- default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Unsupported event [0x%x]\n", event));
-
- /* non-hotplug event; possibly handled by other handler */
- goto out;
- }
-
- (void) acpi_evaluate_ost(handle, event, ost_code, NULL);
-
-out:
- acpi_scan_lock_release();
-}
-
-static acpi_status is_processor_device(acpi_handle handle)
-{
- struct acpi_device_info *info;
- char *hid;
- acpi_status status;
-
- status = acpi_get_object_info(handle, &info);
- if (ACPI_FAILURE(status))
- return status;
-
- if (info->type == ACPI_TYPE_PROCESSOR) {
- kfree(info);
- return AE_OK; /* found a processor object */
- }
-
- if (!(info->valid & ACPI_VALID_HID)) {
- kfree(info);
- return AE_ERROR;
- }
-
- hid = info->hardware_id.string;
- if ((hid == NULL) || strcmp(hid, ACPI_PROCESSOR_DEVICE_HID)) {
- kfree(info);
- return AE_ERROR;
- }
-
- kfree(info);
- return AE_OK; /* found a processor device object */
-}
-
-static acpi_status
-processor_walk_namespace_cb(acpi_handle handle,
- u32 lvl, void *context, void **rv)
-{
- acpi_status status;
- int *action = context;
-
- status = is_processor_device(handle);
- if (ACPI_FAILURE(status))
- return AE_OK; /* not a processor; continue to walk */
-
- switch (*action) {
- case INSTALL_NOTIFY_HANDLER:
- acpi_install_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- acpi_processor_hotplug_notify,
- NULL);
- break;
- case UNINSTALL_NOTIFY_HANDLER:
- acpi_remove_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- acpi_processor_hotplug_notify);
- break;
- default:
- break;
- }
-
- /* found a processor; skip walking underneath */
- return AE_CTRL_DEPTH;
-}
-
-static
-void acpi_processor_install_hotplug_notify(void)
-{
- int action = INSTALL_NOTIFY_HANDLER;
- acpi_walk_namespace(ACPI_TYPE_ANY,
- ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- processor_walk_namespace_cb, NULL, &action, NULL);
-}
-
-static
-void acpi_processor_uninstall_hotplug_notify(void)
-{
- int action = UNINSTALL_NOTIFY_HANDLER;
- acpi_walk_namespace(ACPI_TYPE_ANY,
- ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- processor_walk_namespace_cb, NULL, &action, NULL);
-}
-
-static const struct acpi_device_id processor_device_ids[] = {
- {ACPI_PROCESSOR_OBJECT_HID, 0},
- {ACPI_PROCESSOR_DEVICE_HID, 0},
- {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, processor_device_ids);
-
-static struct acpi_driver xen_acpi_processor_driver = {
- .name = "processor",
- .class = ACPI_PROCESSOR_CLASS,
- .ids = processor_device_ids,
- .ops = {
- .add = xen_acpi_processor_add,
- .remove = xen_acpi_processor_remove,
- },
-};
-
-static int __init xen_acpi_processor_init(void)
-{
- int result = 0;
-
- if (!xen_initial_domain())
- return -ENODEV;
-
- /* unregister the stub, which is only used to reserve driver space */
- xen_stub_processor_exit();
-
- result = acpi_bus_register_driver(&xen_acpi_processor_driver);
- if (result < 0) {
- xen_stub_processor_init();
- return result;
- }
-
- acpi_processor_install_hotplug_notify();
- return 0;
-}
-
-static void __exit xen_acpi_processor_exit(void)
-{
- if (!xen_initial_domain())
- return;
-
- acpi_processor_uninstall_hotplug_notify();
-
- acpi_bus_unregister_driver(&xen_acpi_processor_driver);
-
- /*
- * Have the stub reserve the space again to prevent any chance of
- * the native driver loading.
- */
- xen_stub_processor_init();
- return;
-}
-
-module_init(xen_acpi_processor_init);
-module_exit(xen_acpi_processor_exit);
-ACPI_MODULE_NAME("xen-acpi-cpuhotplug");
-MODULE_AUTHOR("Liu Jinsong <jinsong.liu@intel.com>");
-MODULE_DESCRIPTION("Xen Hotplug CPU Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xen-acpi-memhotplug.c b/drivers/xen/xen-acpi-memhotplug.c
deleted file mode 100644
index f914b72557ef..000000000000
--- a/drivers/xen/xen-acpi-memhotplug.c
+++ /dev/null
@@ -1,475 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2012 Intel Corporation
- * Author: Liu Jinsong <jinsong.liu@intel.com>
- * Author: Jiang Yunhong <yunhong.jiang@intel.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/acpi.h>
-#include <xen/acpi.h>
-#include <xen/interface/platform.h>
-#include <asm/xen/hypercall.h>
-
-#define PREFIX "ACPI:xen_memory_hotplug:"
-
-struct acpi_memory_info {
- struct list_head list;
- u64 start_addr; /* Memory Range start physical addr */
- u64 length; /* Memory Range length */
- unsigned short caching; /* memory cache attribute */
- unsigned short write_protect; /* memory read/write attribute */
- /* copied from buffer getting from _CRS */
- unsigned int enabled:1;
-};
-
-struct acpi_memory_device {
- struct acpi_device *device;
- struct list_head res_list;
-};
-
-static bool acpi_hotmem_initialized __read_mostly;
-
-static int xen_hotadd_memory(int pxm, struct acpi_memory_info *info)
-{
- int rc;
- struct xen_platform_op op;
-
- op.cmd = XENPF_mem_hotadd;
- op.u.mem_add.spfn = info->start_addr >> PAGE_SHIFT;
- op.u.mem_add.epfn = (info->start_addr + info->length) >> PAGE_SHIFT;
- op.u.mem_add.pxm = pxm;
-
- rc = HYPERVISOR_dom0_op(&op);
- if (rc)
- pr_err(PREFIX "Xen Hotplug Memory Add failed on "
- "0x%lx -> 0x%lx, _PXM: %d, error: %d\n",
- (unsigned long)info->start_addr,
- (unsigned long)(info->start_addr + info->length),
- pxm, rc);
-
- return rc;
-}
-
-static int xen_acpi_memory_enable_device(struct acpi_memory_device *mem_device)
-{
- int pxm, result;
- int num_enabled = 0;
- struct acpi_memory_info *info;
-
- if (!mem_device)
- return -EINVAL;
-
- pxm = xen_acpi_get_pxm(mem_device->device->handle);
- if (pxm < 0)
- return pxm;
-
- list_for_each_entry(info, &mem_device->res_list, list) {
- if (info->enabled) { /* just a sanity check... */
- num_enabled++;
- continue;
- }
-
- if (!info->length)
- continue;
-
- result = xen_hotadd_memory(pxm, info);
- if (result)
- continue;
- info->enabled = 1;
- num_enabled++;
- }
-
- if (!num_enabled)
- return -ENODEV;
-
- return 0;
-}
-
-static acpi_status
-acpi_memory_get_resource(struct acpi_resource *resource, void *context)
-{
- struct acpi_memory_device *mem_device = context;
- struct acpi_resource_address64 address64;
- struct acpi_memory_info *info, *new;
- acpi_status status;
-
- status = acpi_resource_to_address64(resource, &address64);
- if (ACPI_FAILURE(status) ||
- (address64.resource_type != ACPI_MEMORY_RANGE))
- return AE_OK;
-
- list_for_each_entry(info, &mem_device->res_list, list) {
- if ((info->caching == address64.info.mem.caching) &&
- (info->write_protect == address64.info.mem.write_protect) &&
- (info->start_addr + info->length == address64.address.minimum)) {
- info->length += address64.address.address_length;
- return AE_OK;
- }
- }
-
- new = kzalloc(sizeof(struct acpi_memory_info), GFP_KERNEL);
- if (!new)
- return AE_ERROR;
-
- INIT_LIST_HEAD(&new->list);
- new->caching = address64.info.mem.caching;
- new->write_protect = address64.info.mem.write_protect;
- new->start_addr = address64.address.minimum;
- new->length = address64.address.address_length;
- list_add_tail(&new->list, &mem_device->res_list);
-
- return AE_OK;
-}
-
-static int
-acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
-{
- acpi_status status;
- struct acpi_memory_info *info, *n;
-
- if (!list_empty(&mem_device->res_list))
- return 0;
-
- status = acpi_walk_resources(mem_device->device->handle,
- METHOD_NAME__CRS, acpi_memory_get_resource, mem_device);
-
- if (ACPI_FAILURE(status)) {
- list_for_each_entry_safe(info, n, &mem_device->res_list, list)
- kfree(info);
- INIT_LIST_HEAD(&mem_device->res_list);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int acpi_memory_get_device(acpi_handle handle,
- struct acpi_memory_device **mem_device)
-{
- struct acpi_device *device = NULL;
- int result = 0;
-
- acpi_scan_lock_acquire();
-
- acpi_bus_get_device(handle, &device);
- if (acpi_device_enumerated(device))
- goto end;
-
- /*
- * Now add the notified device. This creates the acpi_device
- * and invokes its .add() callback.
- */
- result = acpi_bus_scan(handle);
- if (result) {
- pr_warn(PREFIX "ACPI namespace scan failed\n");
- result = -EINVAL;
- goto out;
- }
- device = NULL;
- acpi_bus_get_device(handle, &device);
- if (!acpi_device_enumerated(device)) {
- pr_warn(PREFIX "Missing device object\n");
- result = -EINVAL;
- goto out;
- }
-
-end:
- *mem_device = acpi_driver_data(device);
- if (!(*mem_device)) {
- pr_err(PREFIX "driver data not found\n");
- result = -ENODEV;
- goto out;
- }
-
-out:
- acpi_scan_lock_release();
- return result;
-}
-
-static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
-{
- unsigned long long current_status;
-
- /* Get device present/absent information from the _STA */
- if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle,
- "_STA", NULL, &current_status)))
- return -ENODEV;
- /*
- * Check for device status. Device should be
- * present/enabled/functioning.
- */
- if (!((current_status & ACPI_STA_DEVICE_PRESENT)
- && (current_status & ACPI_STA_DEVICE_ENABLED)
- && (current_status & ACPI_STA_DEVICE_FUNCTIONING)))
- return -ENODEV;
-
- return 0;
-}
-
-static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
-{
- pr_debug(PREFIX "Xen does not support memory hotremove\n");
-
- return -ENOSYS;
-}
-
-static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
-{
- struct acpi_memory_device *mem_device;
- struct acpi_device *device;
- u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
-
- switch (event) {
- case ACPI_NOTIFY_BUS_CHECK:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "\nReceived BUS CHECK notification for device\n"));
- fallthrough;
- case ACPI_NOTIFY_DEVICE_CHECK:
- if (event == ACPI_NOTIFY_DEVICE_CHECK)
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "\nReceived DEVICE CHECK notification for device\n"));
-
- if (acpi_memory_get_device(handle, &mem_device)) {
- pr_err(PREFIX "Cannot find driver data\n");
- break;
- }
-
- ost_code = ACPI_OST_SC_SUCCESS;
- break;
-
- case ACPI_NOTIFY_EJECT_REQUEST:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "\nReceived EJECT REQUEST notification for device\n"));
-
- acpi_scan_lock_acquire();
- if (acpi_bus_get_device(handle, &device)) {
- acpi_scan_lock_release();
- pr_err(PREFIX "Device doesn't exist\n");
- break;
- }
- mem_device = acpi_driver_data(device);
- if (!mem_device) {
- acpi_scan_lock_release();
- pr_err(PREFIX "Driver Data is NULL\n");
- break;
- }
-
- /*
- * TBD: implement acpi_memory_disable_device and invoke
- * acpi_bus_remove if Xen supports hot-remove in the future
- */
- acpi_memory_disable_device(mem_device);
- acpi_scan_lock_release();
- break;
-
- default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Unsupported event [0x%x]\n", event));
- /* non-hotplug event; possibly handled by other handler */
- return;
- }
-
- (void) acpi_evaluate_ost(handle, event, ost_code, NULL);
- return;
-}
-
-static int xen_acpi_memory_device_add(struct acpi_device *device)
-{
- int result;
- struct acpi_memory_device *mem_device = NULL;
-
-
- if (!device)
- return -EINVAL;
-
- mem_device = kzalloc(sizeof(struct acpi_memory_device), GFP_KERNEL);
- if (!mem_device)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&mem_device->res_list);
- mem_device->device = device;
- sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME);
- sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS);
- device->driver_data = mem_device;
-
- /* Get the range from the _CRS */
- result = acpi_memory_get_device_resources(mem_device);
- if (result) {
- kfree(mem_device);
- return result;
- }
-
- /*
- * For memory devices present at boot, the early boot code has already
- * recognized the memory ranges via EFI/E820, so if the DSDT exposes
- * them at boot, no hotplug action is needed.
- * Memory devices hot-added at runtime, however, require a hypercall
- * to the Xen hypervisor to add the memory.
- */
- if (!acpi_hotmem_initialized)
- return 0;
-
- if (!acpi_memory_check_device(mem_device))
- result = xen_acpi_memory_enable_device(mem_device);
-
- return result;
-}
-
-static int xen_acpi_memory_device_remove(struct acpi_device *device)
-{
- struct acpi_memory_device *mem_device = NULL;
-
- if (!device || !acpi_driver_data(device))
- return -EINVAL;
-
- mem_device = acpi_driver_data(device);
- kfree(mem_device);
-
- return 0;
-}
-
-/*
- * Helper function to check for memory device
- */
-static acpi_status is_memory_device(acpi_handle handle)
-{
- char *hardware_id;
- acpi_status status;
- struct acpi_device_info *info;
-
- status = acpi_get_object_info(handle, &info);
- if (ACPI_FAILURE(status))
- return status;
-
- if (!(info->valid & ACPI_VALID_HID)) {
- kfree(info);
- return AE_ERROR;
- }
-
- hardware_id = info->hardware_id.string;
- if ((hardware_id == NULL) ||
- (strcmp(hardware_id, ACPI_MEMORY_DEVICE_HID)))
- status = AE_ERROR;
-
- kfree(info);
- return status;
-}
-
-static acpi_status
-acpi_memory_register_notify_handler(acpi_handle handle,
- u32 level, void *ctxt, void **retv)
-{
- acpi_status status;
-
- status = is_memory_device(handle);
- if (ACPI_FAILURE(status))
- return AE_OK; /* continue */
-
- status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- acpi_memory_device_notify, NULL);
- /* continue */
- return AE_OK;
-}
-
-static acpi_status
-acpi_memory_deregister_notify_handler(acpi_handle handle,
- u32 level, void *ctxt, void **retv)
-{
- acpi_status status;
-
- status = is_memory_device(handle);
- if (ACPI_FAILURE(status))
- return AE_OK; /* continue */
-
- status = acpi_remove_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- acpi_memory_device_notify);
-
- return AE_OK; /* continue */
-}
-
-static const struct acpi_device_id memory_device_ids[] = {
- {ACPI_MEMORY_DEVICE_HID, 0},
- {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, memory_device_ids);
-
-static struct acpi_driver xen_acpi_memory_device_driver = {
- .name = "acpi_memhotplug",
- .class = ACPI_MEMORY_DEVICE_CLASS,
- .ids = memory_device_ids,
- .ops = {
- .add = xen_acpi_memory_device_add,
- .remove = xen_acpi_memory_device_remove,
- },
-};
-
-static int __init xen_acpi_memory_device_init(void)
-{
- int result;
- acpi_status status;
-
- if (!xen_initial_domain())
- return -ENODEV;
-
- /* unregister the stub, which is only used to reserve driver space */
- xen_stub_memory_device_exit();
-
- result = acpi_bus_register_driver(&xen_acpi_memory_device_driver);
- if (result < 0) {
- xen_stub_memory_device_init();
- return -ENODEV;
- }
-
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- acpi_memory_register_notify_handler,
- NULL, NULL, NULL);
-
- if (ACPI_FAILURE(status)) {
- pr_warn(PREFIX "walk_namespace failed\n");
- acpi_bus_unregister_driver(&xen_acpi_memory_device_driver);
- xen_stub_memory_device_init();
- return -ENODEV;
- }
-
- acpi_hotmem_initialized = true;
- return 0;
-}
-
-static void __exit xen_acpi_memory_device_exit(void)
-{
- acpi_status status;
-
- if (!xen_initial_domain())
- return;
-
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- acpi_memory_deregister_notify_handler,
- NULL, NULL, NULL);
- if (ACPI_FAILURE(status))
- pr_warn(PREFIX "walk_namespace failed\n");
-
- acpi_bus_unregister_driver(&xen_acpi_memory_device_driver);
-
- /*
- * Have the stub reserve the space again to prevent any chance of
- * the native driver loading.
- */
- xen_stub_memory_device_init();
- return;
-}
-
-module_init(xen_acpi_memory_device_init);
-module_exit(xen_acpi_memory_device_exit);
-ACPI_MODULE_NAME("xen-acpi-memhotplug");
-MODULE_AUTHOR("Liu Jinsong <jinsong.liu@intel.com>");
-MODULE_DESCRIPTION("Xen Hotplug Mem Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index cb904ac83006..f8e4faa96ad6 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -802,7 +802,7 @@ static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
"guest with no AER driver should have been killed\n");
goto end;
}
- result = common_process(psdev, 1, XEN_PCI_OP_aer_slotreset, result);
+ result = common_process(psdev, pci_channel_io_normal, XEN_PCI_OP_aer_slotreset, result);
if (result == PCI_ERS_RESULT_NONE ||
result == PCI_ERS_RESULT_DISCONNECT) {
@@ -859,7 +859,7 @@ static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
"guest with no AER driver should have been killed\n");
goto end;
}
- result = common_process(psdev, 1, XEN_PCI_OP_aer_mmio, result);
+ result = common_process(psdev, pci_channel_io_normal, XEN_PCI_OP_aer_mmio, result);
if (result == PCI_ERS_RESULT_NONE ||
result == PCI_ERS_RESULT_DISCONNECT) {
@@ -970,7 +970,7 @@ static void xen_pcibk_error_resume(struct pci_dev *dev)
kill_domain_by_device(psdev);
goto end;
}
- common_process(psdev, 1, XEN_PCI_OP_aer_resume,
+ common_process(psdev, pci_channel_io_normal, XEN_PCI_OP_aer_resume,
PCI_ERS_RESULT_RECOVERED);
end:
if (psdev)
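The literal 1 that these calls used to pass happens to be the value of pci_channel_io_normal, but the named pci_channel_state_t constant documents the intent and is robust against renumbering. For reference, the channel states are roughly the following (paraphrasing include/linux/pci.h, which spells them with __bitwise annotations):

typedef enum pci_channel_state {
        pci_channel_io_normal = 1,       /* I/O channel is in normal state */
        pci_channel_io_frozen = 2,       /* I/O to the channel is blocked */
        pci_channel_io_perm_failure = 3, /* the PCI card is dead */
} pci_channel_state_t;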
diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
index 5447b5ab7c76..4162d0e7e00d 100644
--- a/drivers/xen/xen-pciback/vpci.c
+++ b/drivers/xen/xen-pciback/vpci.c
@@ -233,7 +233,6 @@ static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
unsigned int *devfn)
{
struct pci_dev_entry *entry;
- struct pci_dev *dev = NULL;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
int found = 0, slot;
@@ -242,11 +241,7 @@ static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
list_for_each_entry(entry,
&vpci_dev->dev_list[slot],
list) {
- dev = entry->dev;
- if (dev && dev->bus->number == pcidev->bus->number
- && pci_domain_nr(dev->bus) ==
- pci_domain_nr(pcidev->bus)
- && dev->devfn == pcidev->devfn) {
+ if (entry->dev == pcidev) {
found = 1;
*domain = 0;
*bus = 0;
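Because the PCI core allocates exactly one struct pci_dev per domain/bus/devfn, comparing the stored pointer against pcidev is equivalent to the removed field-by-field match, and cheaper. A hedged sketch of the simplified lookup (types reduced for illustration):

#include <linux/list.h>
#include <linux/pci.h>

struct pci_dev_entry {
        struct list_head list;
        struct pci_dev *dev;
};

static bool slot_contains(struct list_head *head, struct pci_dev *pcidev)
{
        struct pci_dev_entry *entry;

        list_for_each_entry(entry, head, list) {
                /* pointer identity: one pci_dev per domain/bus/devfn */
                if (entry->dev == pcidev)
                        return true;
        }
        return false;
}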
diff --git a/drivers/xen/xen-stub.c b/drivers/xen/xen-stub.c
deleted file mode 100644
index 3be4e74660b5..000000000000
--- a/drivers/xen/xen-stub.c
+++ /dev/null
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * xen-stub.c - stub drivers to reserve space for Xen
- *
- * Copyright (C) 2012 Intel Corporation
- * Author: Liu Jinsong <jinsong.liu@intel.com>
- * Author: Jiang Yunhong <yunhong.jiang@intel.com>
- *
- * Copyright (C) 2012 Oracle Inc
- * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/acpi.h>
-#include <xen/acpi.h>
-
-#ifdef CONFIG_ACPI
-
-/*--------------------------------------------
- stub driver for Xen memory hotplug
---------------------------------------------*/
-
-static const struct acpi_device_id memory_device_ids[] = {
- {ACPI_MEMORY_DEVICE_HID, 0},
- {"", 0},
-};
-
-static struct acpi_driver xen_stub_memory_device_driver = {
- /* same name as the native memory driver, to block it from loading */
- .name = "acpi_memhotplug",
- .class = ACPI_MEMORY_DEVICE_CLASS,
- .ids = memory_device_ids,
-};
-
-int xen_stub_memory_device_init(void)
-{
- if (!xen_initial_domain())
- return -ENODEV;
-
- /* just reserve the space for Xen, blocking the native driver from loading */
- return acpi_bus_register_driver(&xen_stub_memory_device_driver);
-}
-EXPORT_SYMBOL_GPL(xen_stub_memory_device_init);
-subsys_initcall(xen_stub_memory_device_init);
-
-void xen_stub_memory_device_exit(void)
-{
- acpi_bus_unregister_driver(&xen_stub_memory_device_driver);
-}
-EXPORT_SYMBOL_GPL(xen_stub_memory_device_exit);
-
-
-/*--------------------------------------------
- stub driver for Xen cpu hotplug
---------------------------------------------*/
-
-static const struct acpi_device_id processor_device_ids[] = {
- {ACPI_PROCESSOR_OBJECT_HID, 0},
- {ACPI_PROCESSOR_DEVICE_HID, 0},
- {"", 0},
-};
-
-static struct acpi_driver xen_stub_processor_driver = {
- /* same name as the native processor driver, to block it from loading */
- .name = "processor",
- .class = ACPI_PROCESSOR_CLASS,
- .ids = processor_device_ids,
-};
-
-int xen_stub_processor_init(void)
-{
- if (!xen_initial_domain())
- return -ENODEV;
-
- /* just reserve the space for Xen, blocking the native driver from loading */
- return acpi_bus_register_driver(&xen_stub_processor_driver);
-}
-EXPORT_SYMBOL_GPL(xen_stub_processor_init);
-subsys_initcall(xen_stub_processor_init);
-
-void xen_stub_processor_exit(void)
-{
- acpi_bus_unregister_driver(&xen_stub_processor_driver);
-}
-EXPORT_SYMBOL_GPL(xen_stub_processor_exit);
-
-#endif