author     David S. Miller <davem@davemloft.net>  2015-12-17 22:08:28 -0500
committer  David S. Miller <davem@davemloft.net>  2015-12-17 22:08:28 -0500
commit     b3e0d3d7bab14f2544a3314bec53a23dc7dd2206 (patch)
tree       2bd3c1c1d128e0c362655fa70a6eea02fc856f62
parent     3268e5cb494d8778a5a67a9fa2b1bdb0243b77ad (diff)
parent     73796d8bf27372e26c2b79881947304c14c2d353 (diff)
download   linux-next-b3e0d3d7bab14f2544a3314bec53a23dc7dd2206.tar.gz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/geneve.c

Here we had an overlapping change, where in 'net' the extraneous stats
bump was being removed whilst in 'net-next' the final argument to
udp_tunnel6_xmit_skb() was being changed.

Signed-off-by: David S. Miller <davem@davemloft.net>
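The shape of that resolution, sketched below in plain C with hypothetical names (the real geneve.c hunk is not reproduced on this page): keep net-next's new final argument while dropping the duplicate stats update that net deleted.

/*
 * Hypothetical illustration only, not the kernel source: 'net' removed
 * a second, extraneous stats bump, while 'net-next' added a new final
 * argument; the merged result keeps both edits.
 */
struct example_stats { unsigned long tx_packets; };

static int example_xmit(struct example_stats *s, int new_final_arg)
{
	s->tx_packets++;      /* the one remaining stats bump */
	return new_final_arg; /* the argument net-next introduced */
}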
-rw-r--r--  Documentation/devicetree/bindings/dma/ti-edma.txt | 10
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt | 4
-rw-r--r--  Documentation/networking/e100.txt | 14
-rw-r--r--  MAINTAINERS | 18
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/dts/am4372.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/am43xx-clocks.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/at91-sama5d2_xplained.dts | 1
-rw-r--r--  arch/arm/boot/dts/berlin2q.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/dm816x.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/vf610-colibri.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/vf610.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/vfxxx.dtsi | 6
-rw-r--r--  arch/arm/include/asm/arch_gicv3.h | 1
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h | 12
-rw-r--r--  arch/arm/include/asm/uaccess.h | 4
-rw-r--r--  arch/arm/kernel/process.c | 33
-rw-r--r--  arch/arm/kernel/swp_emulate.c | 6
-rw-r--r--  arch/arm/kvm/mmio.c | 5
-rw-r--r--  arch/arm/kvm/mmu.c | 4
-rw-r--r--  arch/arm/kvm/psci.c | 20
-rw-r--r--  arch/arm/lib/uaccess_with_memcpy.c | 29
-rw-r--r--  arch/arm/mach-at91/Kconfig | 6
-rw-r--r--  arch/arm/mach-at91/pm.c | 7
-rw-r--r--  arch/arm/mach-exynos/pmu.c | 6
-rw-r--r--  arch/arm/mach-ixp4xx/include/mach/io.h | 12
-rw-r--r--  arch/arm/mach-omap2/Kconfig | 2
-rw-r--r--  arch/arm/mach-pxa/ezx.c | 5
-rw-r--r--  arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c | 2
-rw-r--r--  arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c | 2
-rw-r--r--  arch/arm/mm/context.c | 38
-rw-r--r--  arch/arm/mm/dma-mapping.c | 2
-rw-r--r--  arch/arm/mm/init.c | 92
-rw-r--r--  arch/arm/mm/proc-v7.S | 4
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi | 5
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h | 1
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 18
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 12
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 5
-rw-r--r--  arch/arm64/kvm/handle_exit.c | 2
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 123
-rw-r--r--  arch/arm64/kvm/sys_regs.h | 8
-rw-r--r--  arch/arm64/kvm/sys_regs_generic_v8.c | 4
-rw-r--r--  arch/blackfin/kernel/perf_event.c | 2
-rw-r--r--  arch/ia64/include/asm/unistd.h | 2
-rw-r--r--  arch/ia64/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/ia64/kernel/entry.S | 1
-rw-r--r--  arch/microblaze/kernel/dma.c | 3
-rw-r--r--  arch/mips/mm/dma-default.c | 2
-rw-r--r--  arch/parisc/include/asm/pgtable.h | 3
-rw-r--r--  arch/parisc/include/uapi/asm/unistd.h | 3
-rw-r--r--  arch/parisc/kernel/pci.c | 18
-rw-r--r--  arch/parisc/kernel/syscall_table.S | 1
-rw-r--r--  arch/powerpc/boot/dts/sbc8641d.dts | 8
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c | 14
-rw-r--r--  arch/powerpc/platforms/powernv/opal-irqchip.c | 58
-rw-r--r--  arch/sh/include/uapi/asm/unistd_64.h | 2
-rw-r--r--  arch/sh/kernel/perf_event.c | 2
-rw-r--r--  arch/sparc/kernel/perf_event.c | 2
-rw-r--r--  arch/tile/kernel/perf_event.c | 2
-rw-r--r--  arch/um/Makefile | 2
-rw-r--r--  arch/um/drivers/net_user.c | 10
-rw-r--r--  arch/um/kernel/signal.c | 2
-rw-r--r--  arch/x86/boot/boot.h | 1
-rw-r--r--  arch/x86/boot/video-mode.c | 2
-rw-r--r--  arch/x86/boot/video.c | 2
-rw-r--r--  arch/x86/entry/entry_64.S | 19
-rw-r--r--  arch/x86/include/asm/page_types.h | 16
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 14
-rw-r--r--  arch/x86/include/asm/x86_init.h | 1
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c | 1
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h | 5
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_cqm.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_lbr.c | 4
-rw-r--r--  arch/x86/kernel/irq_work.c | 2
-rw-r--r--  arch/x86/kernel/pmem.c | 12
-rw-r--r--  arch/x86/kernel/setup.c | 2
-rw-r--r--  arch/x86/kernel/signal.c | 17
-rw-r--r--  arch/x86/kernel/smpboot.c | 9
-rw-r--r--  arch/x86/mm/dump_pagetables.c | 2
-rw-r--r--  arch/x86/mm/mpx.c | 6
-rw-r--r--  arch/x86/pci/bus_numa.c | 13
-rw-r--r--  arch/x86/um/signal.c | 18
-rw-r--r--  block/blk-cgroup.c | 6
-rw-r--r--  block/blk-core.c | 12
-rw-r--r--  block/blk-settings.c | 36
-rw-r--r--  block/blk-sysfs.c | 3
-rw-r--r--  crypto/ablkcipher.c | 2
-rw-r--r--  crypto/blkcipher.c | 2
-rw-r--r--  drivers/acpi/Kconfig | 4
-rw-r--r--  drivers/acpi/nfit.c | 67
-rw-r--r--  drivers/acpi/nfit.h | 3
-rw-r--r--  drivers/acpi/pci_root.c | 7
-rw-r--r--  drivers/ata/ahci.c | 22
-rw-r--r--  drivers/ata/ahci_mvebu.c | 5
-rw-r--r--  drivers/ata/libahci.c | 9
-rw-r--r--  drivers/ata/libata-eh.c | 8
-rw-r--r--  drivers/ata/sata_fsl.c | 3
-rw-r--r--  drivers/ata/sata_sil.c | 3
-rw-r--r--  drivers/base/memory.c | 4
-rw-r--r--  drivers/base/power/domain.c | 3
-rw-r--r--  drivers/base/power/domain_governor.c | 3
-rw-r--r--  drivers/block/null_blk.c | 32
-rw-r--r--  drivers/block/rbd.c | 1
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 8
-rw-r--r--  drivers/clk/clk-gpio.c | 33
-rw-r--r--  drivers/clk/clk-qoriq.c | 4
-rw-r--r--  drivers/clk/clk-scpi.c | 1
-rw-r--r--  drivers/clk/imx/clk-pllv1.c | 14
-rw-r--r--  drivers/clk/imx/clk-pllv2.c | 9
-rw-r--r--  drivers/clk/imx/clk-vf610.c | 8
-rw-r--r--  drivers/clk/mmp/clk-mmp2.c | 1
-rw-r--r--  drivers/clk/mmp/clk-pxa168.c | 1
-rw-r--r--  drivers/clk/mmp/clk-pxa910.c | 1
-rw-r--r--  drivers/clk/sunxi/clk-a10-pll2.c | 23
-rw-r--r--  drivers/clk/ti/clk-816x.c | 2
-rw-r--r--  drivers/clk/ti/clkt_dpll.c | 4
-rw-r--r--  drivers/clk/ti/divider.c | 16
-rw-r--r--  drivers/clk/ti/fapll.c | 4
-rw-r--r--  drivers/clk/ti/mux.c | 15
-rw-r--r--  drivers/clocksource/mmio.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 14
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq.c | 2
-rw-r--r--  drivers/crypto/nx/nx-aes-ccm.c | 2
-rw-r--r--  drivers/crypto/nx/nx-aes-gcm.c | 3
-rw-r--r--  drivers/crypto/talitos.c | 2
-rw-r--r--  drivers/dma/at_xdmac.c | 9
-rw-r--r--  drivers/dma/bcm2835-dma.c | 78
-rw-r--r--  drivers/dma/edma.c | 53
-rw-r--r--  drivers/dma/mic_x100_dma.c | 15
-rw-r--r--  drivers/fpga/fpga-mgr.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 108
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 5
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 5
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 84
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 54
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 57
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 51
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 34
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 7
-rw-r--r--  drivers/gpu/drm/imx/imx-drm.h | 3
-rw-r--r--  drivers/gpu/drm/imx/imx-tve.c | 1
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 63
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 9
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.h | 2
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 106
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c | 100
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 5
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 1
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 43
-rw-r--r--  drivers/gpu/drm/ttm/ttm_lock.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 64
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 2
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c | 69
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 6
-rw-r--r--  drivers/hid/hid-ids.h | 5
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 9
-rw-r--r--  drivers/iio/adc/qcom-spmi-vadc.c | 4
-rw-r--r--  drivers/iio/industrialio-buffer.c | 2
-rw-r--r--  drivers/iio/industrialio-core.c | 2
-rw-r--r--  drivers/iio/light/apds9960.c | 1
-rw-r--r--  drivers/iio/proximity/pulsedlight-lidar-lite-v2.c | 6
-rw-r--r--  drivers/infiniband/core/cma.c | 5
-rw-r--r--  drivers/infiniband/core/mad.c | 5
-rw-r--r--  drivers/infiniband/core/sa_query.c | 32
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 27
-rw-r--r--  drivers/infiniband/core/verbs.c | 43
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 19
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c | 11
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 14
-rw-r--r--  drivers/infiniband/hw/qib/qib_qsfp.c | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h | 2
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 2
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 13
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 48
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h | 5
-rw-r--r--  drivers/iommu/intel-iommu.c | 4
-rw-r--r--  drivers/iommu/iommu.c | 2
-rw-r--r--  drivers/irqchip/irq-versatile-fpga.c | 5
-rw-r--r--  drivers/isdn/gigaset/ser-gigaset.c | 23
-rw-r--r--  drivers/isdn/hardware/mISDN/mISDNipac.c | 7
-rw-r--r--  drivers/lightnvm/Kconfig | 1
-rw-r--r--  drivers/lightnvm/core.c | 85
-rw-r--r--  drivers/lightnvm/gennvm.c | 20
-rw-r--r--  drivers/lightnvm/rrpc.c | 25
-rw-r--r--  drivers/md/dm-thin-metadata.c | 34
-rw-r--r--  drivers/md/persistent-data/dm-btree.c | 101
-rw-r--r--  drivers/md/persistent-data/dm-btree.h | 14
-rw-r--r--  drivers/md/persistent-data/dm-space-map-metadata.c | 32
-rw-r--r--  drivers/misc/cxl/native.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 4
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 38
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 4
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 7
-rw-r--r--  drivers/net/ethernet/aurora/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 46
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c | 39
-rw-r--r--  drivers/net/ethernet/ezchip/nps_enet.c | 30
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-fcc.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 49
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 11
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 53
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c | 33
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.h | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 56
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp.h | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c | 63
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 5
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c | 5
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 5
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 55
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 25
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 24
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 5
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/txc43128_phy.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 9
-rw-r--r--  drivers/net/geneve.c | 2
-rw-r--r--  drivers/net/phy/mdio-mux.c | 7
-rw-r--r--  drivers/net/phy/micrel.c | 13
-rw-r--r--  drivers/net/ppp/pppoe.c | 14
-rw-r--r--  drivers/net/ppp/pptp.c | 6
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 26
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 10
-rw-r--r--  drivers/net/usb/r8152.c | 21
-rw-r--r--  drivers/net/virtio_net.c | 34
-rw-r--r--  drivers/net/vxlan.c | 75
-rw-r--r--  drivers/nvme/host/lightnvm.c | 26
-rw-r--r--  drivers/of/address.c | 5
-rw-r--r--  drivers/of/fdt.c | 7
-rw-r--r--  drivers/of/irq.c | 3
-rw-r--r--  drivers/of/of_reserved_mem.c | 8
-rw-r--r--  drivers/parisc/iommu-helpers.h | 15
-rw-r--r--  drivers/pci/host/pcie-altera.c | 23
-rw-r--r--  drivers/pci/msi.c | 4
-rw-r--r--  drivers/pci/pci-driver.c | 16
-rw-r--r--  drivers/scsi/Kconfig | 2
-rw-r--r--  drivers/scsi/advansys.c | 2
-rw-r--r--  drivers/scsi/hosts.c | 11
-rw-r--r--  drivers/scsi/hpsa.c | 2
-rw-r--r--  drivers/scsi/mpt3sas/Kconfig | 9
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 3
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 3
-rw-r--r--  drivers/scsi/scsi_debug.c | 9
-rw-r--r--  drivers/scsi/scsi_scan.c | 9
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 22
-rw-r--r--  drivers/scsi/sd.c | 69
-rw-r--r--  drivers/scsi/sd.h | 1
-rw-r--r--  drivers/scsi/st.c | 5
-rw-r--r--  drivers/staging/android/ion/ion_chunk_heap.c | 4
-rw-r--r--  drivers/staging/iio/iio_simple_dummy_events.c | 2
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h | 1
-rw-r--r--  drivers/staging/lustre/lustre/libcfs/module.c | 17
-rw-r--r--  drivers/staging/lustre/lustre/obdecho/echo_client.c | 20
-rw-r--r--  drivers/usb/class/cdc-acm.c | 5
-rw-r--r--  drivers/usb/core/config.c | 3
-rw-r--r--  drivers/usb/core/hub.c | 22
-rw-r--r--  drivers/usb/core/port.c | 4
-rw-r--r--  drivers/usb/core/quirks.c | 9
-rw-r--r--  drivers/usb/dwc2/platform.c | 81
-rw-r--r--  drivers/usb/dwc3/gadget.c | 1
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 6
-rw-r--r--  drivers/usb/gadget/function/f_midi.c | 3
-rw-r--r--  drivers/usb/gadget/function/uvc_configfs.c | 2
-rw-r--r--  drivers/usb/gadget/udc/pxa27x_udc.c | 3
-rw-r--r--  drivers/usb/host/ohci-at91.c | 11
-rw-r--r--  drivers/usb/host/whci/qset.c | 4
-rw-r--r--  drivers/usb/host/xhci-hub.c | 47
-rw-r--r--  drivers/usb/host/xhci-pci.c | 8
-rw-r--r--  drivers/usb/host/xhci-ring.c | 3
-rw-r--r--  drivers/usb/host/xhci.c | 8
-rw-r--r--  drivers/usb/musb/Kconfig | 2
-rw-r--r--  drivers/usb/musb/musb_core.c | 8
-rw-r--r--  drivers/usb/phy/phy-msm-usb.c | 6
-rw-r--r--  drivers/usb/phy/phy-mxs-usb.c | 5
-rw-r--r--  drivers/usb/renesas_usbhs/mod_gadget.c | 11
-rw-r--r--  drivers/usb/serial/cp210x.c | 1
-rw-r--r--  drivers/usb/serial/usb-serial-simple.c | 1
-rw-r--r--  drivers/usb/storage/uas.c | 4
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 2
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 2
-rw-r--r--  drivers/vfio/Kconfig | 15
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 10
-rw-r--r--  drivers/vfio/platform/vfio_platform.c | 1
-rw-r--r--  drivers/vfio/platform/vfio_platform_common.c | 5
-rw-r--r--  drivers/vfio/vfio.c | 188
-rw-r--r--  drivers/vhost/vhost.c | 8
-rw-r--r--  drivers/video/fbdev/fsl-diu-fb.c | 13
-rw-r--r--  drivers/video/fbdev/omap2/dss/venc.c | 12
-rw-r--r--  drivers/virtio/virtio.c | 1
-rw-r--r--  drivers/virtio/virtio_ring.c | 48
-rw-r--r--  fs/9p/vfs_inode.c | 4
-rw-r--r--  fs/block_dev.c | 9
-rw-r--r--  fs/cifs/inode.c | 6
-rw-r--r--  fs/direct-io.c | 1
-rw-r--r--  fs/exofs/inode.c | 5
-rw-r--r--  fs/ext4/crypto.c | 2
-rw-r--r--  fs/ext4/ext4.h | 51
-rw-r--r--  fs/ext4/symlink.c | 2
-rw-r--r--  fs/ext4/sysfs.c | 2
-rw-r--r--  fs/fuse/cuse.c | 2
-rw-r--r--  fs/fuse/file.c | 2
-rw-r--r--  fs/jbd2/transaction.c | 12
-rw-r--r--  fs/namei.c | 1
-rw-r--r--  fs/nfs/callback_xdr.c | 7
-rw-r--r--  fs/nfs/inode.c | 6
-rw-r--r--  fs/nfs/internal.h | 2
-rw-r--r--  fs/nfs/objlayout/objio_osd.c | 5
-rw-r--r--  fs/nfs/pagelist.c | 2
-rw-r--r--  fs/nfs/pnfs.c | 4
-rw-r--r--  fs/ocfs2/namei.c | 4
-rw-r--r--  fs/overlayfs/copy_up.c | 23
-rw-r--r--  fs/overlayfs/inode.c | 19
-rw-r--r--  fs/overlayfs/overlayfs.h | 3
-rw-r--r--  include/asm-generic/tlb.h | 2
-rw-r--r--  include/drm/drmP.h | 10
-rw-r--r--  include/linux/acpi.h | 4
-rw-r--r--  include/linux/bitops.h | 2
-rw-r--r--  include/linux/blkdev.h | 2
-rw-r--r--  include/linux/cgroup-defs.h | 13
-rw-r--r--  include/linux/cgroup.h | 47
-rw-r--r--  include/linux/cpufreq.h | 1
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 1
-rw-r--r--  include/linux/jump_label.h | 2
-rw-r--r--  include/linux/kmemleak.h | 2
-rw-r--r--  include/linux/libata.h | 1
-rw-r--r--  include/linux/lightnvm.h | 21
-rw-r--r--  include/linux/lockdep.h | 2
-rw-r--r--  include/linux/mlx4/device.h | 11
-rw-r--r--  include/linux/netdevice.h | 2
-rw-r--r--  include/linux/netfilter/nfnetlink.h | 2
-rw-r--r--  include/linux/of_irq.h | 19
-rw-r--r--  include/linux/perf_event.h | 6
-rw-r--r--  include/linux/platform_data/edma.h | 2
-rw-r--r--  include/linux/proportions.h | 2
-rw-r--r--  include/linux/qed/common_hsi.h | 2
-rw-r--r--  include/linux/qed/qed_chain.h | 3
-rw-r--r--  include/linux/rhashtable.h | 18
-rw-r--r--  include/linux/stop_machine.h | 6
-rw-r--r--  include/linux/uprobes.h | 2
-rw-r--r--  include/linux/usb/quirks.h | 3
-rw-r--r--  include/linux/vfio.h | 3
-rw-r--r--  include/linux/wait.h | 10
-rw-r--r--  include/net/dst.h | 33
-rw-r--r--  include/net/inet_sock.h | 27
-rw-r--r--  include/net/inetpeer.h | 1
-rw-r--r--  include/net/sctp/structs.h | 3
-rw-r--r--  include/net/sock.h | 7
-rw-r--r--  include/net/vxlan.h | 2
-rw-r--r--  include/net/xfrm.h | 25
-rw-r--r--  include/rdma/ib_mad.h | 2
-rw-r--r--  include/rdma/ib_verbs.h | 1
-rw-r--r--  include/scsi/scsi_host.h | 3
-rw-r--r--  include/sound/hda_register.h | 3
-rw-r--r--  include/sound/soc-dapm.h | 1
-rw-r--r--  include/uapi/linux/Kbuild | 1
-rw-r--r--  include/uapi/linux/openvswitch.h | 2
-rw-r--r--  include/uapi/linux/vfio.h | 7
-rw-r--r--  include/video/imx-ipu-v3.h | 1
-rw-r--r--  init/Kconfig | 7
-rw-r--r--  kernel/cgroup.c | 99
-rw-r--r--  kernel/cgroup_freezer.c | 23
-rw-r--r--  kernel/cgroup_pids.c | 77
-rw-r--r--  kernel/cpuset.c | 33
-rw-r--r--  kernel/events/callchain.c | 2
-rw-r--r--  kernel/events/core.c | 90
-rw-r--r--  kernel/events/ring_buffer.c | 2
-rw-r--r--  kernel/events/uprobes.c | 2
-rw-r--r--  kernel/fork.c | 9
-rw-r--r--  kernel/irq_work.c | 2
-rw-r--r--  kernel/jump_label.c | 2
-rw-r--r--  kernel/locking/lockdep.c | 2
-rw-r--r--  kernel/locking/lockdep_proc.c | 2
-rw-r--r--  kernel/locking/osq_lock.c | 8
-rw-r--r--  kernel/sched/clock.c | 2
-rw-r--r--  kernel/sched/core.c | 48
-rw-r--r--  kernel/sched/cputime.c | 3
-rw-r--r--  kernel/sched/fair.c | 2
-rw-r--r--  kernel/sched/rt.c | 2
-rw-r--r--  kernel/sched/sched.h | 3
-rw-r--r--  kernel/sched/wait.c | 28
-rw-r--r--  kernel/stop_machine.c | 4
-rw-r--r--  kernel/trace/trace_event_perf.c | 2
-rw-r--r--  lib/btree.c | 2
-rw-r--r--  lib/dma-debug.c | 4
-rw-r--r--  lib/proportions.c | 2
-rw-r--r--  lib/rhashtable.c | 67
-rw-r--r--  mm/backing-dev.c | 19
-rw-r--r--  mm/hugetlb.c | 27
-rw-r--r--  mm/memcontrol.c | 49
-rw-r--r--  mm/oom_kill.c | 2
-rw-r--r--  mm/page-writeback.c | 2
-rw-r--r--  mm/page_alloc.c | 3
-rw-r--r--  mm/shmem.c | 34
-rw-r--r--  mm/vmstat.c | 8
-rw-r--r--  net/ax25/af_ax25.c | 3
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 5
-rw-r--r--  net/batman-adv/routing.c | 19
-rw-r--r--  net/batman-adv/translation-table.c | 16
-rw-r--r--  net/bluetooth/sco.c | 3
-rw-r--r--  net/core/netclassid_cgroup.c | 6
-rw-r--r--  net/core/netprio_cgroup.c | 9
-rw-r--r--  net/core/skbuff.c | 5
-rw-r--r--  net/core/sock.c | 7
-rw-r--r--  net/decnet/af_decnet.c | 3
-rw-r--r--  net/ipv4/af_inet.c | 3
-rw-r--r--  net/ipv4/fib_frontend.c | 9
-rw-r--r--  net/ipv4/fou.c | 3
-rw-r--r--  net/ipv4/netfilter/Kconfig | 1
-rw-r--r--  net/ipv4/tcp_ipv4.c | 5
-rw-r--r--  net/ipv4/tcp_output.c | 23
-rw-r--r--  net/ipv6/addrconf.c | 8
-rw-r--r--  net/ipv6/af_inet6.c | 3
-rw-r--r--  net/ipv6/ip6_gre.c | 8
-rw-r--r--  net/ipv6/netfilter/Kconfig | 1
-rw-r--r--  net/ipv6/tcp_ipv6.c | 3
-rw-r--r--  net/irda/af_irda.c | 3
-rw-r--r--  net/mac80211/cfg.c | 3
-rw-r--r--  net/mac80211/ieee80211_i.h | 4
-rw-r--r--  net/mac80211/mlme.c | 17
-rw-r--r--  net/mac80211/rx.c | 3
-rw-r--r--  net/mac80211/util.c | 113
-rw-r--r--  net/mac80211/vht.c | 10
-rw-r--r--  net/mpls/af_mpls.c | 43
-rw-r--r--  net/mpls/mpls_iptunnel.c | 4
-rw-r--r--  net/netfilter/nf_tables_api.c | 99
-rw-r--r--  net/netfilter/nfnetlink.c | 4
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 9
-rw-r--r--  net/openvswitch/conntrack.c | 16
-rw-r--r--  net/rfkill/core.c | 6
-rw-r--r--  net/sched/sch_api.c | 2
-rw-r--r--  net/sctp/ipv6.c | 11
-rw-r--r--  net/sctp/outqueue.c | 2
-rw-r--r--  net/sctp/sm_make_chunk.c | 4
-rw-r--r--  net/sctp/sm_statefuns.c | 3
-rw-r--r--  net/sctp/socket.c | 12
-rw-r--r--  net/socket.c | 1
-rw-r--r--  net/sunrpc/backchannel_rqst.c | 8
-rw-r--r--  net/sunrpc/sched.c | 6
-rw-r--r--  net/sunrpc/svc.c | 12
-rw-r--r--  net/unix/af_unix.c | 13
-rw-r--r--  net/wireless/nl80211.c | 5
-rw-r--r--  net/wireless/reg.c | 5
-rw-r--r--  net/xfrm/xfrm_policy.c | 50
-rwxr-xr-x  scripts/link-vmlinux.sh | 2
-rw-r--r--  sound/pci/hda/hda_intel.c | 23
-rw-r--r--  sound/pci/hda/patch_ca0132.c | 3
-rw-r--r--  sound/pci/hda/patch_conexant.c | 5
-rw-r--r--  sound/pci/hda/patch_hdmi.c | 6
-rw-r--r--  sound/pci/hda/patch_realtek.c | 32
-rw-r--r--  sound/pci/rme96.c | 41
-rw-r--r--  sound/soc/codecs/arizona.c | 16
-rw-r--r--  sound/soc/codecs/es8328.c | 16
-rw-r--r--  sound/soc/codecs/nau8825.c | 31
-rw-r--r--  sound/soc/codecs/rl6231.c | 6
-rw-r--r--  sound/soc/codecs/rt5645.c | 61
-rw-r--r--  sound/soc/codecs/rt5670.h | 12
-rw-r--r--  sound/soc/codecs/rt5677.c | 100
-rw-r--r--  sound/soc/codecs/wm8960.c | 2
-rw-r--r--  sound/soc/codecs/wm8962.c | 4
-rw-r--r--  sound/soc/davinci/davinci-mcasp.c | 12
-rw-r--r--  sound/soc/fsl/Kconfig | 2
-rw-r--r--  sound/soc/fsl/fsl_sai.c | 3
-rw-r--r--  sound/soc/intel/Kconfig | 2
-rw-r--r--  sound/soc/intel/skylake/skl-topology.c | 1
-rw-r--r--  sound/soc/rockchip/rockchip_spdif.c | 2
-rw-r--r--  sound/soc/rockchip/rockchip_spdif.h | 6
-rw-r--r--  sound/soc/sh/rcar/gen.c | 2
-rw-r--r--  sound/soc/sh/rcar/src.c | 7
-rw-r--r--  sound/soc/soc-core.c | 6
-rw-r--r--  sound/soc/soc-dapm.c | 7
-rw-r--r--  sound/soc/soc-ops.c | 2
-rw-r--r--  sound/soc/soc-topology.c | 3
-rw-r--r--  sound/soc/sti/uniperif_player.c | 9
-rw-r--r--  sound/soc/sti/uniperif_reader.c | 3
-rw-r--r--  sound/soc/sunxi/sun4i-codec.c | 27
-rw-r--r--  tools/testing/nvdimm/test/nfit.c | 49
-rw-r--r--  tools/virtio/linux/kernel.h | 6
-rw-r--r--  tools/virtio/linux/virtio.h | 6
-rw-r--r--  tools/virtio/linux/virtio_config.h | 20
550 files changed, 4562 insertions(+), 2905 deletions(-)
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
index d3d0a4fb1c73..079b42a81d7c 100644
--- a/Documentation/devicetree/bindings/dma/ti-edma.txt
+++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
@@ -22,8 +22,7 @@ Required properties:
Optional properties:
- ti,hwmods: Name of the hwmods associated to the eDMA CC
- ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, iow
- these channels will be SW triggered channels. The list must
- contain 16 bits numbers, see example.
+ these channels will be SW triggered channels. See example.
- ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by
the driver, they are allocated to be used by for example the
DSP. See example.
@@ -56,10 +55,9 @@ edma: edma@49000000 {
ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>;
/* Channel 20 and 21 is allocated for memcpy */
- ti,edma-memcpy-channels = /bits/ 16 <20 21>;
- /* The following PaRAM slots are reserved: 35-45 and 100-110 */
- ti,edma-reserved-slot-ranges = /bits/ 16 <35 10>,
- /bits/ 16 <100 10>;
+ ti,edma-memcpy-channels = <20 21>;
+ /* The following PaRAM slots are reserved: 35-44 and 100-109 */
+ ti,edma-reserved-slot-ranges = <35 10>, <100 10>;
};
edma_tptc0: tptc@49800000 {
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt b/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
index f2455c50533d..120bc4971cf3 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
@@ -11,6 +11,10 @@ Required properties:
0 = active high
1 = active low
+Optional properties:
+- little-endian : GPIO registers are used as little endian. If not
+ present registers are used as big endian by default.
+
Example:
gpio0: gpio@1100 {
diff --git a/Documentation/networking/e100.txt b/Documentation/networking/e100.txt
index f862cf3aff34..42ddbd4b52a9 100644
--- a/Documentation/networking/e100.txt
+++ b/Documentation/networking/e100.txt
@@ -181,17 +181,3 @@ For general information, go to the Intel support website at:
If an issue is identified with the released source code on the supported
kernel with a supported adapter, email the specific information related to the
issue to e1000-devel@lists.sourceforge.net.
-
-
-License
-=======
-
-This software program is released under the terms of a license agreement
-between you ('Licensee') and Intel. Do not use or load this software or any
-associated materials (collectively, the 'Software') until you have carefully
-read the full terms and conditions of the file COPYING located in this software
-package. By loading or using the Software, you agree to the terms of this
-Agreement. If you do not agree with the terms of this Agreement, do not install
-or use the Software.
-
-* Other names and brands may be claimed as the property of others.
diff --git a/MAINTAINERS b/MAINTAINERS
index 2bd095be19d2..c6b78b053939 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -318,7 +318,7 @@ M: Zhang Rui <rui.zhang@intel.com>
L: linux-acpi@vger.kernel.org
W: https://01.org/linux-acpi
S: Supported
-F: drivers/acpi/video.c
+F: drivers/acpi/acpi_video.c
ACPI WMI DRIVER
L: platform-driver-x86@vger.kernel.org
@@ -2984,6 +2984,7 @@ F: kernel/cpuset.c
CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@kernel.org>
+M: Vladimir Davydov <vdavydov@virtuozzo.com>
L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
S: Maintained
@@ -5586,7 +5587,7 @@ R: Jesse Brandeburg <jesse.brandeburg@intel.com>
R: Shannon Nelson <shannon.nelson@intel.com>
R: Carolyn Wyborny <carolyn.wyborny@intel.com>
R: Don Skidmore <donald.c.skidmore@intel.com>
-R: Matthew Vick <matthew.vick@intel.com>
+R: Bruce Allan <bruce.w.allan@intel.com>
R: John Ronciak <john.ronciak@intel.com>
R: Mitch Williams <mitch.a.williams@intel.com>
L: intel-wired-lan@lists.osuosl.org
@@ -8302,7 +8303,7 @@ F: include/linux/delayacct.h
F: kernel/delayacct.c
PERFORMANCE EVENTS SUBSYSTEM
-M: Peter Zijlstra <a.p.zijlstra@chello.nl>
+M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
M: Arnaldo Carvalho de Melo <acme@kernel.org>
L: linux-kernel@vger.kernel.org
@@ -8961,6 +8962,13 @@ F: drivers/rpmsg/
F: Documentation/rpmsg.txt
F: include/linux/rpmsg.h
+RENESAS ETHERNET DRIVERS
+R: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+L: netdev@vger.kernel.org
+L: linux-sh@vger.kernel.org
+F: drivers/net/ethernet/renesas/
+F: include/linux/sh_eth.h
+
RESET CONTROLLER FRAMEWORK
M: Philipp Zabel <p.zabel@pengutronix.de>
S: Maintained
@@ -9443,8 +9451,10 @@ F: include/scsi/sg.h
SCSI SUBSYSTEM
M: "James E.J. Bottomley" <JBottomley@odin.com>
-L: linux-scsi@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
+M: "Martin K. Petersen" <martin.petersen@oracle.com>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
+L: linux-scsi@vger.kernel.org
S: Maintained
F: drivers/scsi/
F: include/scsi/
diff --git a/Makefile b/Makefile
index 904a1d65df30..bc0165d0f5cf 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
NAME = Blurry Fish Butt
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index d83ff9c9701e..de8791a4d131 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -74,7 +74,7 @@
reg = <0x48240200 0x100>;
interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
- clocks = <&dpll_mpu_m2_ck>;
+ clocks = <&mpu_periphclk>;
};
local_timer: timer@48240600 {
@@ -82,7 +82,7 @@
reg = <0x48240600 0x100>;
interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
- clocks = <&dpll_mpu_m2_ck>;
+ clocks = <&mpu_periphclk>;
};
l2-cache-controller@48242000 {
diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
index cc88728d751d..a38af2bfbfcf 100644
--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
@@ -259,6 +259,14 @@
ti,invert-autoidle-bit;
};
+ mpu_periphclk: mpu_periphclk {
+ #clock-cells = <0>;
+ compatible = "fixed-factor-clock";
+ clocks = <&dpll_mpu_m2_ck>;
+ clock-mult = <1>;
+ clock-div = <2>;
+ };
+
dpll_ddr_ck: dpll_ddr_ck {
#clock-cells = <0>;
compatible = "ti,am3-dpll-clock";
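The new mpu_periphclk node is a fixed-factor clock with clock-mult = 1 and clock-div = 2, so the global and local timers above now tick at half the dpll_mpu_m2_ck rate. A small stand-alone C sketch of that computation (the 600 MHz input is an assumed example, not a value taken from this DT):

#include <stdio.h>

/* Fixed-factor clock: rate = parent * mult / div. With mult = 1 and
 * div = 2, the MPU peripheral clock is half the DPLL MPU M2 rate. */
static unsigned long fixed_factor_rate(unsigned long parent_hz,
				       unsigned int mult, unsigned int div)
{
	return (unsigned long)(((unsigned long long)parent_hz * mult) / div);
}

int main(void)
{
	printf("%lu\n", fixed_factor_rate(600000000UL, 1, 2)); /* 300000000 */
	return 0;
}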
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index ad6de73ed5a5..e74df327cdd3 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -184,6 +184,7 @@
regulator-name = "VDD_SDHC_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ regulator-always-on;
};
};
};
diff --git a/arch/arm/boot/dts/berlin2q.dtsi b/arch/arm/boot/dts/berlin2q.dtsi
index 8ea177f375dd..fb1da99996ea 100644
--- a/arch/arm/boot/dts/berlin2q.dtsi
+++ b/arch/arm/boot/dts/berlin2q.dtsi
@@ -118,7 +118,8 @@
sdhci0: sdhci@ab0000 {
compatible = "mrvl,pxav3-mmc";
reg = <0xab0000 0x200>;
- clocks = <&chip_clk CLKID_SDIO1XIN>;
+ clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
+ clock-names = "io", "core";
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -126,7 +127,8 @@
sdhci1: sdhci@ab0800 {
compatible = "mrvl,pxav3-mmc";
reg = <0xab0800 0x200>;
- clocks = <&chip_clk CLKID_SDIO1XIN>;
+ clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
+ clock-names = "io", "core";
interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -135,7 +137,7 @@
compatible = "mrvl,pxav3-mmc";
reg = <0xab1000 0x200>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_NFC>;
+ clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_SDIO>;
clock-names = "io", "core";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 3c99cfa1a876..eee636de4cd8 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -218,6 +218,7 @@
reg = <0x480c8000 0x2000>;
interrupts = <77>;
ti,hwmods = "mailbox";
+ #mbox-cells = <1>;
ti,mbox-num-users = <4>;
ti,mbox-num-fifos = <12>;
mbox_dsp: mbox_dsp {
@@ -279,8 +280,11 @@
ti,spi-num-cs = <4>;
ti,hwmods = "mcspi1";
dmas = <&edma 16 &edma 17
- &edma 18 &edma 19>;
- dma-names = "tx0", "rx0", "tx1", "rx1";
+ &edma 18 &edma 19
+ &edma 20 &edma 21
+ &edma 22 &edma 23>;
+ dma-names = "tx0", "rx0", "tx1", "rx1",
+ "tx2", "rx2", "tx3", "rx3";
};
mmc1: mmc@48060000 {
diff --git a/arch/arm/boot/dts/vf610-colibri.dtsi b/arch/arm/boot/dts/vf610-colibri.dtsi
index 19fe045b8334..2d7eab755210 100644
--- a/arch/arm/boot/dts/vf610-colibri.dtsi
+++ b/arch/arm/boot/dts/vf610-colibri.dtsi
@@ -18,8 +18,3 @@
reg = <0x80000000 0x10000000>;
};
};
-
-&L2 {
- arm,data-latency = <2 1 2>;
- arm,tag-latency = <3 2 3>;
-};
diff --git a/arch/arm/boot/dts/vf610.dtsi b/arch/arm/boot/dts/vf610.dtsi
index 5f8eb1bd782b..58bc6e448be5 100644
--- a/arch/arm/boot/dts/vf610.dtsi
+++ b/arch/arm/boot/dts/vf610.dtsi
@@ -19,7 +19,7 @@
reg = <0x40006000 0x1000>;
cache-unified;
cache-level = <2>;
- arm,data-latency = <1 1 1>;
+ arm,data-latency = <3 3 3>;
arm,tag-latency = <2 2 2>;
};
};
diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi
index 0d5acc2cdc8e..3cd1b27f2697 100644
--- a/arch/arm/boot/dts/vfxxx.dtsi
+++ b/arch/arm/boot/dts/vfxxx.dtsi
@@ -178,8 +178,10 @@
compatible = "fsl,vf610-sai";
reg = <0x40031000 0x1000>;
interrupts = <86 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks VF610_CLK_SAI2>;
- clock-names = "sai";
+ clocks = <&clks VF610_CLK_SAI2>,
+ <&clks VF610_CLK_SAI2_DIV>,
+ <&clks 0>, <&clks 0>;
+ clock-names = "bus", "mclk1", "mclk2", "mclk3";
dma-names = "tx", "rx";
dmas = <&edma0 0 21>,
<&edma0 0 20>;
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index 6607d976e07d..7da5503c0591 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -21,6 +21,7 @@
#ifndef __ASSEMBLY__
#include <linux/io.h>
+#include <asm/barrier.h>
#define __ACCESS_CP15(CRn, Op1, CRm, Op2) p15, Op1, %0, CRn, CRm, Op2
#define __ACCESS_CP15_64(Op1, CRm) p15, Op1, %Q0, %R0, CRm
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index a9c80a2ea1a7..3095df091ff8 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -28,6 +28,18 @@
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
+static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
+ u8 reg_num)
+{
+ return *vcpu_reg(vcpu, reg_num);
+}
+
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+ unsigned long val)
+{
+ *vcpu_reg(vcpu, reg_num) = val;
+}
+
bool kvm_condition_valid(struct kvm_vcpu *vcpu);
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
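The two helpers added above are thin wrappers over vcpu_reg(), so callers no longer dereference the raw pointer themselves; the mmio.c and psci.c hunks later in this diff are the call-site conversions. A simplified stand-alone model of the pattern (plain C stand-ins, not the kernel types):

/* Simplified model: vcpu_reg() returns a pointer into the register
 * file; the new accessors hide the dereference from callers. */
struct vcpu_example { unsigned long regs[16]; };

static unsigned long *example_vcpu_reg(struct vcpu_example *v, unsigned char n)
{
	return &v->regs[n];
}

static unsigned long example_vcpu_get_reg(struct vcpu_example *v,
					  unsigned char n)
{
	return *example_vcpu_reg(v, n);
}

static void example_vcpu_set_reg(struct vcpu_example *v, unsigned char n,
				 unsigned long val)
{
	*example_vcpu_reg(v, n) = val;
}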
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 8cc85a4ebec2..35c9db857ebe 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -510,10 +510,14 @@ __copy_to_user_std(void __user *to, const void *from, unsigned long n);
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
+#ifndef CONFIG_UACCESS_WITH_MEMCPY
unsigned int __ua_flags = uaccess_save_and_enable();
n = arm_copy_to_user(to, from, n);
uaccess_restore(__ua_flags);
return n;
+#else
+ return arm_copy_to_user(to, from, n);
+#endif
}
extern unsigned long __must_check
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 7a7c4cea5523..4adfb46e3ee9 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -95,6 +95,22 @@ void __show_regs(struct pt_regs *regs)
{
unsigned long flags;
char buf[64];
+#ifndef CONFIG_CPU_V7M
+ unsigned int domain;
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Get the domain register for the parent context. In user
+ * mode, we don't save the DACR, so lets use what it should
+ * be. For other modes, we place it after the pt_regs struct.
+ */
+ if (user_mode(regs))
+ domain = DACR_UACCESS_ENABLE;
+ else
+ domain = *(unsigned int *)(regs + 1);
+#else
+ domain = get_domain();
+#endif
+#endif
show_regs_print_info(KERN_DEFAULT);
@@ -123,21 +139,8 @@ void __show_regs(struct pt_regs *regs)
#ifndef CONFIG_CPU_V7M
{
- unsigned int domain = get_domain();
const char *segment;
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- /*
- * Get the domain register for the parent context. In user
- * mode, we don't save the DACR, so lets use what it should
- * be. For other modes, we place it after the pt_regs struct.
- */
- if (user_mode(regs))
- domain = DACR_UACCESS_ENABLE;
- else
- domain = *(unsigned int *)(regs + 1);
-#endif
-
if ((domain & domain_mask(DOMAIN_USER)) ==
domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
segment = "none";
@@ -163,11 +166,11 @@ void __show_regs(struct pt_regs *regs)
buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU
{
- unsigned int transbase, dac = get_domain();
+ unsigned int transbase;
asm("mrc p15, 0, %0, c2, c0\n\t"
: "=r" (transbase));
snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
- transbase, dac);
+ transbase, domain);
}
#endif
asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 5b26e7efa9ea..c3fe769d7558 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -36,10 +36,10 @@
*/
#define __user_swpX_asm(data, addr, res, temp, B) \
__asm__ __volatile__( \
- " mov %2, %1\n" \
- "0: ldrex"B" %1, [%3]\n" \
- "1: strex"B" %0, %2, [%3]\n" \
+ "0: ldrex"B" %2, [%3]\n" \
+ "1: strex"B" %0, %1, [%3]\n" \
" cmp %0, #0\n" \
+ " moveq %1, %2\n" \
" movne %0, %4\n" \
"2:\n" \
" .section .text.fixup,\"ax\"\n" \
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 974b1c606d04..3a10c9f1d0a4 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -115,7 +115,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
data);
data = vcpu_data_host_to_guest(vcpu, data, len);
- *vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
+ vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
}
return 0;
@@ -186,7 +186,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
rt = vcpu->arch.mmio_decode.rt;
if (is_write) {
- data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
+ data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
+ len);
trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
mmio_write_buf(data_buf, len, data);
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 7dace909d5cf..61d96a645ff3 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -218,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
kvm_tlb_flush_vmid_ipa(kvm, addr);
/* No need to invalidate the cache for device mappings */
- if (!kvm_is_device_pfn(__phys_to_pfn(addr)))
+ if (!kvm_is_device_pfn(pte_pfn(old_pte)))
kvm_flush_dcache_pte(old_pte);
put_page(virt_to_page(pte));
@@ -310,7 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
pte = pte_offset_kernel(pmd, addr);
do {
- if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr)))
+ if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
kvm_flush_dcache_pte(*pte);
} while (pte++, addr += PAGE_SIZE, addr != end);
}
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 0b556968a6da..a9b3b905e661 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -75,7 +75,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
unsigned long context_id;
phys_addr_t target_pc;
- cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
+ cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
if (vcpu_mode_is_32bit(source_vcpu))
cpu_id &= ~((u32) 0);
@@ -94,8 +94,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
return PSCI_RET_INVALID_PARAMS;
}
- target_pc = *vcpu_reg(source_vcpu, 2);
- context_id = *vcpu_reg(source_vcpu, 3);
+ target_pc = vcpu_get_reg(source_vcpu, 2);
+ context_id = vcpu_get_reg(source_vcpu, 3);
kvm_reset_vcpu(vcpu);
@@ -114,7 +114,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
* NOTE: We always update r0 (or x0) because for PSCI v0.1
* the general puspose registers are undefined upon CPU_ON.
*/
- *vcpu_reg(vcpu, 0) = context_id;
+ vcpu_set_reg(vcpu, 0, context_id);
vcpu->arch.power_off = false;
smp_mb(); /* Make sure the above is visible */
@@ -134,8 +134,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *tmp;
- target_affinity = *vcpu_reg(vcpu, 1);
- lowest_affinity_level = *vcpu_reg(vcpu, 2);
+ target_affinity = vcpu_get_reg(vcpu, 1);
+ lowest_affinity_level = vcpu_get_reg(vcpu, 2);
/* Determine target affinity mask */
target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
@@ -209,7 +209,7 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
int ret = 1;
- unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+ unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
unsigned long val;
switch (psci_fn) {
@@ -273,13 +273,13 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
break;
}
- *vcpu_reg(vcpu, 0) = val;
+ vcpu_set_reg(vcpu, 0, val);
return ret;
}
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
- unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+ unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
unsigned long val;
switch (psci_fn) {
@@ -295,7 +295,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
break;
}
- *vcpu_reg(vcpu, 0) = val;
+ vcpu_set_reg(vcpu, 0, val);
return 1;
}
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index d72b90905132..588bbc288396 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -88,6 +88,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
+ unsigned long ua_flags;
int atomic;
if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
@@ -118,7 +119,9 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
if (tocopy > n)
tocopy = n;
+ ua_flags = uaccess_save_and_enable();
memcpy((void *)to, from, tocopy);
+ uaccess_restore(ua_flags);
to += tocopy;
from += tocopy;
n -= tocopy;
@@ -145,14 +148,21 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
* With frame pointer disabled, tail call optimization kicks in
* as well making this test almost invisible.
*/
- if (n < 64)
- return __copy_to_user_std(to, from, n);
- return __copy_to_user_memcpy(to, from, n);
+ if (n < 64) {
+ unsigned long ua_flags = uaccess_save_and_enable();
+ n = __copy_to_user_std(to, from, n);
+ uaccess_restore(ua_flags);
+ } else {
+ n = __copy_to_user_memcpy(to, from, n);
+ }
+ return n;
}
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
+ unsigned long ua_flags;
+
if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
memset((void *)addr, 0, n);
return 0;
@@ -175,7 +185,9 @@ __clear_user_memset(void __user *addr, unsigned long n)
if (tocopy > n)
tocopy = n;
+ ua_flags = uaccess_save_and_enable();
memset((void *)addr, 0, tocopy);
+ uaccess_restore(ua_flags);
addr += tocopy;
n -= tocopy;
@@ -193,9 +205,14 @@ out:
unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
/* See rational for this in __copy_to_user() above. */
- if (n < 64)
- return __clear_user_std(addr, n);
- return __clear_user_memset(addr, n);
+ if (n < 64) {
+ unsigned long ua_flags = uaccess_save_and_enable();
+ n = __clear_user_std(addr, n);
+ uaccess_restore(ua_flags);
+ } else {
+ n = __clear_user_memset(addr, n);
+ }
+ return n;
}
#if 0
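The recurring pattern in this file is now: open the user-access window, perform the raw memcpy or memset chunk, then close the window again. A stand-alone sketch of that bracket (the save/restore pair is stubbed here; the real helpers flip the ARM domain or PAN state):

#include <string.h>

static unsigned long uaccess_save_and_enable_stub(void) { return 0; }
static void uaccess_restore_stub(unsigned long flags) { (void)flags; }

/* Every raw copy into user space is bracketed so that user access is
 * enabled only for the duration of the actual memcpy. */
static void copy_chunk(void *to, const void *from, unsigned long n)
{
	unsigned long ua_flags = uaccess_save_and_enable_stub();

	memcpy(to, from, n);
	uaccess_restore_stub(ua_flags);
}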
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 92673006e55c..28656c2b54a0 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -4,7 +4,6 @@ menuconfig ARCH_AT91
select ARCH_REQUIRE_GPIOLIB
select COMMON_CLK_AT91
select PINCTRL
- select PINCTRL_AT91
select SOC_BUS
if ARCH_AT91
@@ -17,6 +16,7 @@ config SOC_SAMA5D2
select HAVE_AT91_USB_CLK
select HAVE_AT91_H32MX
select HAVE_AT91_GENERATED_CLK
+ select PINCTRL_AT91PIO4
help
Select this if ou are using one of Atmel's SAMA5D2 family SoC.
@@ -27,6 +27,7 @@ config SOC_SAMA5D3
select HAVE_AT91_UTMI
select HAVE_AT91_SMD
select HAVE_AT91_USB_CLK
+ select PINCTRL_AT91
help
Select this if you are using one of Atmel's SAMA5D3 family SoC.
This support covers SAMA5D31, SAMA5D33, SAMA5D34, SAMA5D35, SAMA5D36.
@@ -40,6 +41,7 @@ config SOC_SAMA5D4
select HAVE_AT91_SMD
select HAVE_AT91_USB_CLK
select HAVE_AT91_H32MX
+ select PINCTRL_AT91
help
Select this if you are using one of Atmel's SAMA5D4 family SoC.
@@ -50,6 +52,7 @@ config SOC_AT91RM9200
select CPU_ARM920T
select HAVE_AT91_USB_CLK
select MIGHT_HAVE_PCI
+ select PINCTRL_AT91
select SOC_SAM_V4_V5
select SRAM if PM
help
@@ -65,6 +68,7 @@ config SOC_AT91SAM9
select HAVE_AT91_UTMI
select HAVE_FB_ATMEL
select MEMORY
+ select PINCTRL_AT91
select SOC_SAM_V4_V5
select SRAM if PM
help
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 80e277cfcc8b..23726fb31741 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -41,8 +41,10 @@
* implementation should be moved down into the pinctrl driver and get
* called as part of the generic suspend/resume path.
*/
+#ifdef CONFIG_PINCTRL_AT91
extern void at91_pinctrl_gpio_suspend(void);
extern void at91_pinctrl_gpio_resume(void);
+#endif
static struct {
unsigned long uhp_udp_mask;
@@ -151,8 +153,9 @@ static void at91_pm_suspend(suspend_state_t state)
static int at91_pm_enter(suspend_state_t state)
{
+#ifdef CONFIG_PINCTRL_AT91
at91_pinctrl_gpio_suspend();
-
+#endif
switch (state) {
/*
* Suspend-to-RAM is like STANDBY plus slow clock mode, so
@@ -192,7 +195,9 @@ static int at91_pm_enter(suspend_state_t state)
error:
target_state = PM_SUSPEND_ON;
+#ifdef CONFIG_PINCTRL_AT91
at91_pinctrl_gpio_resume();
+#endif
return 0;
}
diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c
index de68938ee6aa..c21e41dad19c 100644
--- a/arch/arm/mach-exynos/pmu.c
+++ b/arch/arm/mach-exynos/pmu.c
@@ -748,8 +748,12 @@ static void exynos5_powerdown_conf(enum sys_powerdown mode)
void exynos_sys_powerdown_conf(enum sys_powerdown mode)
{
unsigned int i;
+ const struct exynos_pmu_data *pmu_data;
+
+ if (!pmu_context)
+ return;
- const struct exynos_pmu_data *pmu_data = pmu_context->pmu_data;
+ pmu_data = pmu_context->pmu_data;
if (pmu_data->powerdown_conf)
pmu_data->powerdown_conf(mode);
diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h
index b02439019963..7a0c13bf4269 100644
--- a/arch/arm/mach-ixp4xx/include/mach/io.h
+++ b/arch/arm/mach-ixp4xx/include/mach/io.h
@@ -143,7 +143,7 @@ static inline void __indirect_writesl(volatile void __iomem *bus_addr,
writel(*vaddr++, bus_addr);
}
-static inline unsigned char __indirect_readb(const volatile void __iomem *p)
+static inline u8 __indirect_readb(const volatile void __iomem *p)
{
u32 addr = (u32)p;
u32 n, byte_enables, data;
@@ -166,7 +166,7 @@ static inline void __indirect_readsb(const volatile void __iomem *bus_addr,
*vaddr++ = readb(bus_addr);
}
-static inline unsigned short __indirect_readw(const volatile void __iomem *p)
+static inline u16 __indirect_readw(const volatile void __iomem *p)
{
u32 addr = (u32)p;
u32 n, byte_enables, data;
@@ -189,7 +189,7 @@ static inline void __indirect_readsw(const volatile void __iomem *bus_addr,
*vaddr++ = readw(bus_addr);
}
-static inline unsigned long __indirect_readl(const volatile void __iomem *p)
+static inline u32 __indirect_readl(const volatile void __iomem *p)
{
u32 addr = (__force u32)p;
u32 data;
@@ -350,7 +350,7 @@ static inline void insl(u32 io_addr, void *p, u32 count)
((unsigned long)p <= (PIO_MASK + PIO_OFFSET)))
#define ioread8(p) ioread8(p)
-static inline unsigned int ioread8(const void __iomem *addr)
+static inline u8 ioread8(const void __iomem *addr)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))
@@ -378,7 +378,7 @@ static inline void ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
}
#define ioread16(p) ioread16(p)
-static inline unsigned int ioread16(const void __iomem *addr)
+static inline u16 ioread16(const void __iomem *addr)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))
@@ -407,7 +407,7 @@ static inline void ioread16_rep(const void __iomem *addr, void *vaddr,
}
#define ioread32(p) ioread32(p)
-static inline unsigned int ioread32(const void __iomem *addr)
+static inline u32 ioread32(const void __iomem *addr)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 5076d3f334d2..4b4371db5799 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -121,6 +121,7 @@ config ARCH_OMAP2PLUS_TYPICAL
select NEON if CPU_V7
select PM
select REGULATOR
+ select REGULATOR_FIXED_VOLTAGE
select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4
select VFP
@@ -201,7 +202,6 @@ config MACH_OMAP3_PANDORA
depends on ARCH_OMAP3
default y
select OMAP_PACKAGE_CBB
- select REGULATOR_FIXED_VOLTAGE if REGULATOR
config MACH_NOKIA_N810
bool
diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c
index 9a9c15bfcd34..7c0d5618be5e 100644
--- a/arch/arm/mach-pxa/ezx.c
+++ b/arch/arm/mach-pxa/ezx.c
@@ -889,6 +889,7 @@ static void __init e680_init(void)
pxa_set_keypad_info(&e680_keypad_platform_data);
+ pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
platform_add_devices(ARRAY_AND_SIZE(e680_devices));
}
@@ -956,6 +957,7 @@ static void __init a1200_init(void)
pxa_set_keypad_info(&a1200_keypad_platform_data);
+ pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
platform_add_devices(ARRAY_AND_SIZE(a1200_devices));
}
@@ -1148,6 +1150,7 @@ static void __init a910_init(void)
platform_device_register(&a910_camera);
}
+ pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
platform_add_devices(ARRAY_AND_SIZE(a910_devices));
}
@@ -1215,6 +1218,7 @@ static void __init e6_init(void)
pxa_set_keypad_info(&e6_keypad_platform_data);
+ pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
platform_add_devices(ARRAY_AND_SIZE(e6_devices));
}
@@ -1256,6 +1260,7 @@ static void __init e2_init(void)
pxa_set_keypad_info(&e2_keypad_platform_data);
+ pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
platform_add_devices(ARRAY_AND_SIZE(e2_devices));
}
diff --git a/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c b/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c
index a19460e6e7b0..b355fca6cc2e 100644
--- a/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c
+++ b/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c
@@ -20,7 +20,7 @@
#include <plat/cpu.h>
#include <plat/cpu-freq-core.h>
-static struct cpufreq_frequency_table s3c2440_plls_12[] __initdata = {
+static struct cpufreq_frequency_table s3c2440_plls_12[] = {
{ .frequency = 75000000, .driver_data = PLLVAL(0x75, 3, 3), }, /* FVco 600.000000 */
{ .frequency = 80000000, .driver_data = PLLVAL(0x98, 4, 3), }, /* FVco 640.000000 */
{ .frequency = 90000000, .driver_data = PLLVAL(0x70, 2, 3), }, /* FVco 720.000000 */
diff --git a/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c b/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c
index 1191b2905625..be9a248b5ce9 100644
--- a/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c
+++ b/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c
@@ -20,7 +20,7 @@
#include <plat/cpu.h>
#include <plat/cpu-freq-core.h>
-static struct cpufreq_frequency_table s3c2440_plls_169344[] __initdata = {
+static struct cpufreq_frequency_table s3c2440_plls_169344[] = {
{ .frequency = 78019200, .driver_data = PLLVAL(121, 5, 3), }, /* FVco 624.153600 */
{ .frequency = 84067200, .driver_data = PLLVAL(131, 5, 3), }, /* FVco 672.537600 */
{ .frequency = 90115200, .driver_data = PLLVAL(141, 5, 3), }, /* FVco 720.921600 */
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 845769e41332..c8c8b9ed02e0 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -165,13 +165,28 @@ static void flush_context(unsigned int cpu)
__flush_icache_all();
}
-static int is_reserved_asid(u64 asid)
+static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
int cpu;
- for_each_possible_cpu(cpu)
- if (per_cpu(reserved_asids, cpu) == asid)
- return 1;
- return 0;
+ bool hit = false;
+
+ /*
+ * Iterate over the set of reserved ASIDs looking for a match.
+ * If we find one, then we can update our mm to use newasid
+ * (i.e. the same ASID in the current generation) but we can't
+ * exit the loop early, since we need to ensure that all copies
+ * of the old ASID are updated to reflect the mm. Failure to do
+ * so could result in us missing the reserved ASID in a future
+ * generation.
+ */
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(reserved_asids, cpu) == asid) {
+ hit = true;
+ per_cpu(reserved_asids, cpu) = newasid;
+ }
+ }
+
+ return hit;
}
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@@ -181,12 +196,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
u64 generation = atomic64_read(&asid_generation);
if (asid != 0) {
+ u64 newasid = generation | (asid & ~ASID_MASK);
+
/*
* If our current ASID was active during a rollover, we
* can continue to use it and this was just a false alarm.
*/
- if (is_reserved_asid(asid))
- return generation | (asid & ~ASID_MASK);
+ if (check_update_reserved_asid(asid, newasid))
+ return newasid;
/*
* We had a valid ASID in a previous life, so try to re-use
@@ -194,7 +211,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
*/
asid &= ~ASID_MASK;
if (!__test_and_set_bit(asid, asid_map))
- goto bump_gen;
+ return newasid;
}
/*
@@ -216,11 +233,8 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
__set_bit(asid, asid_map);
cur_idx = asid;
-
-bump_gen:
- asid |= generation;
cpumask_clear(mm_cpumask(mm));
- return asid;
+ return asid | generation;
}
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
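For reference, a minimal user-space sketch of the reserved-ASID rescan this hunk introduces, with the per-CPU storage modeled as a plain array and NR_CPUS chosen arbitrarily:

#include <stdbool.h>
#include <stdint.h>

#define NR_CPUS 4

static uint64_t reserved_asids[NR_CPUS];

/* Scan every CPU's reserved ASID; on a match, rewrite it to the same
 * ASID in the new generation. The loop must not exit early, so that
 * all stale copies are updated and the reserved ASID is not missed in
 * a future generation. */
static bool check_update_reserved_asid(uint64_t asid, uint64_t newasid)
{
	bool hit = false;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (reserved_asids[cpu] == asid) {
			hit = true;
			reserved_asids[cpu] = newasid;
		}
	}
	return hit;
}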
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e62400e5fb99..534a60ae282e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1521,7 +1521,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
return -ENOMEM;
for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
- phys_addr_t phys = sg_phys(s) & PAGE_MASK;
+ phys_addr_t phys = page_to_phys(sg_page(s));
unsigned int len = PAGE_ALIGN(s->offset + s->length);
if (!is_coherent &&
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 8a63b4cdc0f2..7f8cd1b3557f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -22,6 +22,7 @@
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
+#include <linux/stop_machine.h>
#include <asm/cp15.h>
#include <asm/mach-types.h>
@@ -627,12 +628,10 @@ static struct section_perm ro_perms[] = {
* safe to be called with preemption disabled, as under stop_machine().
*/
static inline void section_update(unsigned long addr, pmdval_t mask,
- pmdval_t prot)
+ pmdval_t prot, struct mm_struct *mm)
{
- struct mm_struct *mm;
pmd_t *pmd;
- mm = current->active_mm;
pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
#ifdef CONFIG_ARM_LPAE
@@ -656,49 +655,82 @@ static inline bool arch_has_strict_perms(void)
return !!(get_cr() & CR_XP);
}
-#define set_section_perms(perms, field) { \
- size_t i; \
- unsigned long addr; \
- \
- if (!arch_has_strict_perms()) \
- return; \
- \
- for (i = 0; i < ARRAY_SIZE(perms); i++) { \
- if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || \
- !IS_ALIGNED(perms[i].end, SECTION_SIZE)) { \
- pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
- perms[i].start, perms[i].end, \
- SECTION_SIZE); \
- continue; \
- } \
- \
- for (addr = perms[i].start; \
- addr < perms[i].end; \
- addr += SECTION_SIZE) \
- section_update(addr, perms[i].mask, \
- perms[i].field); \
- } \
+void set_section_perms(struct section_perm *perms, int n, bool set,
+ struct mm_struct *mm)
+{
+ size_t i;
+ unsigned long addr;
+
+ if (!arch_has_strict_perms())
+ return;
+
+ for (i = 0; i < n; i++) {
+ if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
+ !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
+ pr_err("BUG: section %lx-%lx not aligned to %lx\n",
+ perms[i].start, perms[i].end,
+ SECTION_SIZE);
+ continue;
+ }
+
+ for (addr = perms[i].start;
+ addr < perms[i].end;
+ addr += SECTION_SIZE)
+ section_update(addr, perms[i].mask,
+ set ? perms[i].prot : perms[i].clear, mm);
+ }
+
}
-static inline void fix_kernmem_perms(void)
+static void update_sections_early(struct section_perm perms[], int n)
{
- set_section_perms(nx_perms, prot);
+ struct task_struct *t, *s;
+
+ read_lock(&tasklist_lock);
+ for_each_process(t) {
+ if (t->flags & PF_KTHREAD)
+ continue;
+ for_each_thread(t, s)
+ set_section_perms(perms, n, true, s->mm);
+ }
+ read_unlock(&tasklist_lock);
+ set_section_perms(perms, n, true, current->active_mm);
+ set_section_perms(perms, n, true, &init_mm);
+}
+
+int __fix_kernmem_perms(void *unused)
+{
+ update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
+ return 0;
+}
+
+void fix_kernmem_perms(void)
+{
+ stop_machine(__fix_kernmem_perms, NULL, NULL);
}
#ifdef CONFIG_DEBUG_RODATA
+int __mark_rodata_ro(void *unused)
+{
+ update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
+ return 0;
+}
+
void mark_rodata_ro(void)
{
- set_section_perms(ro_perms, prot);
+ stop_machine(__mark_rodata_ro, NULL, NULL);
}
void set_kernel_text_rw(void)
{
- set_section_perms(ro_perms, clear);
+ set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
+ current->active_mm);
}
void set_kernel_text_ro(void)
{
- set_section_perms(ro_perms, prot);
+ set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
+ current->active_mm);
}
#endif /* CONFIG_DEBUG_RODATA */
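Context for the init.c rework: on ARM each user pgd carries its own copy of the kernel section entries, so permission changes have to be replayed into every mm, not just init_mm, and running the replay under stop_machine() keeps other CPUs from executing through a half-updated mapping. A hedged sketch of the iteration pattern, using the names from the hunk above:

read_lock(&tasklist_lock);
for_each_process(t) {
        if (t->flags & PF_KTHREAD)
                continue;               /* kthreads have no mm of their own */
        for_each_thread(t, s)
                set_section_perms(perms, n, true, s->mm);
}
read_unlock(&tasklist_lock);
/* the loop misses these two */
set_section_perms(perms, n, true, current->active_mm);
set_section_perms(perms, n, true, &init_mm);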
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index de2b246fed38..8e1ea433c3f1 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -95,7 +95,7 @@ ENDPROC(cpu_v7_dcache_clean_area)
.equ cpu_v7_suspend_size, 4 * 9
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v7_do_suspend)
- stmfd sp!, {r4 - r10, lr}
+ stmfd sp!, {r4 - r11, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID
stmia r0!, {r4 - r5}
@@ -112,7 +112,7 @@ ENTRY(cpu_v7_do_suspend)
mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register
mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control
stmia r0, {r5 - r11}
- ldmfd sp!, {r4 - r10, pc}
+ ldmfd sp!, {r4 - r11, pc}
ENDPROC(cpu_v7_do_suspend)
ENTRY(cpu_v7_do_resume)
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index e81cd48d6245..925552e7b4f3 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -269,6 +269,7 @@
clock-frequency = <0>; /* Updated by bootloader */
voltage-ranges = <1800 1800 3300 3300>;
sdhci,auto-cmd12;
+ little-endian;
bus-width = <4>;
};
@@ -277,6 +278,7 @@
reg = <0x0 0x2300000 0x0 0x10000>;
interrupts = <0 36 0x4>; /* Level high type */
gpio-controller;
+ little-endian;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -287,6 +289,7 @@
reg = <0x0 0x2310000 0x0 0x10000>;
interrupts = <0 36 0x4>; /* Level high type */
gpio-controller;
+ little-endian;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -297,6 +300,7 @@
reg = <0x0 0x2320000 0x0 0x10000>;
interrupts = <0 37 0x4>; /* Level high type */
gpio-controller;
+ little-endian;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -307,6 +311,7 @@
reg = <0x0 0x2330000 0x0 0x10000>;
interrupts = <0 37 0x4>; /* Level high type */
gpio-controller;
+ little-endian;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 030cdcb46c6b..2731d3b25ed2 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -77,6 +77,7 @@
#ifndef __ASSEMBLY__
#include <linux/stringify.h>
+#include <asm/barrier.h>
/*
* Low-level accessors
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 3ca894ecf699..25a40213bd9b 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -100,13 +100,21 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
}
/*
- * vcpu_reg should always be passed a register number coming from a
- * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
- * with banked registers.
+ * vcpu_get_reg and vcpu_set_reg should always be passed a register number
+ * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
+ * AArch32 with banked registers.
*/
-static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
+static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+ u8 reg_num)
{
- return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+ return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+}
+
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+ unsigned long val)
+{
+ if (reg_num != 31)
+ vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}
/* Get vcpu SPSR for current mode */
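The split into vcpu_get_reg()/vcpu_set_reg() centralizes the AArch64 XZR convention: register 31 reads as zero and writes to it are discarded, so call sites no longer need to special-case it. A hedged usage sketch:

unsigned long v = vcpu_get_reg(vcpu, 31);  /* always 0 (XZR) */
vcpu_set_reg(vcpu, 31, 0xdead);            /* silently dropped */
vcpu_set_reg(vcpu, 0, v);                  /* ordinary GPR write */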
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7e074f93f383..63f52b55defe 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -276,10 +276,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
* hardware updates of the pte (ptep_set_access_flags safely changes
* valid ptes without going through an invalid entry).
*/
- if (IS_ENABLED(CONFIG_DEBUG_VM) && IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
- pte_valid(*ptep)) {
- BUG_ON(!pte_young(pte));
- BUG_ON(pte_write(*ptep) && !pte_dirty(pte));
+ if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
+ pte_valid(*ptep) && pte_valid(pte)) {
+ VM_WARN_ONCE(!pte_young(pte),
+ "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
+ __func__, pte_val(*ptep), pte_val(pte));
+ VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
+ "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
+ __func__, pte_val(*ptep), pte_val(pte));
}
set_pte(ptep, pte);
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 1ee2c3937d4e..71426a78db12 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -5,6 +5,7 @@
*/
#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
#include <asm/kernel-pgtable.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
@@ -140,7 +141,7 @@ SECTIONS
ARM_EXIT_KEEP(EXIT_DATA)
}
- PERCPU_SECTION(64)
+ PERCPU_SECTION(L1_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
__init_end = .;
@@ -158,7 +159,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_data = .;
_sdata = .;
- RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+ RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
PECOFF_EDATA_PADDING
_edata = .;
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 68a0759b1375..15f0477b0d2a 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -37,7 +37,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int ret;
- trace_kvm_hvc_arm64(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
+ trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
kvm_vcpu_hvc_get_imm(vcpu));
ret = kvm_psci_call(vcpu);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 87a64e8db04c..d2650e84faf2 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -78,7 +78,7 @@ static u32 get_ccsidr(u32 csselr)
* See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
*/
static bool access_dcsw(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
+ struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (!p->is_write)
@@ -94,21 +94,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
* sys_regs and leave it in complete control of the caches.
*/
static bool access_vm_reg(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
+ struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- unsigned long val;
bool was_enabled = vcpu_has_cache_enabled(vcpu);
BUG_ON(!p->is_write);
- val = *vcpu_reg(vcpu, p->Rt);
if (!p->is_aarch32) {
- vcpu_sys_reg(vcpu, r->reg) = val;
+ vcpu_sys_reg(vcpu, r->reg) = p->regval;
} else {
if (!p->is_32bit)
- vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
- vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
+ vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
+ vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
}
kvm_toggle_cache(vcpu, was_enabled);
@@ -122,22 +120,19 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
* for both AArch64 and AArch32 accesses.
*/
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
+ struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- u64 val;
-
if (!p->is_write)
return read_from_write_only(vcpu, p);
- val = *vcpu_reg(vcpu, p->Rt);
- vgic_v3_dispatch_sgi(vcpu, val);
+ vgic_v3_dispatch_sgi(vcpu, p->regval);
return true;
}
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
+ struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write)
@@ -147,19 +142,19 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
}
static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
+ struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write) {
return ignore_write(vcpu, p);
} else {
- *vcpu_reg(vcpu, p->Rt) = (1 << 3);
+ p->regval = (1 << 3);
return true;
}
}
static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
+ struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write) {
@@ -167,7 +162,7 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
} else {
u32 val;
asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
- *vcpu_reg(vcpu, p->Rt) = val;
+ p->regval = val;
return true;
}
}
@@ -200,17 +195,17 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
* now use the debug registers.
*/
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
+ struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write) {
- vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+ vcpu_sys_reg(vcpu, r->reg) = p->regval;
vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
} else {
- *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
+ p->regval = vcpu_sys_reg(vcpu, r->reg);
}
- trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt));
+ trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
return true;
}
@@ -225,10 +220,10 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
* hyp.S code switches between host and guest values in future.
*/
static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
+ struct sys_reg_params *p,
u64 *dbg_reg)
{
- u64 val = *vcpu_reg(vcpu, p->Rt);
+ u64 val = p->regval;
if (p->is_32bit) {
val &= 0xffffffffUL;
@@ -240,19 +235,16 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
}
static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
+ struct sys_reg_params *p,
u64 *dbg_reg)
{
- u64 val = *dbg_reg;
-
+ p->regval = *dbg_reg;
if (p->is_32bit)
- val &= 0xffffffffUL;
-
- *vcpu_reg(vcpu, p->Rt) = val;
+ p->regval &= 0xffffffffUL;
}
static inline bool trap_bvr(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
+ struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@@ -294,7 +286,7 @@ static inline void reset_bvr(struct kvm_vcpu *vcpu,
}
static inline bool trap_bcr(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
+ struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
@@ -337,7 +329,7 @@ static inline void reset_bcr(struct kvm_vcpu *vcpu,
}
static inline bool trap_wvr(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
+ struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
@@ -380,7 +372,7 @@ static inline void reset_wvr(struct kvm_vcpu *vcpu,
}
static inline bool trap_wcr(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
+ struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
@@ -687,7 +679,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
};
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
+ struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write) {
@@ -697,23 +689,23 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT);
- *vcpu_reg(vcpu, p->Rt) = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
- (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
- (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) |
- (6 << 16) | (el3 << 14) | (el3 << 12));
+ p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
+ (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
+ (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
+ | (6 << 16) | (el3 << 14) | (el3 << 12));
return true;
}
}
static bool trap_debug32(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
+ struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write) {
- vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+ vcpu_cp14(vcpu, r->reg) = p->regval;
vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
} else {
- *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
+ p->regval = vcpu_cp14(vcpu, r->reg);
}
return true;
@@ -731,7 +723,7 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
*/
static inline bool trap_xvr(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
+ struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@@ -740,12 +732,12 @@ static inline bool trap_xvr(struct kvm_vcpu *vcpu,
u64 val = *dbg_reg;
val &= 0xffffffffUL;
- val |= *vcpu_reg(vcpu, p->Rt) << 32;
+ val |= p->regval << 32;
*dbg_reg = val;
vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
} else {
- *vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32;
+ p->regval = *dbg_reg >> 32;
}
trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
@@ -991,7 +983,7 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
* Return 0 if the access has been handled, and -1 if not.
*/
static int emulate_cp(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *params,
+ struct sys_reg_params *params,
const struct sys_reg_desc *table,
size_t num)
{
@@ -1062,12 +1054,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
+ int Rt = (hsr >> 5) & 0xf;
int Rt2 = (hsr >> 10) & 0xf;
params.is_aarch32 = true;
params.is_32bit = false;
params.CRm = (hsr >> 1) & 0xf;
- params.Rt = (hsr >> 5) & 0xf;
params.is_write = ((hsr & 1) == 0);
params.Op0 = 0;
@@ -1076,15 +1068,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
params.CRn = 0;
/*
- * Massive hack here. Store Rt2 in the top 32bits so we only
- * have one register to deal with. As we use the same trap
+ * Make a 64-bit value out of Rt and Rt2. As we use the same trap
* backends between AArch32 and AArch64, we get away with it.
*/
if (params.is_write) {
- u64 val = *vcpu_reg(vcpu, params.Rt);
- val &= 0xffffffff;
- val |= *vcpu_reg(vcpu, Rt2) << 32;
- *vcpu_reg(vcpu, params.Rt) = val;
+ params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
+ params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
}
if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
@@ -1095,11 +1084,10 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
unhandled_cp_access(vcpu, &params);
out:
- /* Do the opposite hack for the read side */
+ /* Split up the value between registers for the read side */
if (!params.is_write) {
- u64 val = *vcpu_reg(vcpu, params.Rt);
- val >>= 32;
- *vcpu_reg(vcpu, Rt2) = val;
+ vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
+ vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
}
return 1;
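The packing and splitting around emulate_cp() is plain 32/64-bit arithmetic. A self-contained rendering, with stand-ins for the kernel's lower_32_bits()/upper_32_bits() macros:

#include <stdint.h>

#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)((n) >> 32))

static uint64_t pack(uint32_t rt, uint32_t rt2)      /* write side */
{
        return (uint64_t)rt | ((uint64_t)rt2 << 32);
}

static void split(uint64_t regval, uint32_t *rt, uint32_t *rt2)  /* read side */
{
        *rt  = lower_32_bits(regval);
        *rt2 = upper_32_bits(regval);
}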
@@ -1118,21 +1106,24 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
+ int Rt = (hsr >> 5) & 0xf;
params.is_aarch32 = true;
params.is_32bit = true;
params.CRm = (hsr >> 1) & 0xf;
- params.Rt = (hsr >> 5) & 0xf;
+ params.regval = vcpu_get_reg(vcpu, Rt);
params.is_write = ((hsr & 1) == 0);
params.CRn = (hsr >> 10) & 0xf;
params.Op0 = 0;
params.Op1 = (hsr >> 14) & 0x7;
params.Op2 = (hsr >> 17) & 0x7;
- if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
- return 1;
- if (!emulate_cp(vcpu, &params, global, nr_global))
+ if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
+ !emulate_cp(vcpu, &params, global, nr_global)) {
+ if (!params.is_write)
+ vcpu_set_reg(vcpu, Rt, params.regval);
return 1;
+ }
unhandled_cp_access(vcpu, &params);
return 1;
@@ -1175,7 +1166,7 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
}
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *params)
+ struct sys_reg_params *params)
{
size_t num;
const struct sys_reg_desc *table, *r;
@@ -1230,6 +1221,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
struct sys_reg_params params;
unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+ int Rt = (esr >> 5) & 0x1f;
+ int ret;
trace_kvm_handle_sys_reg(esr);
@@ -1240,10 +1233,14 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
params.CRn = (esr >> 10) & 0xf;
params.CRm = (esr >> 1) & 0xf;
params.Op2 = (esr >> 17) & 0x7;
- params.Rt = (esr >> 5) & 0x1f;
+ params.regval = vcpu_get_reg(vcpu, Rt);
params.is_write = !(esr & 1);
- return emulate_sys_reg(vcpu, &params);
+ ret = emulate_sys_reg(vcpu, &params);
+
+ if (!params.is_write)
+ vcpu_set_reg(vcpu, Rt, params.regval);
+ return ret;
}
/******************************************************************************
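Under the new API a trap handler never touches the guest register file; it reads and writes p->regval, and kvm_handle_sys_reg() moves that value to and from Rt exactly once. A hedged sketch of a handler in the new style (trap_example is hypothetical):

static bool trap_example(struct kvm_vcpu *vcpu,
                         struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
{
        if (p->is_write)
                vcpu_sys_reg(vcpu, r->reg) = p->regval;  /* value guest put in Rt */
        else
                p->regval = vcpu_sys_reg(vcpu, r->reg);  /* copied back to Rt */
        return true;
}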
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index eaa324e4db4d..dbbb01cfbee9 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -28,7 +28,7 @@ struct sys_reg_params {
u8 CRn;
u8 CRm;
u8 Op2;
- u8 Rt;
+ u64 regval;
bool is_write;
bool is_aarch32;
bool is_32bit; /* Only valid if is_aarch32 is true */
@@ -44,7 +44,7 @@ struct sys_reg_desc {
/* Trapped access from guest, if non-NULL. */
bool (*access)(struct kvm_vcpu *,
- const struct sys_reg_params *,
+ struct sys_reg_params *,
const struct sys_reg_desc *);
/* Initialization for vcpu. */
@@ -77,9 +77,9 @@ static inline bool ignore_write(struct kvm_vcpu *vcpu,
}
static inline bool read_zero(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p)
+ struct sys_reg_params *p)
{
- *vcpu_reg(vcpu, p->Rt) = 0;
+ p->regval = 0;
return true;
}
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c
index 1e4576824165..ed90578fa120 100644
--- a/arch/arm64/kvm/sys_regs_generic_v8.c
+++ b/arch/arm64/kvm/sys_regs_generic_v8.c
@@ -31,13 +31,13 @@
#include "sys_regs.h"
static bool access_actlr(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
+ struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write)
return ignore_write(vcpu, p);
- *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1);
+ p->regval = vcpu_sys_reg(vcpu, ACTLR_EL1);
return true;
}
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c
index 1e9c8b0bf486..170d786807c4 100644
--- a/arch/blackfin/kernel/perf_event.c
+++ b/arch/blackfin/kernel/perf_event.c
@@ -14,7 +14,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
*
* ppc:
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index db73390568c8..74c132d901bd 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
-#define NR_syscalls 322 /* length of syscall table */
+#define NR_syscalls 323 /* length of syscall table */
/*
* The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 9038726e7d26..762edce7572e 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -335,5 +335,6 @@
#define __NR_userfaultfd 1343
#define __NR_membarrier 1344
#define __NR_kcmp 1345
+#define __NR_mlock2 1346
#endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index dcd97f84d065..534a74acb849 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1771,5 +1771,6 @@ sys_call_table:
data8 sys_userfaultfd
data8 sys_membarrier
data8 sys_kcmp // 1345
+ data8 sys_mlock2
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index c89da6312954..bf4dec229437 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -61,7 +61,8 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
/* FIXME this part of code is untested */
for_each_sg(sgl, sg, nents, i) {
sg->dma_address = sg_phys(sg);
- __dma_sync(sg_phys(sg), sg->length, direction);
+ __dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
+ sg->length, direction);
}
return nents;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index d8117be729a2..730d394ce5f0 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -145,7 +145,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
gfp = massage_gfp_flags(dev, gfp);
- if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))
+ if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
page = dma_alloc_from_contiguous(dev,
count, get_order(size));
if (!page)
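gfpflags_allow_blocking() keys off the direct-reclaim bit rather than comparing against GFP_ATOMIC, which also classifies GFP_NOWAIT-style masks correctly. Roughly, as a sketch of the include/linux/gfp.h helper of this era:

static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
        return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}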
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index d8534f95915a..291cee28ccb6 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -372,7 +372,8 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
*/
#ifdef CONFIG_HUGETLB_PAGE
#define pte_huge(pte) (pte_val(pte) & _PAGE_HUGE)
-#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_HUGE))
+#define pte_mkhuge(pte) (__pte(pte_val(pte) | \
+ (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
#else
#define pte_huge(pte) (0)
#define pte_mkhuge(pte) (pte)
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 33170384d3ac..35bdccbb2036 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -360,8 +360,9 @@
#define __NR_execveat (__NR_Linux + 342)
#define __NR_membarrier (__NR_Linux + 343)
#define __NR_userfaultfd (__NR_Linux + 344)
+#define __NR_mlock2 (__NR_Linux + 345)
-#define __NR_Linux_syscalls (__NR_userfaultfd + 1)
+#define __NR_Linux_syscalls (__NR_mlock2 + 1)
#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index 64f2764a8cef..c99f3dde455c 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -171,24 +171,6 @@ void pcibios_set_master(struct pci_dev *dev)
}
-void __init pcibios_init_bus(struct pci_bus *bus)
-{
- struct pci_dev *dev = bus->self;
- unsigned short bridge_ctl;
-
- /* We deal only with pci controllers and pci-pci bridges. */
- if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
- return;
-
- /* PCI-PCI bridge - set the cache line and default latency
- (32) for primary and secondary buses. */
- pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32);
-
- pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl);
- bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl);
-}
-
/*
* pcibios align resources() is called every time generic PCI code
* wants to generate a new address. The process of looking for
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 78c3ef8c348d..d4ffcfbc9885 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -440,6 +440,7 @@
ENTRY_COMP(execveat)
ENTRY_SAME(membarrier)
ENTRY_SAME(userfaultfd)
+ ENTRY_SAME(mlock2) /* 345 */
.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
diff --git a/arch/powerpc/boot/dts/sbc8641d.dts b/arch/powerpc/boot/dts/sbc8641d.dts
index 631ede72e226..68f0ed7626bd 100644
--- a/arch/powerpc/boot/dts/sbc8641d.dts
+++ b/arch/powerpc/boot/dts/sbc8641d.dts
@@ -227,23 +227,15 @@
reg = <0x520 0x20>;
phy0: ethernet-phy@1f {
- interrupt-parent = <&mpic>;
- interrupts = <10 1>;
reg = <0x1f>;
};
phy1: ethernet-phy@0 {
- interrupt-parent = <&mpic>;
- interrupts = <10 1>;
reg = <0>;
};
phy2: ethernet-phy@1 {
- interrupt-parent = <&mpic>;
- interrupts = <10 1>;
reg = <1>;
};
phy3: ethernet-phy@2 {
- interrupt-parent = <&mpic>;
- interrupts = <10 1>;
reg = <2>;
};
tbi0: tbi-phy@11 {
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 80dfe8965df9..8d14feb40f12 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -590,16 +590,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
eeh_ops->configure_bridge(pe);
eeh_pe_restore_bars(pe);
- /*
- * If it's PHB PE, the frozen state on all available PEs should have
- * been cleared by the PHB reset. Otherwise, we unfreeze the PE and its
- * child PEs because they might be in frozen state.
- */
- if (!(pe->type & EEH_PE_PHB)) {
- rc = eeh_clear_pe_frozen_state(pe, false);
- if (rc)
- return rc;
- }
+ /* Clear frozen state */
+ rc = eeh_clear_pe_frozen_state(pe, false);
+ if (rc)
+ return rc;
/* Give the system 5 seconds to finish running the user-space
* hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
index 6ccfb6c1c707..0a00e2aed393 100644
--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
+++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
@@ -43,11 +43,34 @@ static unsigned int opal_irq_count;
static unsigned int *opal_irqs;
static void opal_handle_irq_work(struct irq_work *work);
-static __be64 last_outstanding_events;
+static u64 last_outstanding_events;
static struct irq_work opal_event_irq_work = {
.func = opal_handle_irq_work,
};
+void opal_handle_events(uint64_t events)
+{
+ int virq, hwirq = 0;
+ u64 mask = opal_event_irqchip.mask;
+
+ if (!in_irq() && (events & mask)) {
+ last_outstanding_events = events;
+ irq_work_queue(&opal_event_irq_work);
+ return;
+ }
+
+ while (events & mask) {
+ hwirq = fls64(events) - 1;
+ if (BIT_ULL(hwirq) & mask) {
+ virq = irq_find_mapping(opal_event_irqchip.domain,
+ hwirq);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+ events &= ~BIT_ULL(hwirq);
+ }
+}
+
static void opal_event_mask(struct irq_data *d)
{
clear_bit(d->hwirq, &opal_event_irqchip.mask);
@@ -55,12 +78,12 @@ static void opal_event_mask(struct irq_data *d)
static void opal_event_unmask(struct irq_data *d)
{
+ __be64 events;
+
set_bit(d->hwirq, &opal_event_irqchip.mask);
- opal_poll_events(&last_outstanding_events);
- if (last_outstanding_events & opal_event_irqchip.mask)
- /* Need to retrigger the interrupt */
- irq_work_queue(&opal_event_irq_work);
+ opal_poll_events(&events);
+ opal_handle_events(be64_to_cpu(events));
}
static int opal_event_set_type(struct irq_data *d, unsigned int flow_type)
@@ -96,29 +119,6 @@ static int opal_event_map(struct irq_domain *d, unsigned int irq,
return 0;
}
-void opal_handle_events(uint64_t events)
-{
- int virq, hwirq = 0;
- u64 mask = opal_event_irqchip.mask;
-
- if (!in_irq() && (events & mask)) {
- last_outstanding_events = events;
- irq_work_queue(&opal_event_irq_work);
- return;
- }
-
- while (events & mask) {
- hwirq = fls64(events) - 1;
- if (BIT_ULL(hwirq) & mask) {
- virq = irq_find_mapping(opal_event_irqchip.domain,
- hwirq);
- if (virq)
- generic_handle_irq(virq);
- }
- events &= ~BIT_ULL(hwirq);
- }
-}
-
static irqreturn_t opal_interrupt(int irq, void *data)
{
__be64 events;
@@ -131,7 +131,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)
static void opal_handle_irq_work(struct irq_work *work)
{
- opal_handle_events(be64_to_cpu(last_outstanding_events));
+ opal_handle_events(last_outstanding_events);
}
static int opal_event_match(struct irq_domain *h, struct device_node *node,
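The relocated opal_handle_events() walks pending events from the highest set bit down, clearing each as it is dispatched. A userspace rendering of that loop, with a stand-in for the kernel's fls64():

#include <stdint.h>
#include <stdio.h>

static int fls64(uint64_t x)    /* stand-in for the kernel helper */
{
        return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
        uint64_t events = 0x805, mask = ~0ULL;

        while (events & mask) {
                int hwirq = fls64(events) - 1;
                printf("dispatch hwirq %d\n", hwirq);  /* 11, then 2, then 0 */
                events &= ~(1ULL << hwirq);
        }
        return 0;
}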
diff --git a/arch/sh/include/uapi/asm/unistd_64.h b/arch/sh/include/uapi/asm/unistd_64.h
index e6820c86e8c7..47ebd5b5ed55 100644
--- a/arch/sh/include/uapi/asm/unistd_64.h
+++ b/arch/sh/include/uapi/asm/unistd_64.h
@@ -278,7 +278,7 @@
#define __NR_fsetxattr 256
#define __NR_getxattr 257
#define __NR_lgetxattr 258
-#define __NR_fgetxattr 269
+#define __NR_fgetxattr 259
#define __NR_listxattr 260
#define __NR_llistxattr 261
#define __NR_flistxattr 262
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 7cfd7f153966..4dca18347ee9 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -10,7 +10,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
*
* ppc:
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index b0da5aedb336..3091267c5cc3 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -9,7 +9,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
*/
#include <linux/perf_event.h>
diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c
index bb509cee3b59..8767060d70fb 100644
--- a/arch/tile/kernel/perf_event.c
+++ b/arch/tile/kernel/perf_event.c
@@ -21,7 +21,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
* Copyright (C) 2009 Google, Inc., Stephane Eranian
*/
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 25ed4098640e..e3abe6f3156d 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -131,7 +131,7 @@ export LDS_ELF_FORMAT := $(ELF_FORMAT)
# The wrappers will select whether using "malloc" or the kernel allocator.
LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
-LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt)) -lrt
+LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt))
# Used by link-vmlinux.sh which has special support for um link
export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
diff --git a/arch/um/drivers/net_user.c b/arch/um/drivers/net_user.c
index e697a4136707..e9f8445861dc 100644
--- a/arch/um/drivers/net_user.c
+++ b/arch/um/drivers/net_user.c
@@ -249,21 +249,23 @@ void close_addr(unsigned char *addr, unsigned char *netmask, void *arg)
char *split_if_spec(char *str, ...)
{
- char **arg, *end;
+ char **arg, *end, *ret = NULL;
va_list ap;
va_start(ap, str);
while ((arg = va_arg(ap, char **)) != NULL) {
if (*str == '\0')
- return NULL;
+ goto out;
end = strchr(str, ',');
if (end != str)
*arg = str;
if (end == NULL)
- return NULL;
+ goto out;
*end++ = '\0';
str = end;
}
+ ret = str;
+out:
va_end(ap);
- return str;
+ return ret;
}
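The split_if_spec() fix is an instance of a general varargs rule: every va_start() must reach a matching va_end() on all return paths, which is why the early returns become goto out. The same idiom in miniature:

#include <stdarg.h>
#include <stddef.h>

static const char *first_nonnull(int n, ...)
{
        const char *ret = NULL, *p;
        va_list ap;
        int i;

        va_start(ap, n);
        for (i = 0; i < n; i++) {
                p = va_arg(ap, const char *);
                if (p) {
                        ret = p;
                        goto out;       /* don't return directly... */
                }
        }
out:
        va_end(ap);                     /* ...so cleanup always runs */
        return ret;
}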
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 57acbd67d85d..fc8be0e3a4ff 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -69,7 +69,7 @@ void do_signal(struct pt_regs *regs)
struct ksignal ksig;
int handled_sig = 0;
- while (get_signal(&ksig)) {
+ if (get_signal(&ksig)) {
handled_sig = 1;
/* Whee! Actually deliver the signal. */
handle_signal(&ksig, regs);
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 0033e96c3f09..9011a88353de 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -23,7 +23,6 @@
#include <stdarg.h>
#include <linux/types.h>
#include <linux/edd.h>
-#include <asm/boot.h>
#include <asm/setup.h>
#include "bitops.h"
#include "ctype.h"
diff --git a/arch/x86/boot/video-mode.c b/arch/x86/boot/video-mode.c
index aa8a96b052e3..95c7a818c0ed 100644
--- a/arch/x86/boot/video-mode.c
+++ b/arch/x86/boot/video-mode.c
@@ -19,6 +19,8 @@
#include "video.h"
#include "vesa.h"
+#include <uapi/asm/boot.h>
+
/*
* Common variables
*/
diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
index 05111bb8d018..77780e386e9b 100644
--- a/arch/x86/boot/video.c
+++ b/arch/x86/boot/video.c
@@ -13,6 +13,8 @@
* Select video mode
*/
+#include <uapi/asm/boot.h>
+
#include "boot.h"
#include "video.h"
#include "vesa.h"
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 53616ca03244..a55697d19824 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -509,6 +509,17 @@ END(irq_entries_start)
* tracking that we're in kernel mode.
*/
SWAPGS
+
+ /*
+ * We need to tell lockdep that IRQs are off. We can't do this until
+ * we fix gsbase, and we should do it before enter_from_user_mode
+ * (which can take locks). Since TRACE_IRQS_OFF is idempotent,
+ * the simplest way to handle it is to just call it twice if
+ * we enter from user mode. There's no reason to optimize this since
+ * TRACE_IRQS_OFF is a no-op if lockdep is off.
+ */
+ TRACE_IRQS_OFF
+
#ifdef CONFIG_CONTEXT_TRACKING
call enter_from_user_mode
#endif
@@ -1049,12 +1060,18 @@ ENTRY(error_entry)
SWAPGS
.Lerror_entry_from_usermode_after_swapgs:
+ /*
+ * We need to tell lockdep that IRQs are off. We can't do this until
+ * we fix gsbase, and we should do it before enter_from_user_mode
+ * (which can take locks).
+ */
+ TRACE_IRQS_OFF
#ifdef CONFIG_CONTEXT_TRACKING
call enter_from_user_mode
#endif
+ ret
.Lerror_entry_done:
-
TRACE_IRQS_OFF
ret
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index c5b7fb2774d0..cc071c6f7d4d 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -9,19 +9,21 @@
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
+#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
+
+#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
+#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
+
#define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
-/* Cast PAGE_MASK to a signed type so that it is sign-extended if
+/* Cast *PAGE_MASK to a signed type so that it is sign-extended if
virtual addresses are 32-bits but physical addresses are larger
(ie, 32-bit PAE). */
#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
-
-#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
-#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
-
-#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
-#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
+#define PHYSICAL_PMD_PAGE_MASK (((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK)
+#define PHYSICAL_PUD_PAGE_MASK (((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK)
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
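Why the *PAGE_MASK definitions move above the PHYSICAL_* ones and gain signed casts: on 32-bit PAE the mask is a 32-bit unsigned long while physical addresses are wider, so the mask must be sign-extended before being widened or the high physical bits get stripped. A worked userspace example, assuming a 2 MiB PMD and a 36-bit physical space:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t pmd_page_mask = ~((1UL << 21) - 1);  /* 0xffe00000 */
        uint64_t phys_mask = (1ULL << 36) - 1;

        uint64_t zero_ext = (uint64_t)pmd_page_mask & phys_mask;
        uint64_t sign_ext = (uint64_t)(int32_t)pmd_page_mask & phys_mask;

        printf("%#llx vs %#llx\n",
               (unsigned long long)zero_ext,   /* 0xffe00000: bits 32-35 lost */
               (unsigned long long)sign_ext);  /* 0xfffe00000: preserved */
        return 0;
}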
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index dd5b0aa9dd2f..a471cadb9630 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -279,17 +279,14 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
static inline pudval_t pud_pfn_mask(pud_t pud)
{
if (native_pud_val(pud) & _PAGE_PSE)
- return PUD_PAGE_MASK & PHYSICAL_PAGE_MASK;
+ return PHYSICAL_PUD_PAGE_MASK;
else
return PTE_PFN_MASK;
}
static inline pudval_t pud_flags_mask(pud_t pud)
{
- if (native_pud_val(pud) & _PAGE_PSE)
- return ~(PUD_PAGE_MASK & (pudval_t)PHYSICAL_PAGE_MASK);
- else
- return ~PTE_PFN_MASK;
+ return ~pud_pfn_mask(pud);
}
static inline pudval_t pud_flags(pud_t pud)
@@ -300,17 +297,14 @@ static inline pudval_t pud_flags(pud_t pud)
static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
{
if (native_pmd_val(pmd) & _PAGE_PSE)
- return PMD_PAGE_MASK & PHYSICAL_PAGE_MASK;
+ return PHYSICAL_PMD_PAGE_MASK;
else
return PTE_PFN_MASK;
}
static inline pmdval_t pmd_flags_mask(pmd_t pmd)
{
- if (native_pmd_val(pmd) & _PAGE_PSE)
- return ~(PMD_PAGE_MASK & (pmdval_t)PHYSICAL_PAGE_MASK);
- else
- return ~PTE_PFN_MASK;
+ return ~pmd_pfn_mask(pmd);
}
static inline pmdval_t pmd_flags(pmd_t pmd)
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 48d34d28f5a6..cd0fc0cc78bc 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -1,7 +1,6 @@
#ifndef _ASM_X86_PLATFORM_H
#define _ASM_X86_PLATFORM_H
-#include <asm/pgtable_types.h>
#include <asm/bootparam.h>
struct mpc_bus;
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 7fc27f1cca58..b3e94ef461fd 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -698,3 +698,4 @@ int __init microcode_init(void)
return error;
}
+late_initcall(microcode_init);
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4562cf070c27..2bf79d7c97df 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -5,7 +5,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
* Copyright (C) 2009 Google, Inc., Stephane Eranian
*
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 499f533dd3cc..d0e35ebb2adb 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -5,7 +5,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
* Copyright (C) 2009 Google, Inc., Stephane Eranian
*
@@ -387,7 +387,7 @@ struct cpu_hw_events {
/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
__EVENT_CONSTRAINT(code, n, \
- INTEL_ARCH_EVENT_MASK|INTEL_ARCH_EVENT_MASK, \
+ INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
@@ -627,6 +627,7 @@ struct x86_perf_task_context {
u64 lbr_from[MAX_LBR_ENTRIES];
u64 lbr_to[MAX_LBR_ENTRIES];
u64 lbr_info[MAX_LBR_ENTRIES];
+ int tos;
int lbr_callstack_users;
int lbr_stack_state;
};
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index f63360be2238..e2a430021e46 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -232,7 +232,7 @@ static struct event_constraint intel_hsw_event_constraints[] = {
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
- INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
+ INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 377e8f8ed391..a316ca96f1b6 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
{
if (event->attach_state & PERF_ATTACH_TASK)
- return perf_cgroup_from_task(event->hw.target);
+ return perf_cgroup_from_task(event->hw.target, event->ctx);
return event->cgrp;
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index bfd0b717e944..659f01e165d5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -239,7 +239,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
}
mask = x86_pmu.lbr_nr - 1;
- tos = intel_pmu_lbr_tos();
+ tos = task_ctx->tos;
for (i = 0; i < tos; i++) {
lbr_idx = (tos - i) & mask;
wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
@@ -247,6 +247,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
+ wrmsrl(x86_pmu.lbr_tos, tos);
task_ctx->lbr_stack_state = LBR_NONE;
}
@@ -270,6 +271,7 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
+ task_ctx->tos = tos;
task_ctx->lbr_stack_state = LBR_VALID;
}
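The LBR change records the top-of-stack index at save time and replays that snapshot at restore time; reading the TOS MSR on restore would reflect whatever ran in between. A hedged before/after sketch:

/*
 * save:    task_ctx->tos = intel_pmu_lbr_tos();   snapshot TOS
 * restore: tos = task_ctx->tos;                   use the snapshot
 *          ...replay lbr_from/lbr_to relative to tos...
 *          wrmsrl(x86_pmu.lbr_tos, tos);          rewind hardware TOS
 */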
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
index dc5fa6a1e8d6..3512ba607361 100644
--- a/arch/x86/kernel/irq_work.c
+++ b/arch/x86/kernel/irq_work.c
@@ -1,7 +1,7 @@
/*
* x86 specific code for irq_work
*
- * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
*/
#include <linux/kernel.h>
diff --git a/arch/x86/kernel/pmem.c b/arch/x86/kernel/pmem.c
index 4f00b63d7ff3..14415aff1813 100644
--- a/arch/x86/kernel/pmem.c
+++ b/arch/x86/kernel/pmem.c
@@ -4,10 +4,22 @@
*/
#include <linux/platform_device.h>
#include <linux/module.h>
+#include <linux/ioport.h>
+
+static int found(u64 start, u64 end, void *data)
+{
+ return 1;
+}
static __init int register_e820_pmem(void)
{
+ char *pmem = "Persistent Memory (legacy)";
struct platform_device *pdev;
+ int rc;
+
+ rc = walk_iomem_res(pmem, IORESOURCE_MEM, 0, -1, NULL, found);
+ if (rc <= 0)
+ return 0;
/*
* See drivers/nvdimm/e820.c for the implementation, this is
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 29db25f9a745..d2bbe343fda7 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1250,8 +1250,6 @@ void __init setup_arch(char **cmdline_p)
if (efi_enabled(EFI_BOOT))
efi_apply_memmap_quirks();
#endif
-
- microcode_init();
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index b7ffb7c00075..cb6282c3638f 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -690,12 +690,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
signal_setup_done(failed, ksig, stepping);
}
-#ifdef CONFIG_X86_32
-#define NR_restart_syscall __NR_restart_syscall
-#else /* !CONFIG_X86_32 */
-#define NR_restart_syscall \
- test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
-#endif /* CONFIG_X86_32 */
+static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
+{
+#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
+ return __NR_restart_syscall;
+#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
+ return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
+ __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
+#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
+}
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
@@ -724,7 +727,7 @@ void do_signal(struct pt_regs *regs)
break;
case -ERESTART_RESTARTBLOCK:
- regs->ax = NR_restart_syscall;
+ regs->ax = get_nr_restart_syscall(regs);
regs->ip -= 2;
break;
}
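The x32 detail in get_nr_restart_syscall(): x32 system calls carry __X32_SYSCALL_BIT in orig_ax, and the restart number must carry it too, or the restarted call would be dispatched through the wrong ABI's table. A sketch in isolation (constant value as in the x86 uapi headers):

#define __X32_SYSCALL_BIT 0x40000000

/* for an x32 task, orig_ax has the bit set, so it is propagated: */
nr = __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);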
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 892ee2e5ecbc..fbabe4fcc7fb 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -509,7 +509,7 @@ void __inquire_remote_apic(int apicid)
*/
#define UDELAY_10MS_DEFAULT 10000
-static unsigned int init_udelay = INT_MAX;
+static unsigned int init_udelay = UINT_MAX;
static int __init cpu_init_udelay(char *str)
{
@@ -522,14 +522,15 @@ early_param("cpu_init_udelay", cpu_init_udelay);
static void __init smp_quirk_init_udelay(void)
{
/* if cmdline changed it from default, leave it alone */
- if (init_udelay != INT_MAX)
+ if (init_udelay != UINT_MAX)
return;
/* if modern processor, use no delay */
if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
- ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF)))
+ ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
init_udelay = 0;
-
+ return;
+ }
/* else, use legacy delay */
init_udelay = UDELAY_10MS_DEFAULT;
}
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index a035c2aa7801..0f1c6fc3ddd8 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -89,7 +89,7 @@ static struct addr_marker address_markers[] = {
{ 0/* VMALLOC_START */, "vmalloc() Area" },
{ 0/*VMALLOC_END*/, "vmalloc() End" },
# ifdef CONFIG_HIGHMEM
- { 0/*PKMAP_BASE*/, "Persisent kmap() Area" },
+ { 0/*PKMAP_BASE*/, "Persistent kmap() Area" },
# endif
{ 0/*FIXADDR_START*/, "Fixmap Area" },
#endif
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 1202d5ca2fb5..b2fd67da1701 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -101,19 +101,19 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
switch (type) {
case REG_TYPE_RM:
regno = X86_MODRM_RM(insn->modrm.value);
- if (X86_REX_B(insn->rex_prefix.value) == 1)
+ if (X86_REX_B(insn->rex_prefix.value))
regno += 8;
break;
case REG_TYPE_INDEX:
regno = X86_SIB_INDEX(insn->sib.value);
- if (X86_REX_X(insn->rex_prefix.value) == 1)
+ if (X86_REX_X(insn->rex_prefix.value))
regno += 8;
break;
case REG_TYPE_BASE:
regno = X86_SIB_BASE(insn->sib.value);
- if (X86_REX_B(insn->rex_prefix.value) == 1)
+ if (X86_REX_B(insn->rex_prefix.value))
regno += 8;
break;
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
index 7bcf06a7cd12..6eb3c8af96e2 100644
--- a/arch/x86/pci/bus_numa.c
+++ b/arch/x86/pci/bus_numa.c
@@ -50,18 +50,9 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
if (!found)
pci_add_resource(resources, &info->busn);
- list_for_each_entry(root_res, &info->resources, list) {
- struct resource *res;
- struct resource *root;
+ list_for_each_entry(root_res, &info->resources, list)
+ pci_add_resource(resources, &root_res->res);
- res = &root_res->res;
- pci_add_resource(resources, res);
- if (res->flags & IORESOURCE_IO)
- root = &ioport_resource;
- else
- root = &iomem_resource;
- insert_resource(root, res);
- }
return;
default_resources:
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index 06934a8a4872..e5f854ce2d72 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -211,7 +211,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
if (err)
return 1;
- err = convert_fxsr_from_user(&fpx, sc.fpstate);
+ err = convert_fxsr_from_user(&fpx, (void *)sc.fpstate);
if (err)
return 1;
@@ -227,7 +227,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
{
struct user_i387_struct fp;
- err = copy_from_user(&fp, sc.fpstate,
+ err = copy_from_user(&fp, (void *)sc.fpstate,
sizeof(struct user_i387_struct));
if (err)
return 1;
@@ -291,7 +291,7 @@ static int copy_sc_to_user(struct sigcontext __user *to,
#endif
#undef PUTREG
sc.oldmask = mask;
- sc.fpstate = to_fp;
+ sc.fpstate = (unsigned long)to_fp;
err = copy_to_user(to, &sc, sizeof(struct sigcontext));
if (err)
@@ -468,12 +468,10 @@ long sys_sigreturn(void)
struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
sigset_t set;
struct sigcontext __user *sc = &frame->sc;
- unsigned long __user *oldmask = &sc->oldmask;
- unsigned long __user *extramask = frame->extramask;
int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);
- if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) ||
- copy_from_user(&set.sig[1], extramask, sig_size))
+ if (copy_from_user(&set.sig[0], (void *)sc->oldmask, sizeof(set.sig[0])) ||
+ copy_from_user(&set.sig[1], frame->extramask, sig_size))
goto segfault;
set_current_blocked(&set);
@@ -505,6 +503,7 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
{
struct rt_sigframe __user *frame;
int err = 0, sig = ksig->sig;
+ unsigned long fp_to;
frame = (struct rt_sigframe __user *)
round_down(stack_top - sizeof(struct rt_sigframe), 16);
@@ -526,7 +525,10 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
err |= __save_altstack(&frame->uc.uc_stack, PT_REGS_SP(regs));
err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
set->sig[0]);
- err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate);
+
+ fp_to = (unsigned long)&frame->fpstate;
+
+ err |= __put_user(fp_to, &frame->uc.uc_mcontext.fpstate);
if (sizeof(*set) == 16) {
err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 5bcdfc10c23a..5a37188b559f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1127,15 +1127,15 @@ void blkcg_exit_queue(struct request_queue *q)
* of the main cic data structures. For now we allow a task to change
* its cgroup only if it's the only owner of its ioc.
*/
-static int blkcg_can_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
+static int blkcg_can_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
+ struct cgroup_subsys_state *dst_css;
struct io_context *ioc;
int ret = 0;
/* task_lock() is needed to avoid races with exit_io_context() */
- cgroup_taskset_for_each(task, tset) {
+ cgroup_taskset_for_each(task, dst_css, tset) {
task_lock(task);
ioc = task->io_context;
if (ioc && atomic_read(&ioc->nr_tasks) > 1)
diff --git a/block/blk-core.c b/block/blk-core.c
index a0af4043dda2..3636be469fa2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3405,6 +3405,9 @@ int blk_pre_runtime_suspend(struct request_queue *q)
{
int ret = 0;
+ if (!q->dev)
+ return ret;
+
spin_lock_irq(q->queue_lock);
if (q->nr_pending) {
ret = -EBUSY;
@@ -3432,6 +3435,9 @@ EXPORT_SYMBOL(blk_pre_runtime_suspend);
*/
void blk_post_runtime_suspend(struct request_queue *q, int err)
{
+ if (!q->dev)
+ return;
+
spin_lock_irq(q->queue_lock);
if (!err) {
q->rpm_status = RPM_SUSPENDED;
@@ -3456,6 +3462,9 @@ EXPORT_SYMBOL(blk_post_runtime_suspend);
*/
void blk_pre_runtime_resume(struct request_queue *q)
{
+ if (!q->dev)
+ return;
+
spin_lock_irq(q->queue_lock);
q->rpm_status = RPM_RESUMING;
spin_unlock_irq(q->queue_lock);
@@ -3478,6 +3487,9 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
*/
void blk_post_runtime_resume(struct request_queue *q, int err)
{
+ if (!q->dev)
+ return;
+
spin_lock_irq(q->queue_lock);
if (!err) {
q->rpm_status = RPM_ACTIVE;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 7d8f129a1516..dd4973583978 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -91,7 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
lim->virt_boundary_mask = 0;
lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
- lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+ lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors =
+ BLK_SAFE_MAX_SECTORS;
lim->chunk_sectors = 0;
lim->max_write_same_sectors = 0;
lim->max_discard_sectors = 0;
@@ -127,6 +128,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
lim->max_hw_sectors = UINT_MAX;
lim->max_segment_size = UINT_MAX;
lim->max_sectors = UINT_MAX;
+ lim->max_dev_sectors = UINT_MAX;
lim->max_write_same_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
@@ -214,8 +216,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
EXPORT_SYMBOL(blk_queue_bounce_limit);
/**
- * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
- * @limits: the queue limits
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
+ * @q: the request queue for the device
* @max_hw_sectors: max hardware sectors in the usual 512b unit
*
* Description:
@@ -224,13 +226,19 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
* the device driver based upon the capabilities of the I/O
* controller.
*
+ * max_dev_sectors is a hard limit imposed by the storage device for
+ * READ/WRITE requests. It is set by the disk driver.
+ *
* max_sectors is a soft limit imposed by the block layer for
* filesystem type requests. This value can be overridden on a
* per-device basis in /sys/block/<device>/queue/max_sectors_kb.
* The soft limit can not exceed max_hw_sectors.
**/
-void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
+ struct queue_limits *limits = &q->limits;
+ unsigned int max_sectors;
+
if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
printk(KERN_INFO "%s: set to minimum %d\n",
@@ -238,22 +246,9 @@ void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_
}
limits->max_hw_sectors = max_hw_sectors;
- limits->max_sectors = min_t(unsigned int, max_hw_sectors,
- BLK_DEF_MAX_SECTORS);
-}
-EXPORT_SYMBOL(blk_limits_max_hw_sectors);
-
-/**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q: the request queue for the device
- * @max_hw_sectors: max hardware sectors in the usual 512b unit
- *
- * Description:
- * See description for blk_limits_max_hw_sectors().
- **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
-{
- blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
+ max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
+ max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
+ limits->max_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
@@ -527,6 +522,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+ t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
t->max_write_same_sectors = min(t->max_write_same_sectors,
b->max_write_same_sectors);
t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
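A worked example of the new clamp in blk_queue_max_hw_sectors(), with invented device numbers and BLK_DEF_MAX_SECTORS at its 4.4-era value of 2560: a controller advertising max_hw_sectors = 65535 over a disk reporting max_dev_sectors = 2048 yields

        min_not_zero(65535, 2048) = 2048
        min(2048, 2560)           = 2048   ->  limits->max_sectors

while a disk that reports no limit (max_dev_sectors = 0) falls back to min(65535, 2560) = 2560, matching the old behaviour.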
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 565b8dac5782..e140cc487ce1 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -205,6 +205,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
if (ret < 0)
return ret;
+ max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
+ q->limits.max_dev_sectors >> 1);
+
if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
return -EINVAL;
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index b4ffc5be1a93..e5b5721809e2 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -277,12 +277,12 @@ static int ablkcipher_walk_first(struct ablkcipher_request *req,
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
+ walk->iv = req->info;
walk->nbytes = walk->total;
if (unlikely(!walk->total))
return 0;
walk->iv_buffer = NULL;
- walk->iv = req->info;
if (unlikely(((unsigned long)walk->iv & alignmask))) {
int err = ablkcipher_copy_iv(walk, tfm, alignmask);
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 11b981492031..8cc1622b2ee0 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -326,12 +326,12 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
+ walk->iv = desc->info;
walk->nbytes = walk->total;
if (unlikely(!walk->total))
return 0;
walk->buffer = NULL;
- walk->iv = desc->info;
if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
int err = blkcipher_copy_iv(walk);
if (err)
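Both cipher-walk hunks fix the same ordering bug: walk->iv must be assigned before the zero-length early return, because callers are entitled to read the IV back even when no bytes were processed. The shape of the fix:

walk->iv = desc->info;          /* moved above the early return */
walk->nbytes = walk->total;
if (unlikely(!walk->total))
        return 0;               /* walk->iv is already valid here */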
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 25dbb76c02cc..5eef4cb4f70e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -58,10 +58,10 @@ config ACPI_CCA_REQUIRED
bool
config ACPI_DEBUGGER
- bool "In-kernel debugger (EXPERIMENTAL)"
+ bool "AML debugger interface (EXPERIMENTAL)"
select ACPI_DEBUG
help
- Enable in-kernel debugging facilities: statistics, internal
+ Enable in-kernel debugging of AML facilities: statistics, internal
object dump, single step control method execution.
This is still under development, currently enabling this only
results in the compilation of the ACPICA debugger files.
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index f7dab53b352a..aa45d4802707 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -233,11 +233,12 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc,
struct nfit_table_prev *prev,
struct acpi_nfit_system_address *spa)
{
+ size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
struct device *dev = acpi_desc->dev;
struct nfit_spa *nfit_spa;
list_for_each_entry(nfit_spa, &prev->spas, list) {
- if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
+ if (memcmp(nfit_spa->spa, spa, length) == 0) {
list_move_tail(&nfit_spa->list, &acpi_desc->spas);
return true;
}
@@ -259,11 +260,12 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
struct nfit_table_prev *prev,
struct acpi_nfit_memory_map *memdev)
{
+ size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
struct device *dev = acpi_desc->dev;
struct nfit_memdev *nfit_memdev;
list_for_each_entry(nfit_memdev, &prev->memdevs, list)
- if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
+ if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
return true;
}
@@ -284,11 +286,12 @@ static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
struct nfit_table_prev *prev,
struct acpi_nfit_control_region *dcr)
{
+ size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
struct device *dev = acpi_desc->dev;
struct nfit_dcr *nfit_dcr;
list_for_each_entry(nfit_dcr, &prev->dcrs, list)
- if (memcmp(nfit_dcr->dcr, dcr, sizeof(*dcr)) == 0) {
+ if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
return true;
}
@@ -308,11 +311,12 @@ static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
struct nfit_table_prev *prev,
struct acpi_nfit_data_region *bdw)
{
+ size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
struct device *dev = acpi_desc->dev;
struct nfit_bdw *nfit_bdw;
list_for_each_entry(nfit_bdw, &prev->bdws, list)
- if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
+ if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
return true;
}
@@ -332,11 +336,12 @@ static bool add_idt(struct acpi_nfit_desc *acpi_desc,
struct nfit_table_prev *prev,
struct acpi_nfit_interleave *idt)
{
+ size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
struct device *dev = acpi_desc->dev;
struct nfit_idt *nfit_idt;
list_for_each_entry(nfit_idt, &prev->idts, list)
- if (memcmp(nfit_idt->idt, idt, sizeof(*idt)) == 0) {
+ if (memcmp(nfit_idt->idt, idt, length) == 0) {
list_move_tail(&nfit_idt->list, &acpi_desc->idts);
return true;
}
@@ -356,11 +361,12 @@ static bool add_flush(struct acpi_nfit_desc *acpi_desc,
struct nfit_table_prev *prev,
struct acpi_nfit_flush_address *flush)
{
+ size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
struct device *dev = acpi_desc->dev;
struct nfit_flush *nfit_flush;
list_for_each_entry(nfit_flush, &prev->flushes, list)
- if (memcmp(nfit_flush->flush, flush, sizeof(*flush)) == 0) {
+ if (memcmp(nfit_flush->flush, flush, length) == 0) {
list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
return true;
}
@@ -655,7 +661,7 @@ static ssize_t revision_show(struct device *dev,
struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
- return sprintf(buf, "%d\n", acpi_desc->nfit->header.revision);
+ return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);
@@ -1652,7 +1658,6 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
data = (u8 *) acpi_desc->nfit;
end = data + sz;
- data += sizeof(struct acpi_table_nfit);
while (!IS_ERR_OR_NULL(data))
data = add_table(acpi_desc, &prev, data, end);
@@ -1748,13 +1753,29 @@ static int acpi_nfit_add(struct acpi_device *adev)
return PTR_ERR(acpi_desc);
}
- acpi_desc->nfit = (struct acpi_table_nfit *) tbl;
+ /*
+ * Save the acpi header for later and then skip it,
+ * making nfit point to the first nfit table header.
+ */
+ acpi_desc->acpi_header = *tbl;
+ acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
+ sz -= sizeof(struct acpi_table_nfit);
/* Evaluate _FIT and override with that if present */
status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
if (ACPI_SUCCESS(status) && buf.length > 0) {
- acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer;
- sz = buf.length;
+ union acpi_object *obj;
+ /*
+ * Adjust for the acpi_object header of the _FIT
+ */
+ obj = buf.pointer;
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ acpi_desc->nfit =
+ (struct acpi_nfit_header *)obj->buffer.pointer;
+ sz = obj->buffer.length;
+ } else
+ dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
+ __func__, (int) obj->type);
}
rc = acpi_nfit_init(acpi_desc, sz);
@@ -1777,7 +1798,8 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_table_nfit *nfit_saved;
+ struct acpi_nfit_header *nfit_saved;
+ union acpi_object *obj;
struct device *dev = &adev->dev;
acpi_status status;
int ret;
@@ -1788,7 +1810,7 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
if (!dev->driver) {
/* dev->driver may be null if we're being removed */
dev_dbg(dev, "%s: no driver found for dev\n", __func__);
- return;
+ goto out_unlock;
}
if (!acpi_desc) {
@@ -1808,12 +1830,19 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
}
nfit_saved = acpi_desc->nfit;
- acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer;
- ret = acpi_nfit_init(acpi_desc, buf.length);
- if (!ret) {
- /* Merge failed, restore old nfit, and exit */
- acpi_desc->nfit = nfit_saved;
- dev_err(dev, "failed to merge updated NFIT\n");
+ obj = buf.pointer;
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ acpi_desc->nfit =
+ (struct acpi_nfit_header *)obj->buffer.pointer;
+ ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
+ if (ret) {
+ /* Merge failed, restore old nfit, and exit */
+ acpi_desc->nfit = nfit_saved;
+ dev_err(dev, "failed to merge updated NFIT\n");
+ }
+ } else {
+ /* Bad _FIT, restore old nfit */
+ dev_err(dev, "Invalid _FIT\n");
}
kfree(buf.pointer);
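
Each add_*() helper above now bounds its comparison by the length the firmware actually reported for the sub-table. A minimal sketch of that pattern, with hypothetical types:

    #include <string.h>

    struct hdr_sketch {
            unsigned short type;
            unsigned short length;  /* length reported by firmware */
    };

    static int tables_equal(const struct hdr_sketch *a,
                            const struct hdr_sketch *b, size_t struct_size)
    {
            /* never read past the shorter of the two bounds */
            size_t len = a->length < struct_size ? a->length : struct_size;

            return memcmp(a, b, len) == 0;
    }
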
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index 2ea5c0797c8f..3d549a383659 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -96,7 +96,8 @@ struct nfit_mem {
struct acpi_nfit_desc {
struct nvdimm_bus_descriptor nd_desc;
- struct acpi_table_nfit *nfit;
+ struct acpi_table_header acpi_header;
+ struct acpi_nfit_header *nfit;
struct mutex spa_map_mutex;
struct mutex init_mutex;
struct list_head spa_maps;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 850d7bf0c873..ae3fe4e64203 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -768,6 +768,13 @@ static void pci_acpi_root_add_resources(struct acpi_pci_root_info *info)
else
continue;
+ /*
+ * Some legacy x86 host bridge drivers use iomem_resource and
+	 * ioport_resource as their default resource pool; skip those.
+ */
+ if (res == root)
+ continue;
+
conflict = insert_resource_conflict(root, res);
if (conflict) {
dev_info(&info->bridge->dev,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ff02bb4218fc..cdfbcc54821f 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -314,16 +314,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
- { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
- { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
@@ -350,10 +340,22 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
+ { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
+ { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 8490d37aee2a..f7a7fa81740e 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -62,6 +62,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
}
+#ifdef CONFIG_PM_SLEEP
static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state)
{
return ahci_platform_suspend_host(&pdev->dev);
@@ -81,6 +82,10 @@ static int ahci_mvebu_resume(struct platform_device *pdev)
return ahci_platform_resume_host(&pdev->dev);
}
+#else
+#define ahci_mvebu_suspend NULL
+#define ahci_mvebu_resume NULL
+#endif
static const struct ata_port_info ahci_mvebu_port_info = {
.flags = AHCI_FLAG_COMMON,
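
The #ifdef added above follows the usual stub pattern so the callbacks compile away cleanly when suspend/resume support is disabled; a kernel-style sketch (hypothetical driver names), assuming the ops struct accepts NULL callbacks:

    #include <linux/platform_device.h>

    #ifdef CONFIG_PM_SLEEP
    static int demo_suspend(struct platform_device *pdev, pm_message_t state)
    {
            return 0;       /* real suspend work would go here */
    }

    static int demo_resume(struct platform_device *pdev)
    {
            return 0;       /* real resume work would go here */
    }
    #else
    /* NULL stubs keep the driver struct initializer valid */
    #define demo_suspend NULL
    #define demo_resume NULL
    #endif
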
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 096064cd6c52..4665512dae44 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1273,6 +1273,15 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
ata_tf_to_fis(tf, pmp, is_cmd, fis);
ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
+ /* set port value for softreset of Port Multiplier */
+ if (pp->fbs_enabled && pp->fbs_last_dev != pmp) {
+ tmp = readl(port_mmio + PORT_FBS);
+ tmp &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
+ tmp |= pmp << PORT_FBS_DEV_OFFSET;
+ writel(tmp, port_mmio + PORT_FBS);
+ pp->fbs_last_dev = pmp;
+ }
+
/* issue & wait */
writel(1, port_mmio + PORT_CMD_ISSUE);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index cb0508af1459..961acc788f44 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1505,12 +1505,20 @@ static const char *ata_err_string(unsigned int err_mask)
unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
u8 page, void *buf, unsigned int sectors)
{
+ unsigned long ap_flags = dev->link->ap->flags;
struct ata_taskfile tf;
unsigned int err_mask;
bool dma = false;
DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
+ /*
+ * Return error without actually issuing the command on controllers
+	 * which, e.g., lock up on a read log page.
+ */
+ if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
+ return AC_ERR_DEV;
+
retry:
ata_tf_init(dev, &tf);
if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 5389579c5120..a723ae929783 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -45,7 +45,8 @@ enum {
SATA_FSL_MAX_PRD_DIRECT = 16, /* Direct PRDT entries */
SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
- ATA_FLAG_PMP | ATA_FLAG_NCQ | ATA_FLAG_AN),
+ ATA_FLAG_PMP | ATA_FLAG_NCQ |
+ ATA_FLAG_AN | ATA_FLAG_NO_LOG_PAGE),
SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH,
SATA_FSL_CMD_HDR_SIZE = 16, /* 4 DWORDS */
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index dea6edcbf145..29bcff086bce 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -630,6 +630,9 @@ static void sil_dev_config(struct ata_device *dev)
unsigned int n, quirks = 0;
unsigned char model_num[ATA_ID_PROD_LEN + 1];
+ /* This controller doesn't support trim */
+ dev->horkage |= ATA_HORKAGE_NOTRIM;
+
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
for (n = 0; sil_blacklist[n].product; n++)
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 2804aed3f416..25425d3f2575 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -303,6 +303,10 @@ static int memory_subsys_offline(struct device *dev)
if (mem->state == MEM_OFFLINE)
return 0;
+ /* Can't offline block with non-present sections */
+ if (mem->section_count != sections_per_block)
+ return -EINVAL;
+
return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index e03b1ad25a90..167418e73445 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1775,10 +1775,10 @@ int genpd_dev_pm_attach(struct device *dev)
}
pd = of_genpd_get_from_provider(&pd_args);
+ of_node_put(pd_args.np);
if (IS_ERR(pd)) {
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
__func__, PTR_ERR(pd));
- of_node_put(dev->of_node);
return -EPROBE_DEFER;
}
@@ -1796,7 +1796,6 @@ int genpd_dev_pm_attach(struct device *dev)
if (ret < 0) {
dev_err(dev, "failed to add to PM domain %s: %d",
pd->name, ret);
- of_node_put(dev->of_node);
goto out;
}
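
The reference-count rule behind the genpd fix above, sketched with the kernel APIs: of_parse_phandle_with_args() takes a reference on args.np on success, so that reference is dropped exactly once, while dev->of_node, whose reference this code never took, is left alone.

    #include <linux/of.h>

    static int lookup_sketch(struct device_node *np)
    {
            struct of_phandle_args args;
            int ret;

            ret = of_parse_phandle_with_args(np, "power-domains",
                                             "#power-domain-cells", 0, &args);
            if (ret < 0)
                    return ret;

            /* ... look up the provider behind args ... */

            of_node_put(args.np);   /* balance the get from the parser */
            return 0;
    }
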
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index e60dd12e23aa..1e937ac5f456 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -160,9 +160,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
struct gpd_timing_data *td;
s64 constraint_ns;
- if (!pdd->dev->driver)
- continue;
-
/*
* Check if the device is allowed to be off long enough for the
* domain to turn off and on (that's how much time it will
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 0c3940ec5e62..8162475d96b5 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -444,8 +444,9 @@ static void null_lnvm_end_io(struct request *rq, int error)
blk_put_request(rq);
}
-static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
+static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
+ struct request_queue *q = dev->q;
struct request *rq;
struct bio *bio = rqd->bio;
@@ -470,7 +471,7 @@ static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
return 0;
}
-static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
+static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
sector_t size = gb * 1024 * 1024 * 1024ULL;
sector_t blksize;
@@ -523,7 +524,7 @@ static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
return 0;
}
-static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name)
+static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
mempool_t *virtmem_pool;
@@ -541,7 +542,7 @@ static void null_lnvm_destroy_dma_pool(void *pool)
mempool_destroy(pool);
}
-static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool,
+static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
gfp_t mem_flags, dma_addr_t *dma_handler)
{
return mempool_alloc(pool, mem_flags);
@@ -765,7 +766,9 @@ out:
static int __init null_init(void)
{
+ int ret = 0;
unsigned int i;
+ struct nullb *nullb;
if (bs > PAGE_SIZE) {
pr_warn("null_blk: invalid block size\n");
@@ -807,22 +810,29 @@ static int __init null_init(void)
0, 0, NULL);
if (!ppa_cache) {
pr_err("null_blk: unable to create ppa cache\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_ppa;
}
}
for (i = 0; i < nr_devices; i++) {
- if (null_add_dev()) {
- unregister_blkdev(null_major, "nullb");
- goto err_ppa;
- }
+ ret = null_add_dev();
+ if (ret)
+ goto err_dev;
}
pr_info("null: module loaded\n");
return 0;
-err_ppa:
+
+err_dev:
+ while (!list_empty(&nullb_list)) {
+ nullb = list_entry(nullb_list.next, struct nullb, list);
+ null_del_dev(nullb);
+ }
kmem_cache_destroy(ppa_cache);
- return -EINVAL;
+err_ppa:
+ unregister_blkdev(null_major, "nullb");
+ return ret;
}
static void __exit null_exit(void)
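
The reworked null_init() error handling above follows the standard goto-unwind shape; a reduced sketch with hypothetical stage helpers:

    static int register_stage_a(void);      /* hypothetical */
    static int register_stage_b(void);      /* hypothetical */
    static void unregister_stage_a(void);   /* hypothetical */

    static int init_sketch(void)
    {
            int ret;

            ret = register_stage_a();
            if (ret)
                    return ret;

            ret = register_stage_b();
            if (ret)
                    goto err_a;     /* undo completed steps in reverse */

            return 0;

    err_a:
            unregister_stage_a();
            return ret;             /* propagate the real error code */
    }
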
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 235708c7c46e..81ea69fee7ca 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3442,6 +3442,7 @@ static void rbd_queue_workfn(struct work_struct *work)
goto err_rq;
}
img_request->rq = rq;
+ snapc = NULL; /* img_request consumes a ref */
if (op_type == OBJ_OP_DISCARD)
result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 55fe9020459f..4cc72fa017c7 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1230,14 +1230,14 @@ static int smi_start_processing(void *send_info,
new_smi->intf = intf;
- /* Try to claim any interrupts. */
- if (new_smi->irq_setup)
- new_smi->irq_setup(new_smi);
-
/* Set up the timer that drives the interface. */
setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
+ /* Try to claim any interrupts. */
+ if (new_smi->irq_setup)
+ new_smi->irq_setup(new_smi);
+
/*
* Check if the user forcefully enabled the daemon.
*/
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 10819e248414..335322dc403f 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -209,6 +209,8 @@ EXPORT_SYMBOL_GPL(clk_register_gpio_mux);
struct clk_gpio_delayed_register_data {
const char *gpio_name;
+ int num_parents;
+ const char **parent_names;
struct device_node *node;
struct mutex lock;
struct clk *clk;
@@ -222,8 +224,6 @@ static struct clk *of_clk_gpio_delayed_register_get(
{
struct clk_gpio_delayed_register_data *data = _data;
struct clk *clk;
- const char **parent_names;
- int i, num_parents;
int gpio;
enum of_gpio_flags of_flags;
@@ -248,26 +248,14 @@ static struct clk *of_clk_gpio_delayed_register_get(
return ERR_PTR(gpio);
}
- num_parents = of_clk_get_parent_count(data->node);
-
- parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
- if (!parent_names) {
- clk = ERR_PTR(-ENOMEM);
- goto out;
- }
-
- for (i = 0; i < num_parents; i++)
- parent_names[i] = of_clk_get_parent_name(data->node, i);
-
- clk = data->clk_register_get(data->node->name, parent_names,
- num_parents, gpio, of_flags & OF_GPIO_ACTIVE_LOW);
+ clk = data->clk_register_get(data->node->name, data->parent_names,
+ data->num_parents, gpio, of_flags & OF_GPIO_ACTIVE_LOW);
if (IS_ERR(clk))
goto out;
data->clk = clk;
out:
mutex_unlock(&data->lock);
- kfree(parent_names);
return clk;
}
@@ -296,11 +284,24 @@ static void __init of_gpio_clk_setup(struct device_node *node,
unsigned gpio, bool active_low))
{
struct clk_gpio_delayed_register_data *data;
+ const char **parent_names;
+ int i, num_parents;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return;
+ num_parents = of_clk_get_parent_count(node);
+
+ parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
+ if (!parent_names)
+ return;
+
+ for (i = 0; i < num_parents; i++)
+ parent_names[i] = of_clk_get_parent_name(node, i);
+
+ data->num_parents = num_parents;
+ data->parent_names = parent_names;
data->node = node;
data->gpio_name = gpio_name;
data->clk_register_get = clk_register_get;
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 1ab0fb81c6a0..7bc1c4527ae4 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -778,8 +778,10 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
*/
clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
div = get_pll_div(cg, hwc, clksel);
- if (!div)
+ if (!div) {
+ kfree(hwc);
return NULL;
+ }
pct80_rate = clk_get_rate(div->clk);
pct80_rate *= 8;
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 0b501a9fef92..cd0f2726f5e0 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -292,6 +292,7 @@ static int scpi_clocks_probe(struct platform_device *pdev)
ret = scpi_clk_add(dev, child, match);
if (ret) {
scpi_clocks_remove(pdev);
+ of_node_put(child);
return ret;
}
}
diff --git a/drivers/clk/imx/clk-pllv1.c b/drivers/clk/imx/clk-pllv1.c
index 8564e4342c7d..82fe3662b5f6 100644
--- a/drivers/clk/imx/clk-pllv1.c
+++ b/drivers/clk/imx/clk-pllv1.c
@@ -52,7 +52,7 @@ static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pllv1 *pll = to_clk_pllv1(hw);
- long long ll;
+ unsigned long long ull;
int mfn_abs;
unsigned int mfi, mfn, mfd, pd;
u32 reg;
@@ -94,16 +94,16 @@ static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
rate = parent_rate * 2;
rate /= pd + 1;
- ll = (unsigned long long)rate * mfn_abs;
+ ull = (unsigned long long)rate * mfn_abs;
- do_div(ll, mfd + 1);
+ do_div(ull, mfd + 1);
if (mfn_is_negative(pll, mfn))
- ll = -ll;
+ ull = (rate * mfi) - ull;
+ else
+ ull = (rate * mfi) + ull;
- ll = (rate * mfi) + ll;
-
- return ll;
+ return ull;
}
static struct clk_ops clk_pllv1_ops = {
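
The signedness changes in this and the following PLL patches exist because do_div() is only defined for unsigned 64-bit dividends; a kernel-style sketch of the corrected arithmetic, assuming the same mfi/mfn/mfd meaning as above:

    #include <linux/types.h>
    #include <asm/div64.h>

    static unsigned long pll_rate_sketch(unsigned long rate, unsigned int mfi,
                                         unsigned int mfn_abs, unsigned int mfd,
                                         bool mfn_negative)
    {
            unsigned long long frac = (unsigned long long)rate * mfn_abs;

            do_div(frac, mfd + 1);  /* frac must stay unsigned for do_div() */

            /* handle the sign by add/subtract, never by negating frac */
            return mfn_negative ? (rate * mfi) - frac : (rate * mfi) + frac;
    }
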
diff --git a/drivers/clk/imx/clk-pllv2.c b/drivers/clk/imx/clk-pllv2.c
index b18f875eac6a..4aeda56ce372 100644
--- a/drivers/clk/imx/clk-pllv2.c
+++ b/drivers/clk/imx/clk-pllv2.c
@@ -79,7 +79,7 @@ static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
{
long mfi, mfn, mfd, pdf, ref_clk;
unsigned long dbl;
- s64 temp;
+ u64 temp;
dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;
@@ -98,8 +98,9 @@ static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
temp = (u64) ref_clk * abs(mfn);
do_div(temp, mfd + 1);
if (mfn < 0)
- temp = -temp;
- temp = (ref_clk * mfi) + temp;
+ temp = (ref_clk * mfi) - temp;
+ else
+ temp = (ref_clk * mfi) + temp;
return temp;
}
@@ -126,7 +127,7 @@ static int __clk_pllv2_set_rate(unsigned long rate, unsigned long parent_rate,
{
u32 reg;
long mfi, pdf, mfn, mfd = 999999;
- s64 temp64;
+ u64 temp64;
unsigned long quad_parent_rate;
quad_parent_rate = 4 * parent_rate;
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
index d1b1c95177bb..0a94d9661d91 100644
--- a/drivers/clk/imx/clk-vf610.c
+++ b/drivers/clk/imx/clk-vf610.c
@@ -335,22 +335,22 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
clk[VF610_CLK_SAI0_SEL] = imx_clk_mux("sai0_sel", CCM_CSCMR1, 0, 2, sai_sels, 4);
clk[VF610_CLK_SAI0_EN] = imx_clk_gate("sai0_en", "sai0_sel", CCM_CSCDR1, 16);
clk[VF610_CLK_SAI0_DIV] = imx_clk_divider("sai0_div", "sai0_en", CCM_CSCDR1, 0, 4);
- clk[VF610_CLK_SAI0] = imx_clk_gate2("sai0", "sai0_div", CCM_CCGR0, CCM_CCGRx_CGn(15));
+ clk[VF610_CLK_SAI0] = imx_clk_gate2("sai0", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(15));
clk[VF610_CLK_SAI1_SEL] = imx_clk_mux("sai1_sel", CCM_CSCMR1, 2, 2, sai_sels, 4);
clk[VF610_CLK_SAI1_EN] = imx_clk_gate("sai1_en", "sai1_sel", CCM_CSCDR1, 17);
clk[VF610_CLK_SAI1_DIV] = imx_clk_divider("sai1_div", "sai1_en", CCM_CSCDR1, 4, 4);
- clk[VF610_CLK_SAI1] = imx_clk_gate2("sai1", "sai1_div", CCM_CCGR1, CCM_CCGRx_CGn(0));
+ clk[VF610_CLK_SAI1] = imx_clk_gate2("sai1", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(0));
clk[VF610_CLK_SAI2_SEL] = imx_clk_mux("sai2_sel", CCM_CSCMR1, 4, 2, sai_sels, 4);
clk[VF610_CLK_SAI2_EN] = imx_clk_gate("sai2_en", "sai2_sel", CCM_CSCDR1, 18);
clk[VF610_CLK_SAI2_DIV] = imx_clk_divider("sai2_div", "sai2_en", CCM_CSCDR1, 8, 4);
- clk[VF610_CLK_SAI2] = imx_clk_gate2("sai2", "sai2_div", CCM_CCGR1, CCM_CCGRx_CGn(1));
+ clk[VF610_CLK_SAI2] = imx_clk_gate2("sai2", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(1));
clk[VF610_CLK_SAI3_SEL] = imx_clk_mux("sai3_sel", CCM_CSCMR1, 6, 2, sai_sels, 4);
clk[VF610_CLK_SAI3_EN] = imx_clk_gate("sai3_en", "sai3_sel", CCM_CSCDR1, 19);
clk[VF610_CLK_SAI3_DIV] = imx_clk_divider("sai3_div", "sai3_en", CCM_CSCDR1, 12, 4);
- clk[VF610_CLK_SAI3] = imx_clk_gate2("sai3", "sai3_div", CCM_CCGR1, CCM_CCGRx_CGn(2));
+ clk[VF610_CLK_SAI3] = imx_clk_gate2("sai3", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(2));
clk[VF610_CLK_NFC_SEL] = imx_clk_mux("nfc_sel", CCM_CSCMR1, 12, 2, nfc_sels, 4);
clk[VF610_CLK_NFC_EN] = imx_clk_gate("nfc_en", "nfc_sel", CCM_CSCDR2, 9);
diff --git a/drivers/clk/mmp/clk-mmp2.c b/drivers/clk/mmp/clk-mmp2.c
index 09d2832fbd78..71fd29348f28 100644
--- a/drivers/clk/mmp/clk-mmp2.c
+++ b/drivers/clk/mmp/clk-mmp2.c
@@ -9,6 +9,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/clk.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
diff --git a/drivers/clk/mmp/clk-pxa168.c b/drivers/clk/mmp/clk-pxa168.c
index 93e967c0f972..75244915df05 100644
--- a/drivers/clk/mmp/clk-pxa168.c
+++ b/drivers/clk/mmp/clk-pxa168.c
@@ -9,6 +9,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/clk.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
diff --git a/drivers/clk/mmp/clk-pxa910.c b/drivers/clk/mmp/clk-pxa910.c
index 993abcdb32cc..37ba04ba1368 100644
--- a/drivers/clk/mmp/clk-pxa910.c
+++ b/drivers/clk/mmp/clk-pxa910.c
@@ -9,6 +9,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/clk.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
diff --git a/drivers/clk/sunxi/clk-a10-pll2.c b/drivers/clk/sunxi/clk-a10-pll2.c
index 5484c31ec568..0ee1f363e4be 100644
--- a/drivers/clk/sunxi/clk-a10-pll2.c
+++ b/drivers/clk/sunxi/clk-a10-pll2.c
@@ -41,15 +41,10 @@
#define SUN4I_PLL2_OUTPUTS 4
-struct sun4i_pll2_data {
- u32 post_div_offset;
- u32 pre_div_flags;
-};
-
static DEFINE_SPINLOCK(sun4i_a10_pll2_lock);
static void __init sun4i_pll2_setup(struct device_node *node,
- struct sun4i_pll2_data *data)
+ int post_div_offset)
{
const char *clk_name = node->name, *parent;
struct clk **clks, *base_clk, *prediv_clk;
@@ -76,7 +71,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
parent, 0, reg,
SUN4I_PLL2_PRE_DIV_SHIFT,
SUN4I_PLL2_PRE_DIV_WIDTH,
- data->pre_div_flags,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&sun4i_a10_pll2_lock);
if (!prediv_clk) {
pr_err("Couldn't register the prediv clock\n");
@@ -127,7 +122,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
*/
val = readl(reg);
val &= ~(SUN4I_PLL2_POST_DIV_MASK << SUN4I_PLL2_POST_DIV_SHIFT);
- val |= (SUN4I_PLL2_POST_DIV_VALUE - data->post_div_offset) << SUN4I_PLL2_POST_DIV_SHIFT;
+ val |= (SUN4I_PLL2_POST_DIV_VALUE - post_div_offset) << SUN4I_PLL2_POST_DIV_SHIFT;
writel(val, reg);
of_property_read_string_index(node, "clock-output-names",
@@ -191,25 +186,17 @@ err_unmap:
iounmap(reg);
}
-static struct sun4i_pll2_data sun4i_a10_pll2_data = {
- .pre_div_flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
-};
-
static void __init sun4i_a10_pll2_setup(struct device_node *node)
{
- sun4i_pll2_setup(node, &sun4i_a10_pll2_data);
+ sun4i_pll2_setup(node, 0);
}
CLK_OF_DECLARE(sun4i_a10_pll2, "allwinner,sun4i-a10-pll2-clk",
sun4i_a10_pll2_setup);
-static struct sun4i_pll2_data sun5i_a13_pll2_data = {
- .post_div_offset = 1,
-};
-
static void __init sun5i_a13_pll2_setup(struct device_node *node)
{
- sun4i_pll2_setup(node, &sun5i_a13_pll2_data);
+ sun4i_pll2_setup(node, 1);
}
CLK_OF_DECLARE(sun5i_a13_pll2, "allwinner,sun5i-a13-pll2-clk",
diff --git a/drivers/clk/ti/clk-816x.c b/drivers/clk/ti/clk-816x.c
index 1dfad0c712cd..2a5d84fdddc5 100644
--- a/drivers/clk/ti/clk-816x.c
+++ b/drivers/clk/ti/clk-816x.c
@@ -20,6 +20,8 @@ static struct ti_dt_clk dm816x_clks[] = {
DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"),
DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
+ DT_CLK(NULL, "timer_32k_ck", "sysclk18_ck"),
+ DT_CLK(NULL, "timer_ext_ck", "tclkin_ck"),
DT_CLK(NULL, "mpu_ck", "mpu_ck"),
DT_CLK(NULL, "timer1_fck", "timer1_fck"),
DT_CLK(NULL, "timer2_fck", "timer2_fck"),
diff --git a/drivers/clk/ti/clkt_dpll.c b/drivers/clk/ti/clkt_dpll.c
index 9023ca9caf84..b5cc6f66ae5d 100644
--- a/drivers/clk/ti/clkt_dpll.c
+++ b/drivers/clk/ti/clkt_dpll.c
@@ -240,7 +240,7 @@ u8 omap2_init_dpll_parent(struct clk_hw *hw)
*/
unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
{
- long long dpll_clk;
+ u64 dpll_clk;
u32 dpll_mult, dpll_div, v;
struct dpll_data *dd;
@@ -262,7 +262,7 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
dpll_div = v & dd->div1_mask;
dpll_div >>= __ffs(dd->div1_mask);
- dpll_clk = (long long)clk_get_rate(dd->clk_ref) * dpll_mult;
+ dpll_clk = (u64)clk_get_rate(dd->clk_ref) * dpll_mult;
do_div(dpll_clk, dpll_div + 1);
return dpll_clk;
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 5b1726829e6d..df2558350fc1 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -214,7 +214,6 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_divider *divider;
unsigned int div, value;
- unsigned long flags = 0;
u32 val;
if (!hw || !rate)
@@ -228,9 +227,6 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
if (value > div_mask(divider))
value = div_mask(divider);
- if (divider->lock)
- spin_lock_irqsave(divider->lock, flags);
-
if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
val = div_mask(divider) << (divider->shift + 16);
} else {
@@ -240,9 +236,6 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
val |= value << divider->shift;
ti_clk_ll_ops->clk_writel(val, divider->reg);
- if (divider->lock)
- spin_unlock_irqrestore(divider->lock, flags);
-
return 0;
}
@@ -256,8 +249,7 @@ static struct clk *_register_divider(struct device *dev, const char *name,
const char *parent_name,
unsigned long flags, void __iomem *reg,
u8 shift, u8 width, u8 clk_divider_flags,
- const struct clk_div_table *table,
- spinlock_t *lock)
+ const struct clk_div_table *table)
{
struct clk_divider *div;
struct clk *clk;
@@ -288,7 +280,6 @@ static struct clk *_register_divider(struct device *dev, const char *name,
div->shift = shift;
div->width = width;
div->flags = clk_divider_flags;
- div->lock = lock;
div->hw.init = &init;
div->table = table;
@@ -421,7 +412,7 @@ struct clk *ti_clk_register_divider(struct ti_clk *setup)
clk = _register_divider(NULL, setup->name, div->parent,
flags, (void __iomem *)reg, div->bit_shift,
- width, div_flags, table, NULL);
+ width, div_flags, table);
if (IS_ERR(clk))
kfree(table);
@@ -584,8 +575,7 @@ static void __init of_ti_divider_clk_setup(struct device_node *node)
goto cleanup;
clk = _register_divider(NULL, node->name, parent_name, flags, reg,
- shift, width, clk_divider_flags, table,
- NULL);
+ shift, width, clk_divider_flags, table);
if (!IS_ERR(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index f4b2e9888bdf..66a0d0ed8b55 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -168,7 +168,7 @@ static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw,
{
struct fapll_data *fd = to_fapll(hw);
u32 fapll_n, fapll_p, v;
- long long rate;
+ u64 rate;
if (ti_fapll_clock_is_bypass(fd))
return parent_rate;
@@ -314,7 +314,7 @@ static unsigned long ti_fapll_synth_recalc_rate(struct clk_hw *hw,
{
struct fapll_synth *synth = to_synth(hw);
u32 synth_div_m;
- long long rate;
+ u64 rate;
	/* The audio_pll_clk1 is hardwired to produce a 32.768 kHz clock */
if (!synth->div)
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 69f08a1d047d..dab9ba88b9d6 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -69,7 +69,6 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_mux *mux = to_clk_mux(hw);
u32 val;
- unsigned long flags = 0;
if (mux->table) {
index = mux->table[index];
@@ -81,9 +80,6 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
index++;
}
- if (mux->lock)
- spin_lock_irqsave(mux->lock, flags);
-
if (mux->flags & CLK_MUX_HIWORD_MASK) {
val = mux->mask << (mux->shift + 16);
} else {
@@ -93,9 +89,6 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
val |= index << mux->shift;
ti_clk_ll_ops->clk_writel(val, mux->reg);
- if (mux->lock)
- spin_unlock_irqrestore(mux->lock, flags);
-
return 0;
}
@@ -109,7 +102,7 @@ static struct clk *_register_mux(struct device *dev, const char *name,
const char **parent_names, u8 num_parents,
unsigned long flags, void __iomem *reg,
u8 shift, u32 mask, u8 clk_mux_flags,
- u32 *table, spinlock_t *lock)
+ u32 *table)
{
struct clk_mux *mux;
struct clk *clk;
@@ -133,7 +126,6 @@ static struct clk *_register_mux(struct device *dev, const char *name,
mux->shift = shift;
mux->mask = mask;
mux->flags = clk_mux_flags;
- mux->lock = lock;
mux->table = table;
mux->hw.init = &init;
@@ -175,7 +167,7 @@ struct clk *ti_clk_register_mux(struct ti_clk *setup)
return _register_mux(NULL, setup->name, mux->parents, mux->num_parents,
flags, (void __iomem *)reg, mux->bit_shift, mask,
- mux_flags, NULL, NULL);
+ mux_flags, NULL);
}
/**
@@ -227,8 +219,7 @@ static void of_mux_clk_setup(struct device_node *node)
mask = (1 << fls(mask)) - 1;
clk = _register_mux(NULL, node->name, parent_names, num_parents,
- flags, reg, shift, mask, clk_mux_flags, NULL,
- NULL);
+ flags, reg, shift, mask, clk_mux_flags, NULL);
if (!IS_ERR(clk))
of_clk_add_provider(node, of_clk_src_simple_get, clk);
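
Both the divider and mux set_*() paths above write their fields with the same two-mode pattern; a standalone sketch of the bit manipulation, independent of the dropped lock:

    #include <stdint.h>

    static uint32_t field_write(uint32_t old, uint32_t value,
                                unsigned int shift, uint32_t mask,
                                int hiword_mask)
    {
            if (hiword_mask)
                    /* write-enable mask in the top 16 bits, no readback */
                    return (mask << (shift + 16)) | (value << shift);

            /* classic read-modify-write of the register copy */
            return (old & ~(mask << shift)) | (value << shift);
    }
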
diff --git a/drivers/clocksource/mmio.c b/drivers/clocksource/mmio.c
index 1593ade2a815..c4f7d7a9b689 100644
--- a/drivers/clocksource/mmio.c
+++ b/drivers/clocksource/mmio.c
@@ -55,7 +55,7 @@ int __init clocksource_mmio_init(void __iomem *base, const char *name,
{
struct clocksource_mmio *cs;
- if (bits > 32 || bits < 16)
+ if (bits > 64 || bits < 16)
return -EINVAL;
cs = kzalloc(sizeof(struct clocksource_mmio), GFP_KERNEL);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a83c995a62df..8412ce5f93a7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -976,10 +976,14 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
new_policy.governor = gov;
- /* Use the default policy if its valid. */
- if (cpufreq_driver->setpolicy)
- cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
-
+ /* Use the default policy if there is no last_policy. */
+ if (cpufreq_driver->setpolicy) {
+ if (policy->last_policy)
+ new_policy.policy = policy->last_policy;
+ else
+ cpufreq_parse_governor(gov->name, &new_policy.policy,
+ NULL);
+ }
/* set default policy */
return cpufreq_set_policy(policy, &new_policy);
}
@@ -1330,6 +1334,8 @@ static void cpufreq_offline_prepare(unsigned int cpu)
if (has_target())
strncpy(policy->last_governor, policy->governor->name,
CPUFREQ_NAME_LEN);
+ else
+ policy->last_policy = policy->policy;
} else if (cpu == policy->cpu) {
/* Nominate new CPU */
policy->cpu = cpumask_any(policy->cpus);
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 733aa5153e74..68ef8fd9482f 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -648,7 +648,7 @@ late_initcall(s3c_cpufreq_initcall);
*
* Register the given set of PLLs with the system.
*/
-int __init s3c_plltab_register(struct cpufreq_frequency_table *plls,
+int s3c_plltab_register(struct cpufreq_frequency_table *plls,
unsigned int plls_no)
{
struct cpufreq_frequency_table *vals;
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 73ef49922788..7038f364acb5 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -409,7 +409,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
processed += to_process;
} while (processed < nbytes);
- rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
+ rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
authsize) ? -EBADMSG : 0;
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index eee624f589b6..abd465f479c4 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -21,6 +21,7 @@
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
+#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -418,7 +419,7 @@ mac:
itag, req->src, req->assoclen + nbytes,
crypto_aead_authsize(crypto_aead_reqtfm(req)),
SCATTERWALK_FROM_SG);
- rc = memcmp(itag, otag,
+ rc = crypto_memneq(itag, otag,
crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
-EBADMSG : 0;
}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 46f531e19ccf..b6f9f42e2985 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -977,7 +977,7 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
} else
oicv = (char *)&edesc->link_tbl[0];
- err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0;
+ err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
}
kfree(edesc);
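
The three memcmp()-to-crypto_memneq() conversions above close a timing side channel: memcmp() may return early at the first differing byte, leaking how much of an authentication tag matched. A kernel-style sketch of the call-site pattern:

    #include <crypto/algapi.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    static int check_auth_tag(const u8 *computed, const u8 *received,
                              unsigned int authsize)
    {
            /* crypto_memneq() touches all authsize bytes unconditionally */
            return crypto_memneq(computed, received, authsize) ? -EBADMSG : 0;
    }
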
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 7f039de143f0..370c661c7d7b 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -156,7 +156,7 @@
#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */
#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23)
#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23)
-#define AT_XDMAC_CC_PERID(i) (0x7f & (h) << 24) /* Channel Peripheral Identifier */
+#define AT_XDMAC_CC_PERID(i) (0x7f & (i) << 24) /* Channel Peripheral Identifier */
#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */
@@ -965,7 +965,9 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
NULL,
src_addr, dst_addr,
xt, xt->sgl);
- for (i = 0; i < xt->numf; i++)
+
+ /* Length of the block is (BLEN+1) microblocks. */
+ for (i = 0; i < xt->numf - 1; i++)
at_xdmac_increment_block_count(chan, first);
dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
@@ -1086,6 +1088,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
/* Check remaining length and change data width if needed. */
dwidth = at_xdmac_align_width(chan,
src_addr | dst_addr | xfer_size);
+ chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
ublen = xfer_size >> dwidth;
@@ -1333,7 +1336,7 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
* since we don't care about the stride anymore.
*/
if ((i == (sg_len - 1)) &&
- sg_dma_len(ppsg) == sg_dma_len(psg)) {
+ sg_dma_len(psg) == sg_dma_len(sg)) {
dev_dbg(chan2dev(chan),
"%s: desc 0x%p can be merged with desc 0x%p\n",
__func__, desc, pdesc);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index c92d6a70ccf3..996c4b00d323 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -31,6 +31,7 @@
*/
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -62,6 +63,11 @@ struct bcm2835_dma_cb {
uint32_t pad[2];
};
+struct bcm2835_cb_entry {
+ struct bcm2835_dma_cb *cb;
+ dma_addr_t paddr;
+};
+
struct bcm2835_chan {
struct virt_dma_chan vc;
struct list_head node;
@@ -72,18 +78,18 @@ struct bcm2835_chan {
int ch;
struct bcm2835_desc *desc;
+ struct dma_pool *cb_pool;
void __iomem *chan_base;
int irq_number;
};
struct bcm2835_desc {
+ struct bcm2835_chan *c;
struct virt_dma_desc vd;
enum dma_transfer_direction dir;
- unsigned int control_block_size;
- struct bcm2835_dma_cb *control_block_base;
- dma_addr_t control_block_base_phys;
+ struct bcm2835_cb_entry *cb_list;
unsigned int frames;
size_t size;
@@ -143,10 +149,13 @@ static inline struct bcm2835_desc *to_bcm2835_dma_desc(
static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
{
struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
- dma_free_coherent(desc->vd.tx.chan->device->dev,
- desc->control_block_size,
- desc->control_block_base,
- desc->control_block_base_phys);
+ int i;
+
+ for (i = 0; i < desc->frames; i++)
+ dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
+ desc->cb_list[i].paddr);
+
+ kfree(desc->cb_list);
kfree(desc);
}
@@ -199,7 +208,7 @@ static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
c->desc = d = to_bcm2835_dma_desc(&vd->tx);
- writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR);
+ writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
}
@@ -232,9 +241,16 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+ struct device *dev = c->vc.chan.device->dev;
+
+ dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);
- dev_dbg(c->vc.chan.device->dev,
- "Allocating DMA channel %d\n", c->ch);
+ c->cb_pool = dma_pool_create(dev_name(dev), dev,
+ sizeof(struct bcm2835_dma_cb), 0, 0);
+ if (!c->cb_pool) {
+ dev_err(dev, "unable to allocate descriptor pool\n");
+ return -ENOMEM;
+ }
return request_irq(c->irq_number,
bcm2835_dma_callback, 0, "DMA IRQ", c);
@@ -246,6 +262,7 @@ static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
vchan_free_chan_resources(&c->vc);
free_irq(c->irq_number, c);
+ dma_pool_destroy(c->cb_pool);
dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
}
@@ -261,8 +278,7 @@ static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
size_t size;
for (size = i = 0; i < d->frames; i++) {
- struct bcm2835_dma_cb *control_block =
- &d->control_block_base[i];
+ struct bcm2835_dma_cb *control_block = d->cb_list[i].cb;
size_t this_size = control_block->length;
dma_addr_t dma;
@@ -343,6 +359,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
dma_addr_t dev_addr;
unsigned int es, sync_type;
unsigned int frame;
+ int i;
/* Grab configuration */
if (!is_slave_direction(direction)) {
@@ -374,27 +391,31 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
if (!d)
return NULL;
+ d->c = c;
d->dir = direction;
d->frames = buf_len / period_len;
- /* Allocate memory for control blocks */
- d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb);
- d->control_block_base = dma_zalloc_coherent(chan->device->dev,
- d->control_block_size, &d->control_block_base_phys,
- GFP_NOWAIT);
-
- if (!d->control_block_base) {
+ d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL);
+ if (!d->cb_list) {
kfree(d);
return NULL;
}
+ /* Allocate memory for control blocks */
+ for (i = 0; i < d->frames; i++) {
+ struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
+
+ cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC,
+ &cb_entry->paddr);
+ if (!cb_entry->cb)
+ goto error_cb;
+ }
/*
* Iterate over all frames, create a control block
* for each frame and link them together.
*/
for (frame = 0; frame < d->frames; frame++) {
- struct bcm2835_dma_cb *control_block =
- &d->control_block_base[frame];
+ struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb;
		/* Set up addresses */
if (d->dir == DMA_DEV_TO_MEM) {
@@ -428,12 +449,21 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
* This DMA engine driver currently only supports cyclic DMA.
* Therefore, wrap around at number of frames.
*/
- control_block->next = d->control_block_base_phys +
- sizeof(struct bcm2835_dma_cb)
- * ((frame + 1) % d->frames);
+ control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr;
}
return vchan_tx_prep(&c->vc, &d->vd, flags);
+error_cb:
+ i--;
+ for (; i >= 0; i--) {
+ struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
+
+ dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr);
+ }
+
+ kfree(d->cb_list);
+ kfree(d);
+ return NULL;
}
static int bcm2835_dma_slave_config(struct dma_chan *chan,
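
The bcm2835 conversion above replaces one large coherent allocation with per-frame blocks from a per-channel dma_pool; a kernel-style sketch of the pool lifecycle, with a hypothetical control-block size:

    #include <linux/device.h>
    #include <linux/dmapool.h>

    static struct dma_pool *cb_pool_create_sketch(struct device *dev)
    {
            /* one small pool per channel; 32 bytes is a hypothetical size */
            return dma_pool_create(dev_name(dev), dev, 32, 0, 0);
    }

    static void *cb_alloc_sketch(struct dma_pool *pool, dma_addr_t *paddr)
    {
            /* GFP_ATOMIC: prep callbacks may run in atomic context */
            return dma_pool_zalloc(pool, GFP_ATOMIC, paddr);
    }

    static void cb_free_sketch(struct dma_pool *pool, void *cb,
                               dma_addr_t paddr)
    {
            dma_pool_free(pool, cb, paddr);
    }
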
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 0675e268d577..16fe773fb846 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1752,16 +1752,14 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
return ret;
}
-static bool edma_is_memcpy_channel(int ch_num, u16 *memcpy_channels)
+static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
{
- s16 *memcpy_ch = memcpy_channels;
-
if (!memcpy_channels)
return false;
- while (*memcpy_ch != -1) {
- if (*memcpy_ch == ch_num)
+ while (*memcpy_channels != -1) {
+ if (*memcpy_channels == ch_num)
return true;
- memcpy_ch++;
+ memcpy_channels++;
}
return false;
}
@@ -1775,7 +1773,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
{
struct dma_device *s_ddev = &ecc->dma_slave;
struct dma_device *m_ddev = NULL;
- s16 *memcpy_channels = ecc->info->memcpy_channels;
+ s32 *memcpy_channels = ecc->info->memcpy_channels;
int i, j;
dma_cap_zero(s_ddev->cap_mask);
@@ -1996,16 +1994,16 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
if (prop) {
const char pname[] = "ti,edma-memcpy-channels";
- size_t nelm = sz / sizeof(s16);
- s16 *memcpy_ch;
+ size_t nelm = sz / sizeof(s32);
+ s32 *memcpy_ch;
- memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s16),
+ memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
GFP_KERNEL);
if (!memcpy_ch)
return ERR_PTR(-ENOMEM);
- ret = of_property_read_u16_array(dev->of_node, pname,
- (u16 *)memcpy_ch, nelm);
+ ret = of_property_read_u32_array(dev->of_node, pname,
+ (u32 *)memcpy_ch, nelm);
if (ret)
return ERR_PTR(ret);
@@ -2017,31 +2015,50 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
&sz);
if (prop) {
const char pname[] = "ti,edma-reserved-slot-ranges";
+ u32 (*tmp)[2];
s16 (*rsv_slots)[2];
- size_t nelm = sz / sizeof(*rsv_slots);
+ size_t nelm = sz / sizeof(*tmp);
struct edma_rsv_info *rsv_info;
+ int i;
if (!nelm)
return info;
+ tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return ERR_PTR(-ENOMEM);
+
rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
- if (!rsv_info)
+ if (!rsv_info) {
+ kfree(tmp);
return ERR_PTR(-ENOMEM);
+ }
rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
GFP_KERNEL);
- if (!rsv_slots)
+ if (!rsv_slots) {
+ kfree(tmp);
return ERR_PTR(-ENOMEM);
+ }
- ret = of_property_read_u16_array(dev->of_node, pname,
- (u16 *)rsv_slots, nelm * 2);
- if (ret)
+ ret = of_property_read_u32_array(dev->of_node, pname,
+ (u32 *)tmp, nelm * 2);
+ if (ret) {
+ kfree(tmp);
return ERR_PTR(ret);
+ }
+ for (i = 0; i < nelm; i++) {
+ rsv_slots[i][0] = tmp[i][0];
+ rsv_slots[i][1] = tmp[i][1];
+ }
rsv_slots[nelm][0] = -1;
rsv_slots[nelm][1] = -1;
+
info->rsv = rsv_info;
info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;
+
+ kfree(tmp);
}
return info;
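
The edma change above follows from device-tree encoding: property cells are 32-bit, so reading them as u16 scrambles the values. A kernel-style sketch of reading such a list into a -1-terminated s32 array:

    #include <linux/device.h>
    #include <linux/of.h>
    #include <linux/slab.h>

    static s32 *read_channel_list_sketch(struct device *dev,
                                         struct device_node *np,
                                         const char *pname, size_t nelm)
    {
            s32 *ch = devm_kcalloc(dev, nelm + 1, sizeof(s32), GFP_KERNEL);

            if (!ch)
                    return NULL;
            /* DT cells are always 32-bit on the wire */
            if (of_property_read_u32_array(np, pname, (u32 *)ch, nelm))
                    return NULL;
            ch[nelm] = -1;  /* sentinel for the walker */
            return ch;
    }
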
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 068e920ecb68..cddfa8dbf4bd 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -317,6 +317,7 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
struct device *dev = mic_dma_ch_to_device(mic_ch);
int result;
+ struct dma_async_tx_descriptor *tx = NULL;
if (!len && !flags)
return NULL;
@@ -324,10 +325,13 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
spin_lock(&mic_ch->prep_lock);
result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len);
if (result >= 0)
- return allocate_tx(mic_ch);
- dev_err(dev, "Error enqueueing dma, error=%d\n", result);
+ tx = allocate_tx(mic_ch);
+
+ if (!tx)
+ dev_err(dev, "Error enqueueing dma, error=%d\n", result);
+
spin_unlock(&mic_ch->prep_lock);
- return NULL;
+ return tx;
}
static struct dma_async_tx_descriptor *
@@ -335,13 +339,14 @@ mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags)
{
struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
int ret;
+ struct dma_async_tx_descriptor *tx = NULL;
spin_lock(&mic_ch->prep_lock);
ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
if (!ret)
- return allocate_tx(mic_ch);
+ tx = allocate_tx(mic_ch);
spin_unlock(&mic_ch->prep_lock);
- return NULL;
+ return tx;
}
/* Return the status of the transaction */
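
Both mic_x100 prep paths above had the same flaw: returning while prep_lock was still held. A reduced sketch of the single-exit shape, with hypothetical work helpers:

    #include <linux/spinlock.h>

    static int do_work_sketch(void);            /* hypothetical */
    static void *grab_descriptor_sketch(void);  /* hypothetical */

    static void *prep_sketch(spinlock_t *prep_lock)
    {
            void *tx = NULL;

            spin_lock(prep_lock);
            if (do_work_sketch() >= 0)
                    tx = grab_descriptor_sketch();
            spin_unlock(prep_lock);     /* released on every path */

            return tx;                  /* NULL signals failure */
    }
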
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index a24f5cb877e0..953dc9195937 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -122,12 +122,10 @@ int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
}
ret = fpga_mgr_buf_load(mgr, flags, fw->data, fw->size);
- if (ret)
- return ret;
release_firmware(fw);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(fpga_mgr_firmware_load);
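
The leak fixed above comes down to pairing: every successful request_firmware() needs a release_firmware(), whether or not the load that follows succeeds. A kernel-style sketch, with a hypothetical consumer:

    #include <linux/firmware.h>

    static int consume_firmware_sketch(const u8 *data,
                                       size_t size); /* hypothetical */

    static int load_sketch(struct device *dev, const char *name)
    {
            const struct firmware *fw;
            int ret;

            ret = request_firmware(&fw, name, dev);
            if (ret)
                    return ret;

            ret = consume_firmware_sketch(fw->data, fw->size);
            release_firmware(fw);   /* on success and on failure */
            return ret;
    }
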
@@ -256,7 +254,6 @@ int fpga_mgr_register(struct device *dev, const char *name,
void *priv)
{
struct fpga_manager *mgr;
- const char *dt_label;
int id, ret;
if (!mops || !mops->write_init || !mops->write ||
@@ -300,11 +297,9 @@ int fpga_mgr_register(struct device *dev, const char *name,
mgr->dev.id = id;
dev_set_drvdata(dev, mgr);
- dt_label = of_get_property(mgr->dev.of_node, "label", NULL);
- if (dt_label)
- ret = dev_set_name(&mgr->dev, "%s", dt_label);
- else
- ret = dev_set_name(&mgr->dev, "fpga%d", id);
+ ret = dev_set_name(&mgr->dev, "fpga%d", id);
+ if (ret)
+ goto error_device;
ret = device_add(&mgr->dev);
if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 251b14736de9..5a5f04d0902d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -539,6 +539,7 @@ struct amdgpu_bo {
/* Constant after initialization */
struct amdgpu_device *adev;
struct drm_gem_object gem_base;
+ struct amdgpu_bo *parent;
struct ttm_bo_kmap_obj dma_buf_vmap;
pid_t pid;
@@ -955,6 +956,8 @@ struct amdgpu_vm {
struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];
/* for interval tree */
spinlock_t it_lock;
+ /* protecting freed */
+ spinlock_t freed_lock;
};
struct amdgpu_vm_manager {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 1d44d508d4d4..4f352ec9dec4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -222,6 +222,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
}
p->uf.bo = gem_to_amdgpu_bo(gobj);
+ amdgpu_bo_ref(p->uf.bo);
+ drm_gem_object_unreference_unlocked(gobj);
p->uf.offset = fence_data->offset;
} else {
ret = -EINVAL;
@@ -487,7 +489,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
amdgpu_ib_free(parser->adev, &parser->ibs[i]);
kfree(parser->ibs);
if (parser->uf.bo)
- drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+ amdgpu_bo_unref(&parser->uf.bo);
}
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -776,7 +778,7 @@ static int amdgpu_cs_free_job(struct amdgpu_job *job)
amdgpu_ib_free(job->adev, &job->ibs[i]);
kfree(job->ibs);
if (job->uf.bo)
- drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
+ amdgpu_bo_unref(&job->uf.bo);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index e173a5a02f0d..5580d3420c3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -73,6 +73,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
struct drm_crtc *crtc = &amdgpuCrtc->base;
unsigned long flags;
unsigned i;
+ int vpos, hpos, stat, min_udelay;
+ struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
amdgpu_flip_wait_fence(adev, &work->excl);
for (i = 0; i < work->shared_count; ++i)
@@ -81,6 +83,41 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
/* We borrow the event spin lock for protecting flip_status */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ /* If this happens to execute within the "virtually extended" vblank
+ * interval before the start of the real vblank interval then it needs
+ * to delay programming the mmio flip until the real vblank is entered.
+ * This prevents completing a flip too early due to the way we fudge
+ * our vblank counter and vblank timestamps in order to work around the
+ * problem that the hw fires vblank interrupts before actual start of
+ * vblank (when line buffer refilling is done for a frame). It
+ * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for
+ * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts.
+ *
+ * In practice this won't execute very often unless on very fast
+ * machines because the time window for this to happen is very small.
+ */
+ for (;;) {
+ /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
+ * start in hpos, and to the "fudged earlier" vblank start in
+ * vpos.
+ */
+ stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id,
+ GET_DISTANCE_TO_VBLANKSTART,
+ &vpos, &hpos, NULL, NULL,
+ &crtc->hwmode);
+
+ if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
+ !(vpos >= 0 && hpos <= 0))
+ break;
+
+ /* Sleep at least until estimated real start of hw vblank */
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+ usleep_range(min_udelay, 2 * min_udelay);
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	}
+
/* do the flip (mmio) */
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
/* set the flip status */
@@ -109,7 +146,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
} else
DRM_ERROR("failed to reserve buffer after flip\n");
- drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+ amdgpu_bo_unref(&work->old_rbo);
kfree(work->shared);
kfree(work);
}
@@ -148,8 +185,8 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
obj = old_amdgpu_fb->obj;
/* take a reference to the old object */
- drm_gem_object_reference(obj);
work->old_rbo = gem_to_amdgpu_bo(obj);
+ amdgpu_bo_ref(work->old_rbo);
new_amdgpu_fb = to_amdgpu_framebuffer(fb);
obj = new_amdgpu_fb->obj;
@@ -222,7 +259,7 @@ pflip_cleanup:
amdgpu_bo_unreserve(new_rbo);
cleanup:
- drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+ amdgpu_bo_unref(&work->old_rbo);
fence_put(work->excl);
for (i = 0; i < work->shared_count; ++i)
fence_put(work->shared[i]);
@@ -712,6 +749,15 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
* \param dev Device to query.
* \param pipe Crtc to query.
* \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ * For driver internal use only also supports these flags:
+ *
+ * USE_REAL_VBLANKSTART to use the real start of vblank instead
+ * of a fudged earlier start of vblank.
+ *
+ * GET_DISTANCE_TO_VBLANKSTART to return distance to the
+ * fudged earlier start of vblank in *vpos and the distance
+ * to true start of vblank in *hpos.
+ *
* \param *vpos Location where vertical scanout position should be stored.
* \param *hpos Location where horizontal scanout position should go.
* \param *stime Target location for timestamp taken immediately before
@@ -776,10 +822,40 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
vbl_end = 0;
}
+ /* Called from driver internal vblank counter query code? */
+ if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+ /* Caller wants distance from real vbl_start in *hpos */
+ *hpos = *vpos - vbl_start;
+ }
+
+ /* Fudge vblank to start a few scanlines earlier to handle the
+ * problem that vblank irqs fire a few scanlines before start
+ * of vblank. Some driver internal callers need the true vblank
+ * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
+ *
+ * The cause of the "early" vblank irq is that the irq is triggered
+ * by the line buffer logic when the line buffer read position enters
+ * the vblank, whereas our crtc scanout position naturally lags the
+ * line buffer read position.
+ */
+ if (!(flags & USE_REAL_VBLANKSTART))
+ vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;
+
/* Test scanout position against vblank region. */
if ((*vpos < vbl_start) && (*vpos >= vbl_end))
in_vbl = false;
+ /* In vblank? */
+ if (in_vbl)
+ ret |= DRM_SCANOUTPOS_IN_VBLANK;
+
+ /* Called from driver internal vblank counter query code? */
+ if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+ /* Caller wants distance from fudged earlier vbl_start */
+ *vpos -= vbl_start;
+ return ret;
+ }
+
/* Check if inside vblank area and apply corrective offsets:
* vpos will then be >=0 in video scanout area, but negative
* within vblank area, counting down the number of lines until
@@ -795,32 +871,6 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
/* Correct for shifted end of vbl at vbl_end. */
*vpos = *vpos - vbl_end;
- /* In vblank? */
- if (in_vbl)
- ret |= DRM_SCANOUTPOS_IN_VBLANK;
-
- /* Is vpos outside nominal vblank area, but less than
- * 1/100 of a frame height away from start of vblank?
- * If so, assume this isn't a massively delayed vblank
- * interrupt, but a vblank interrupt that fired a few
- * microseconds before true start of vblank. Compensate
- * by adding a full frame duration to the final timestamp.
- * Happens, e.g., on ATI R500, R600.
- *
- * We only do this if DRM_CALLED_FROM_VBLIRQ.
- */
- if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
- vbl_start = mode->crtc_vdisplay;
- vtotal = mode->crtc_vtotal;
-
- if (vbl_start - *vpos < vtotal / 100) {
- *vpos -= vtotal;
-
- /* Signal this correction as "applied". */
- ret |= 0x8;
- }
- }
-
return ret;
}
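[Editor's note: condensing the flow above: unless USE_REAL_VBLANKSTART is passed, vbl_start is pulled forward by the line buffer lead, and the position counts as inside vblank whenever it is not strictly between vbl_end and the (possibly fudged) vbl_start. A sketch with hypothetical local names:]

#include <linux/types.h>

/* Sketch only: vpos/vbl_start/vbl_end follow the conventions of
 * amdgpu_get_crtc_scanoutpos(); lead_lines stands in for the crtc's
 * lb_vblank_lead_lines value.
 */
static bool in_fudged_vblank(int vpos, int vbl_start, int vbl_end,
			     int lead_lines, bool use_real_start)
{
	if (!use_real_start)
		vbl_start -= lead_lines;	/* vblank "starts" earlier */

	return !(vpos < vbl_start && vpos >= vbl_end);
}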
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index fc32fc01a64b..9c253c535d26 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -235,8 +235,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
AMDGPU_GEM_USERPTR_REGISTER))
return -EINVAL;
- if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
- !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
+ if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
+ !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
+ !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {
/* if we want to write to it we must require anonymous
memory and install an MMU notifier */
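[Editor's note: the rewritten check boils down to: read-only userptrs are always acceptable, while writable ones must be both anonymous-only and MMU-notifier registered. An equivalent predicate, reusing the uapi flag names:]

#include <linux/types.h>
#include <drm/amdgpu_drm.h>

/* Sketch of the rule enforced above: writes require both ANONONLY
 * and REGISTER; read-only mappings are unrestricted.
 */
static bool userptr_flags_ok(u32 flags)
{
	if (flags & AMDGPU_GEM_USERPTR_READONLY)
		return true;

	return (flags & AMDGPU_GEM_USERPTR_ANONONLY) &&
	       (flags & AMDGPU_GEM_USERPTR_REGISTER);
}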
@@ -476,6 +477,14 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (domain == AMDGPU_GEM_DOMAIN_CPU)
goto error_unreserve;
}
+ list_for_each_entry(entry, &duplicates, head) {
+ domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
+ /* if anything is swapped out don't swap it in here,
+ just abort and wait for the next CS */
+ if (domain == AMDGPU_GEM_DOMAIN_CPU)
+ goto error_unreserve;
+ }
+
r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
if (r)
goto error_unreserve;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 1618e2294a16..e23843f4d877 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -611,13 +611,59 @@ void amdgpu_driver_preclose_kms(struct drm_device *dev,
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
struct amdgpu_device *adev = dev->dev_private;
+ int vpos, hpos, stat;
+ u32 count;
if (pipe >= adev->mode_info.num_crtc) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
- return amdgpu_display_vblank_get_counter(adev, pipe);
+ /* The hw increments its frame counter at start of vsync, not at start
+ * of vblank, as is required by DRM core vblank counter handling.
+ * Cook the hw count here to make it appear to the caller as if it
+ * incremented at start of vblank. We measure distance to start of
+ * vblank in vpos. vpos therefore will be >= 0 between start of vblank
+ * and start of vsync, so vpos >= 0 means to bump the hw frame counter
+ * result by 1 to give the proper appearance to the caller.
+ */
+ if (adev->mode_info.crtcs[pipe]) {
+ /* Repeat readout if needed to provide stable result if
+ * we cross start of vsync during the queries.
+ */
+ do {
+ count = amdgpu_display_vblank_get_counter(adev, pipe);
+ /* Ask amdgpu_get_crtc_scanoutpos to return vpos as
+ * distance to start of vblank, instead of regular
+ * vertical scanout pos.
+ */
+ stat = amdgpu_get_crtc_scanoutpos(
+ dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
+ &vpos, &hpos, NULL, NULL,
+ &adev->mode_info.crtcs[pipe]->base.hwmode);
+ } while (count != amdgpu_display_vblank_get_counter(adev, pipe));
+
+ if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
+ DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
+ } else {
+ DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
+ pipe, vpos);
+
+ /* Bump counter if we are at >= leading edge of vblank,
+ * but before vsync where vpos would turn negative and
+ * the hw counter really increments.
+ */
+ if (vpos >= 0)
+ count++;
+ }
+ } else {
+ /* Fallback to use value as is. */
+ count = amdgpu_display_vblank_get_counter(adev, pipe);
+ DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
+ }
+
+ return count;
}
/**
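[Editor's note: the counter cooking relies on a classic stable double-read. Generalized below, with read_counter() and query_position() as hypothetical stand-ins for amdgpu_display_vblank_get_counter() and amdgpu_get_crtc_scanoutpos():]

#include <linux/types.h>

/* Hypothetical hardware accessors. */
u32 read_counter(void);
void query_position(int *vpos, int *hpos);

/* Sketch of the stable-readout loop above: retry until the frame
 * counter is identical before and after the position query, so both
 * values are known to belong to the same frame.
 */
static u32 cooked_vblank_count(int *vpos, int *hpos)
{
	u32 count;

	do {
		count = read_counter();
		query_position(vpos, hpos);
	} while (count != read_counter());

	/* Between start of vblank and start of vsync the hardware
	 * still reports the old frame; bump to a vblank-based count.
	 */
	if (*vpos >= 0)
		count++;

	return count;
}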
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index b62c1710cab6..064ebb347074 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -407,6 +407,7 @@ struct amdgpu_crtc {
u32 line_time;
u32 wm_low;
u32 wm_high;
+ u32 lb_vblank_lead_lines;
struct drm_display_mode hw_mode;
};
@@ -528,6 +529,10 @@ struct amdgpu_framebuffer {
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
((em) == ATOM_ENCODER_MODE_DP_MST))
+/* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */
+#define USE_REAL_VBLANKSTART (1 << 30)
+#define GET_DISTANCE_TO_VBLANKSTART (1 << 31)
+
void amdgpu_link_encoder_connector(struct drm_device *dev);
struct drm_connector *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 0d524384ff79..c3ce103b6a33 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -100,6 +100,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
list_del_init(&bo->list);
mutex_unlock(&bo->adev->gem.mutex);
drm_gem_object_release(&bo->gem_base);
+ amdgpu_bo_unref(&bo->parent);
kfree(bo->metadata);
kfree(bo);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index d4bac5f49939..8a1752ff3d8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -587,9 +587,13 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
int r;
- if (gtt->userptr)
- amdgpu_ttm_tt_pin_userptr(ttm);
-
+ if (gtt->userptr) {
+ r = amdgpu_ttm_tt_pin_userptr(ttm);
+ if (r) {
+ DRM_ERROR("failed to pin userptr\n");
+ return r;
+ }
+ }
gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
if (!ttm->num_pages) {
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
@@ -797,11 +801,12 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
if (mem && mem->mem_type != TTM_PL_SYSTEM)
flags |= AMDGPU_PTE_VALID;
- if (mem && mem->mem_type == TTM_PL_TT)
+ if (mem && mem->mem_type == TTM_PL_TT) {
flags |= AMDGPU_PTE_SYSTEM;
- if (!ttm || ttm->caching_state == tt_cached)
- flags |= AMDGPU_PTE_SNOOPED;
+ if (ttm->caching_state == tt_cached)
+ flags |= AMDGPU_PTE_SNOOPED;
+ }
if (adev->asic_type >= CHIP_TOPAZ)
flags |= AMDGPU_PTE_EXECUTABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ae037e5b6ad0..b53d273eb7a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -885,17 +885,21 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping;
int r;
+ spin_lock(&vm->freed_lock);
while (!list_empty(&vm->freed)) {
mapping = list_first_entry(&vm->freed,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
-
+ spin_unlock(&vm->freed_lock);
r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
kfree(mapping);
if (r)
return r;
+ spin_lock(&vm->freed_lock);
}
+ spin_unlock(&vm->freed_lock);
+
return 0;
}
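[Editor's note: the freed list is now drained with a drop-the-lock-per-entry pattern because amdgpu_vm_bo_update_mapping() can sleep. The general shape, sketched with a hypothetical process() callback:]

#include <linux/list.h>
#include <linux/spinlock.h>

/* Drain sketch: pop one node under the spinlock, process it with the
 * lock dropped (the callback may sleep), then retake the lock. The
 * lock only guards list membership, not the node contents.
 */
static int drain_freed(spinlock_t *lock, struct list_head *head,
		       int (*process)(struct list_head *node))
{
	struct list_head *node;
	int r;

	spin_lock(lock);
	while (!list_empty(head)) {
		node = head->next;
		list_del(node);
		spin_unlock(lock);

		r = process(node);		/* may sleep */
		if (r)
			return r;

		spin_lock(lock);
	}
	spin_unlock(lock);

	return 0;
}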
@@ -1079,6 +1083,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
if (r)
goto error_free;
+ /* Keep a reference from each page table to the page
+ * directory so they cannot be freed in the wrong order.
+ */
+ pt->parent = amdgpu_bo_ref(vm->page_directory);
+
r = amdgpu_vm_clear_bo(adev, pt);
if (r) {
amdgpu_bo_unref(&pt);
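[Editor's note: the parent reference makes teardown order explicit: each page table pins the page directory, and amdgpu_ttm_bo_destroy() above drops that pin via amdgpu_bo_unref(&bo->parent). The same idea in generic form, as a sketch with hypothetical types:]

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Generic parent-pinning sketch: the child holds a reference on its
 * parent for its whole lifetime, so the parent can only go away after
 * the last child has been released.
 */
struct node {
	struct kref kref;
	struct node *parent;
};

static void node_release(struct kref *kref)
{
	struct node *n = container_of(kref, struct node, kref);

	if (n->parent)
		kref_put(&n->parent->kref, node_release);
	kfree(n);
}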
@@ -1150,10 +1159,13 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
- if (valid)
+ if (valid) {
+ spin_lock(&vm->freed_lock);
list_add(&mapping->list, &vm->freed);
- else
+ spin_unlock(&vm->freed_lock);
+ } else {
kfree(mapping);
+ }
return 0;
}
@@ -1186,7 +1198,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
interval_tree_remove(&mapping->it, &vm->va);
spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
+ spin_lock(&vm->freed_lock);
list_add(&mapping->list, &vm->freed);
+ spin_unlock(&vm->freed_lock);
}
list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
list_del(&mapping->list);
@@ -1247,6 +1261,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
spin_lock_init(&vm->it_lock);
+ spin_lock_init(&vm->freed_lock);
pd_size = amdgpu_vm_directory_size(adev);
pd_entries = amdgpu_vm_num_pdes(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index cb0f7747e3dc..4dcc8fba5792 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1250,7 +1250,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
u32 pixel_period;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
- u32 tmp, wm_mask;
+ u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
pixel_period = 1000000 / (u32)mode->clock;
@@ -1333,6 +1333,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
(adev->mode_info.disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
}
+ lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
}
/* select wm A */
@@ -1357,6 +1358,8 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
amdgpu_crtc->line_time = line_time;
amdgpu_crtc->wm_high = latency_watermark_a;
amdgpu_crtc->wm_low = latency_watermark_b;
+ /* Save number of lines the linebuffer leads before the scanout */
+ amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
/**
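[Editor's note: lb_vblank_lead_lines is simply the line buffer depth expressed in whole scanlines. Assuming lb_size counts pixels, as the r100 fallback later in this series does, an 8192-pixel buffer on a 1920-pixel-wide mode leads the scanout by DIV_ROUND_UP(8192, 1920) = 5 lines:]

#include <linux/kernel.h>	/* DIV_ROUND_UP */
#include <linux/types.h>

/* Sketch: how many scanlines the line buffer can be ahead of the
 * scanout position, rounding up so a partial line still counts.
 */
static u32 lb_lead_lines(u32 lb_size_pixels, u32 crtc_hdisplay)
{
	return DIV_ROUND_UP(lb_size_pixels, crtc_hdisplay);
}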
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 5af3721851d6..8f1e51128b33 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1238,7 +1238,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
u32 pixel_period;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
- u32 tmp, wm_mask;
+ u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
pixel_period = 1000000 / (u32)mode->clock;
@@ -1321,6 +1321,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
(adev->mode_info.disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
}
+ lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
}
/* select wm A */
@@ -1345,6 +1346,8 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
amdgpu_crtc->line_time = line_time;
amdgpu_crtc->wm_high = latency_watermark_a;
amdgpu_crtc->wm_low = latency_watermark_b;
+ /* Save number of lines the linebuffer leads before the scanout */
+ amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 4f7b49a6dc50..42d954dc436d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1193,7 +1193,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
u32 pixel_period;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
- u32 tmp, wm_mask;
+ u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
pixel_period = 1000000 / (u32)mode->clock;
@@ -1276,6 +1276,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
(adev->mode_info.disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
}
+ lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
}
/* select wm A */
@@ -1302,6 +1303,8 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
amdgpu_crtc->line_time = line_time;
amdgpu_crtc->wm_high = latency_watermark_a;
amdgpu_crtc->wm_low = latency_watermark_b;
+ /* Save number of lines the linebuffer leads before the scanout */
+ amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 7427d8cd4c43..ed8abb58a785 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -513,7 +513,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_L2_CNTL3, tmp);
/* setup context0 */
WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index cb0e50ebb528..d39028440814 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -657,7 +657,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_L2_CNTL4, tmp);
/* setup context0 */
WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 651129f2ec1d..3a4820e863ec 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -288,6 +288,7 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
*/
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
+ struct amd_gpu_scheduler *sched = sched_job->sched;
struct amd_sched_entity *entity = sched_job->s_entity;
bool added, first = false;
@@ -302,7 +303,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
/* first job wakes up scheduler */
if (first)
- amd_sched_wakeup(sched_job->sched);
+ amd_sched_wakeup(sched);
return added;
}
@@ -318,9 +319,9 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
struct amd_sched_entity *entity = sched_job->s_entity;
+ trace_amd_sched_job(sched_job);
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
- trace_amd_sched_job(sched_job);
}
/**
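[Editor's note: both scheduler hunks close the same race: once amd_sched_entity_in() publishes the job, the scheduler thread may run and free it, so the sched pointer is copied out first and the trace point is emitted before the push. A sketch of the general rule, with hypothetical types and helpers:]

#include <linux/types.h>

struct scheduler;
struct job { struct scheduler *sched; };

/* Hypothetical helpers. */
bool publish(struct job *j);
void wake_scheduler(struct scheduler *s);
void trace_job(struct job *j);

/* Copy everything you need out of an object before publishing it to
 * another thread that may free it.
 */
static void push_job(struct job *j)
{
	struct scheduler *sched = j->sched;	/* copy before publish */

	trace_job(j);				/* touch j only here */
	if (publish(j))				/* j may be freed now */
		wake_scheduler(sched);		/* uses the saved copy */
}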
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 9362609df38a..7dd6728dd092 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -160,6 +160,11 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
+ if (!file_priv->allowed_master) {
+ ret = drm_new_set_master(dev, file_priv);
+ goto out_unlock;
+ }
+
file_priv->minor->master = drm_master_get(file_priv->master);
file_priv->is_master = 1;
if (dev->driver->master_set) {
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index c59ce4d0ef75..6b5625e66119 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -126,6 +126,60 @@ static int drm_cpu_valid(void)
}
/**
+ * drm_new_set_master - Allocate a new master object and become master for the
+ * associated master realm.
+ *
+ * @dev: The associated device.
+ * @fpriv: File private identifying the client.
+ *
+ * This function must be called with dev->master_mutex held.
+ * Returns negative error code on failure. Zero on success.
+ */
+int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
+{
+ struct drm_master *old_master;
+ int ret;
+
+ lockdep_assert_held_once(&dev->master_mutex);
+
+ /* create a new master */
+ fpriv->minor->master = drm_master_create(fpriv->minor);
+ if (!fpriv->minor->master)
+ return -ENOMEM;
+
+ /* take another reference for the copy in the local file priv */
+ old_master = fpriv->master;
+ fpriv->master = drm_master_get(fpriv->minor->master);
+
+ if (dev->driver->master_create) {
+ ret = dev->driver->master_create(dev, fpriv->master);
+ if (ret)
+ goto out_err;
+ }
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, fpriv, true);
+ if (ret)
+ goto out_err;
+ }
+
+ fpriv->is_master = 1;
+ fpriv->allowed_master = 1;
+ fpriv->authenticated = 1;
+ if (old_master)
+ drm_master_put(&old_master);
+
+ return 0;
+
+out_err:
+ /* drop both references and restore old master on failure */
+ drm_master_put(&fpriv->minor->master);
+ drm_master_put(&fpriv->master);
+ fpriv->master = old_master;
+
+ return ret;
+}
+
+/**
* Called whenever a process opens /dev/drm.
*
* \param filp file pointer.
@@ -189,35 +243,9 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
mutex_lock(&dev->master_mutex);
if (drm_is_primary_client(priv) && !priv->minor->master) {
/* create a new master */
- priv->minor->master = drm_master_create(priv->minor);
- if (!priv->minor->master) {
- ret = -ENOMEM;
+ ret = drm_new_set_master(dev, priv);
+ if (ret)
goto out_close;
- }
-
- priv->is_master = 1;
- /* take another reference for the copy in the local file priv */
- priv->master = drm_master_get(priv->minor->master);
- priv->authenticated = 1;
-
- if (dev->driver->master_create) {
- ret = dev->driver->master_create(dev, priv->master);
- if (ret) {
- /* drop both references if this fails */
- drm_master_put(&priv->minor->master);
- drm_master_put(&priv->master);
- goto out_close;
- }
- }
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, priv, true);
- if (ret) {
- /* drop both references if this fails */
- drm_master_put(&priv->minor->master);
- drm_master_put(&priv->master);
- goto out_close;
- }
- }
} else if (drm_is_primary_client(priv)) {
/* get a reference to the master */
priv->master = drm_master_get(priv->minor->master);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 2151ea551d3b..607f493ae801 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -980,7 +980,8 @@ static void send_vblank_event(struct drm_device *dev,
struct drm_pending_vblank_event *e,
unsigned long seq, struct timeval *now)
{
- WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
+ assert_spin_locked(&dev->event_lock);
+
e->event.sequence = seq;
e->event.tv_sec = now->tv_sec;
e->event.tv_usec = now->tv_usec;
@@ -993,6 +994,57 @@ static void send_vblank_event(struct drm_device *dev,
}
/**
+ * drm_arm_vblank_event - arm vblank event after pageflip
+ * @dev: DRM device
+ * @pipe: CRTC index
+ * @e: the event to prepare to send
+ *
+ * A lot of drivers need to generate vblank events for the very next vblank
+ * interrupt. For example, the page flip interrupt fires when the flip gets
+ * armed, but the flip does not actually execute until the following vblank
+ * period. This helper function implements exactly the required vblank
+ * arming behaviour.
+ *
+ * Caller must hold event lock. Caller must also hold a vblank reference for
+ * the event @e, which will be dropped when the next vblank arrives.
+ *
+ * This is the legacy version of drm_crtc_arm_vblank_event().
+ */
+void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
+ struct drm_pending_vblank_event *e)
+{
+ assert_spin_locked(&dev->event_lock);
+
+ e->pipe = pipe;
+ e->event.sequence = drm_vblank_count(dev, pipe);
+ list_add_tail(&e->base.link, &dev->vblank_event_list);
+}
+EXPORT_SYMBOL(drm_arm_vblank_event);
+
+/**
+ * drm_crtc_arm_vblank_event - arm vblank event after pageflip
+ * @crtc: the source CRTC of the vblank event
+ * @e: the event to send
+ *
+ * A lot of drivers need to generate vblank events for the very next vblank
+ * interrupt. For example, the page flip interrupt fires when the flip gets
+ * armed, but the flip does not actually execute until the following vblank
+ * period. This helper function implements exactly the required vblank
+ * arming behaviour.
+ *
+ * Caller must hold event lock. Caller must also hold a vblank reference for
+ * the event @e, which will be dropped when the next vblank arrives.
+ *
+ * This is the native KMS version of drm_arm_vblank_event().
+ */
+void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e)
+{
+ drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
+}
+EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
+
+/**
* drm_send_vblank_event - helper to send vblank event after pageflip
* @dev: DRM device
* @pipe: CRTC index
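[Editor's note: a typical consumer of the new helper, sketched: a page-flip path takes a vblank reference when the flip is queued and arms the event under the event lock, so it is sent on the following vblank, which also drops the reference:]

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

/* Hypothetical usage sketch of drm_crtc_arm_vblank_event(). */
static void queue_flip_event(struct drm_crtc *crtc,
			     struct drm_pending_vblank_event *event)
{
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (event) {
		/* the reference is dropped when the armed event fires */
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
		drm_crtc_arm_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}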
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index a18164f2f6d2..f8b5fcfa91a2 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -229,7 +229,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
mode_flags |= DRM_MODE_FLAG_3D_MASK;
list_for_each_entry(mode, &connector->modes, head) {
- mode->status = drm_mode_validate_basic(mode);
+ if (mode->status == MODE_OK)
+ mode->status = drm_mode_validate_basic(mode);
if (mode->status == MODE_OK)
mode->status = drm_mode_validate_size(mode, maxX, maxY);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a3b22bdacd44..8aab974b0564 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2734,6 +2734,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
return "AUX_C";
case POWER_DOMAIN_AUX_D:
return "AUX_D";
+ case POWER_DOMAIN_GMBUS:
+ return "GMBUS";
case POWER_DOMAIN_INIT:
return "INIT";
default:
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 95bb27de774f..a01e51581c4c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -199,6 +199,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_B,
POWER_DOMAIN_AUX_C,
POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_GMBUS,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 91bb1fc27420..32e6aade6223 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1210,8 +1210,16 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
if (i915_gem_request_completed(req, true))
return 0;
- timeout_expire = timeout ?
- jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
+ timeout_expire = 0;
+ if (timeout) {
+ if (WARN_ON(*timeout < 0))
+ return -EINVAL;
+
+ if (*timeout == 0)
+ return -ETIME;
+
+ timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+ }
if (INTEL_INFO(dev_priv)->gen >= 6)
gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
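[Editor's note: the new timeout handling gives *timeout three distinct meanings: negative is a caller bug, zero means poll-and-fail-fast with -ETIME, and positive becomes a jiffies deadline. A condensed sketch using the generic nsecs_to_jiffies(); the driver itself uses its own rounding helper:]

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Sketch: translate an optional nanosecond timeout into a jiffies
 * deadline; 0 in *expire means "no deadline, wait forever".
 */
static int timeout_to_deadline(const s64 *timeout_ns,
			       unsigned long *expire)
{
	*expire = 0;

	if (!timeout_ns)
		return 0;		/* wait indefinitely */
	if (*timeout_ns < 0)
		return -EINVAL;		/* nonsensical request */
	if (*timeout_ns == 0)
		return -ETIME;		/* busy, not allowed to wait */

	*expire = jiffies + nsecs_to_jiffies(*timeout_ns);
	return 0;
}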
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8c688a5f1589..02ceb7a4b481 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -141,8 +141,6 @@ static void i915_gem_context_clean(struct intel_context *ctx)
if (!ppgtt)
return;
- WARN_ON(!list_empty(&ppgtt->base.active_list));
-
list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
mm_list) {
if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 40a10b25956c..f010391b87f5 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -642,11 +642,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}
/* check for L-shaped memory aka modified enhanced addressing */
- if (IS_GEN4(dev)) {
- uint32_t ddc2 = I915_READ(DCC2);
-
- if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
- dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+ if (IS_GEN4(dev) &&
+ !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
if (dcc == 0xffffffff) {
@@ -675,16 +674,35 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
* matching, which was the case for the swizzling required in
* the table above, or from the 1-ch value being less than
* the minimum size of a rank.
+ *
+ * Reports indicate that the swizzling actually
+ * varies depending upon page placement inside the
+ * channels, i.e. we see swizzled pages where the
+ * banks of memory are paired and unswizzled on the
+ * uneven portion, so leave that as unknown.
*/
- if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else {
+ if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}
}
+ if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
+ swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
+ /* Userspace likes to explode if it sees unknown swizzling,
+ * so lie. We will finish the lie when reporting through
+ * the get-tiling-ioctl by reporting the physical swizzle
+ * mode as unknown instead.
+ *
+ * As we don't strictly know what the swizzling is, it may be
+ * bit17 dependent, and so we need to also prevent the pages
+ * from being moved.
+ */
+ dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
+
dev_priv->mm.bit_6_swizzle_x = swizzle_x;
dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 71860f8680f9..62211abe4922 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5194,11 +5194,31 @@ static enum intel_display_power_domain port_to_power_domain(enum port port)
case PORT_E:
return POWER_DOMAIN_PORT_DDI_E_2_LANES;
default:
- WARN_ON_ONCE(1);
+ MISSING_CASE(port);
return POWER_DOMAIN_PORT_OTHER;
}
}
+static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ return POWER_DOMAIN_AUX_A;
+ case PORT_B:
+ return POWER_DOMAIN_AUX_B;
+ case PORT_C:
+ return POWER_DOMAIN_AUX_C;
+ case PORT_D:
+ return POWER_DOMAIN_AUX_D;
+ case PORT_E:
+ /* FIXME: Check VBT for actual wiring of PORT E */
+ return POWER_DOMAIN_AUX_D;
+ default:
+ MISSING_CASE(port);
+ return POWER_DOMAIN_AUX_A;
+ }
+}
+
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
if ((1 << (domain)) & (mask))
@@ -5230,6 +5250,36 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
}
}
+enum intel_display_power_domain
+intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct intel_digital_port *intel_dig_port;
+
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_UNKNOWN:
+ case INTEL_OUTPUT_HDMI:
+ /*
+ * Only DDI platforms should ever use these output types.
+ * We can get here after the HDMI detect code has already set
+ * the type of the shared encoder. Since we can't be sure
+ * what the status of the given connectors is, play safe and
+ * run the DP detection too.
+ */
+ WARN_ON_ONCE(!HAS_DDI(dev));
+ case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_EDP:
+ intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+ return port_to_aux_power_domain(intel_dig_port->port);
+ case INTEL_OUTPUT_DP_MST:
+ intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
+ return port_to_aux_power_domain(intel_dig_port->port);
+ default:
+ MISSING_CASE(intel_encoder->type);
+ return POWER_DOMAIN_AUX_A;
+ }
+}
+
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
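[Editor's note: every former intel_display_port_power_domain() call around AUX or panel-power access in the hunks below now uses the AUX variant, pairing get/put on the matching domain. The shape is always the same; a sketch assuming i915's internal headers:]

#include "i915_drv.h"
#include "intel_drv.h"

/* Hedged usage sketch: bracket AUX register access with the encoder's
 * AUX power domain instead of the full port power domain.
 */
static void aux_access_example(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum intel_display_power_domain domain;

	domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_get(dev_priv, domain);

	/* ... touch DP AUX / PPS registers here ... */

	intel_display_power_put(dev_priv, domain);
}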
@@ -6259,9 +6309,11 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
if (to_intel_plane_state(crtc->primary->state)->visible) {
intel_crtc_wait_for_pending_flips(crtc);
intel_pre_disable_primary(crtc);
+
+ intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
+ to_intel_plane_state(crtc->primary->state)->visible = false;
}
- intel_crtc_disable_planes(crtc, crtc->state->plane_mask);
dev_priv->display.crtc_disable(crtc);
intel_crtc->active = false;
intel_update_watermarks(crtc);
@@ -12460,7 +12512,6 @@ intel_pipe_config_compare(struct drm_device *dev,
if (INTEL_INFO(dev)->gen < 8) {
PIPE_CONF_CHECK_M_N(dp_m_n);
- PIPE_CONF_CHECK_I(has_drrs);
if (current_config->has_drrs)
PIPE_CONF_CHECK_M_N(dp_m2_n2);
} else
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 09bdd94ca3ba..78b8ec84d576 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -277,7 +277,7 @@ static void pps_lock(struct intel_dp *intel_dp)
* See vlv_power_sequencer_reset() why we need
* a power domain reference here.
*/
- power_domain = intel_display_port_power_domain(encoder);
+ power_domain = intel_display_port_aux_power_domain(encoder);
intel_display_power_get(dev_priv, power_domain);
mutex_lock(&dev_priv->pps_mutex);
@@ -293,7 +293,7 @@ static void pps_unlock(struct intel_dp *intel_dp)
mutex_unlock(&dev_priv->pps_mutex);
- power_domain = intel_display_port_power_domain(encoder);
+ power_domain = intel_display_port_aux_power_domain(encoder);
intel_display_power_put(dev_priv, power_domain);
}
@@ -816,8 +816,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
intel_dp_check_edp(intel_dp);
- intel_aux_display_runtime_get(dev_priv);
-
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
status = I915_READ_NOTRACE(ch_ctl);
@@ -926,7 +924,6 @@ done:
ret = recv_bytes;
out:
pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
- intel_aux_display_runtime_put(dev_priv);
if (vdd)
edp_panel_vdd_off(intel_dp, false);
@@ -1784,7 +1781,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
if (edp_have_panel_vdd(intel_dp))
return need_to_disable;
- power_domain = intel_display_port_power_domain(intel_encoder);
+ power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
@@ -1874,7 +1871,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
if ((pp & POWER_TARGET_ON) == 0)
intel_dp->last_power_cycle = jiffies;
- power_domain = intel_display_port_power_domain(intel_encoder);
+ power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_put(dev_priv, power_domain);
}
@@ -2025,7 +2022,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
wait_panel_off(intel_dp);
/* We got a reference when we enabled the VDD. */
- power_domain = intel_display_port_power_domain(intel_encoder);
+ power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_put(dev_priv, power_domain);
}
@@ -4765,26 +4762,6 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
intel_dp->has_audio = false;
}
-static enum intel_display_power_domain
-intel_dp_power_get(struct intel_dp *dp)
-{
- struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
- enum intel_display_power_domain power_domain;
-
- power_domain = intel_display_port_power_domain(encoder);
- intel_display_power_get(to_i915(encoder->base.dev), power_domain);
-
- return power_domain;
-}
-
-static void
-intel_dp_power_put(struct intel_dp *dp,
- enum intel_display_power_domain power_domain)
-{
- struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
- intel_display_power_put(to_i915(encoder->base.dev), power_domain);
-}
-
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
@@ -4808,7 +4785,8 @@ intel_dp_detect(struct drm_connector *connector, bool force)
return connector_status_disconnected;
}
- power_domain = intel_dp_power_get(intel_dp);
+ power_domain = intel_display_port_aux_power_domain(intel_encoder);
+ intel_display_power_get(to_i915(dev), power_domain);
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp))
@@ -4853,7 +4831,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
}
out:
- intel_dp_power_put(intel_dp, power_domain);
+ intel_display_power_put(to_i915(dev), power_domain);
return status;
}
@@ -4862,6 +4840,7 @@ intel_dp_force(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
enum intel_display_power_domain power_domain;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -4871,11 +4850,12 @@ intel_dp_force(struct drm_connector *connector)
if (connector->status != connector_status_connected)
return;
- power_domain = intel_dp_power_get(intel_dp);
+ power_domain = intel_display_port_aux_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
intel_dp_set_edid(intel_dp);
- intel_dp_power_put(intel_dp, power_domain);
+ intel_display_power_put(dev_priv, power_domain);
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
@@ -5091,7 +5071,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
* indefinitely.
*/
DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
- power_domain = intel_display_port_power_domain(&intel_dig_port->base);
+ power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
intel_display_power_get(dev_priv, power_domain);
edp_panel_vdd_schedule_off(intel_dp);
@@ -5153,7 +5133,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
enum intel_display_power_domain power_domain;
enum irqreturn ret = IRQ_NONE;
- if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
+ if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
+ intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
@@ -5172,7 +5153,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
port_name(intel_dig_port->port),
long_hpd ? "long" : "short");
- power_domain = intel_display_port_power_domain(intel_encoder);
+ power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
if (long_hpd) {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0598932ce623..f2a1142bff34 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1169,6 +1169,8 @@ void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
+enum intel_display_power_domain
+intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
@@ -1377,8 +1379,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 9eafa191cee2..81cdd9ff3892 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1335,21 +1335,17 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
- struct intel_encoder *intel_encoder =
- &hdmi_to_dig_port(intel_hdmi)->base;
- enum intel_display_power_domain power_domain;
struct edid *edid = NULL;
bool connected = false;
- power_domain = intel_display_port_power_domain(intel_encoder);
- intel_display_power_get(dev_priv, power_domain);
+ intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
if (force)
edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
to_intel_connector(connector)->detect_edid = edid;
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -1383,6 +1379,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
+ intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+
while (!live_status && --retry) {
live_status = intel_digital_port_connected(dev_priv,
hdmi_to_dig_port(intel_hdmi));
@@ -1402,6 +1400,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
} else
status = connector_status_disconnected;
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+
return status;
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 1369fc41d039..8324654037b6 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -483,7 +483,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
int i = 0, inc, try = 0;
int ret = 0;
- intel_aux_display_runtime_get(dev_priv);
+ intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
mutex_lock(&dev_priv->gmbus_mutex);
if (bus->force_bit) {
@@ -595,7 +595,9 @@ timeout:
out:
mutex_unlock(&dev_priv->gmbus_mutex);
- intel_aux_display_runtime_put(dev_priv);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 071a76b9ac52..f091ad12d694 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4782,8 +4782,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
/* 2b: Program RC6 thresholds.*/
/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
- if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) &&
- (INTEL_REVID(dev) <= SKL_REVID_E0)))
+ if (IS_SKYLAKE(dev))
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
else
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
@@ -4825,7 +4824,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
* WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
*/
if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
- ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
+ ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0)))
I915_WRITE(GEN9_PG_ENABLE, 0);
else
I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index d89c1d0aa1b7..7e23d65c9b24 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -362,6 +362,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_AUDIO) | \
BIT(POWER_DOMAIN_VGA) | \
+ BIT(POWER_DOMAIN_GMBUS) | \
BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
@@ -1483,6 +1484,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_AUX_D) | \
+ BIT(POWER_DOMAIN_GMBUS) | \
BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
@@ -1845,6 +1847,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
i915.disable_power_well);
+ BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
+
mutex_init(&power_domains->lock);
/*
@@ -2064,36 +2068,6 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
}
/**
- * intel_aux_display_runtime_get - grab an auxiliary power domain reference
- * @dev_priv: i915 device instance
- *
- * This function grabs a power domain reference for the auxiliary power domain
- * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
- * parents are powered up. Therefore users should only grab a reference to the
- * innermost power domain they need.
- *
- * Any power domain reference obtained by this function must have a symmetric
- * call to intel_aux_display_runtime_put() to release the reference again.
- */
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
-{
- intel_runtime_pm_get(dev_priv);
-}
-
-/**
- * intel_aux_display_runtime_put - release an auxiliary power domain reference
- * @dev_priv: i915 device instance
- *
- * This function drops the auxiliary power domain reference obtained by
- * intel_aux_display_runtime_get() and might power down the corresponding
- * hardware block right away if this is the last reference.
- */
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
-{
- intel_runtime_pm_put(dev_priv);
-}
-
-/**
* intel_runtime_pm_get - grab a runtime pm reference
* @dev_priv: i915 device instance
*
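[Editor's note: with GMBUS added, the domain count edges closer to the width of the BIT()-based masks above, hence the new BUILD_BUG_ON(POWER_DOMAIN_NUM > 31) compile-time guard; the aux runtime wrappers become redundant once GMBUS and AUX users grab a real power domain. The guard in miniature, with demo names:]

#include <linux/bug.h>

/* Sketch: BIT(domain) masks are used in 32-bit-wide expressions, so
 * refuse to compile once the enum can no longer fit.
 */
enum demo_power_domain { DEMO_DOMAIN_A, DEMO_DOMAIN_B, DEMO_DOMAIN_NUM };

static inline void demo_check_domain_count(void)
{
	BUILD_BUG_ON(DEMO_DOMAIN_NUM > 31);
}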
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 64f16ea779ef..7b990b4e96d2 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -63,8 +63,7 @@ static void imx_drm_driver_lastclose(struct drm_device *drm)
#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER)
struct imx_drm_device *imxdrm = drm->dev_private;
- if (imxdrm->fbhelper)
- drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
+ drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
#endif
}
@@ -340,7 +339,7 @@ err_kms:
* imx_drm_add_crtc - add a new crtc
*/
int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
- struct imx_drm_crtc **new_crtc,
+ struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane,
const struct imx_drm_crtc_helper_funcs *imx_drm_helper_funcs,
struct device_node *port)
{
@@ -379,7 +378,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
drm_crtc_helper_add(crtc,
imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
- drm_crtc_init(drm, crtc,
+ drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
return 0;
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 28e776d8d9d2..83284b4d4be1 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -9,6 +9,7 @@ struct drm_display_mode;
struct drm_encoder;
struct drm_fbdev_cma;
struct drm_framebuffer;
+struct drm_plane;
struct imx_drm_crtc;
struct platform_device;
@@ -24,7 +25,7 @@ struct imx_drm_crtc_helper_funcs {
};
int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
- struct imx_drm_crtc **new_crtc,
+ struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane,
const struct imx_drm_crtc_helper_funcs *imx_helper_funcs,
struct device_node *port);
int imx_drm_remove_crtc(struct imx_drm_crtc *);
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index e671ad369416..f9597146dc67 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -721,6 +721,7 @@ static const struct of_device_id imx_tve_dt_ids[] = {
{ .compatible = "fsl,imx53-tve", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, imx_tve_dt_ids);
static struct platform_driver imx_tve_driver = {
.probe = imx_tve_probe,
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 7bc8301fafff..4ab841eebee1 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -212,7 +212,8 @@ static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
spin_lock_irqsave(&drm->event_lock, flags);
if (ipu_crtc->page_flip_event)
- drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event);
+ drm_crtc_send_vblank_event(&ipu_crtc->base,
+ ipu_crtc->page_flip_event);
ipu_crtc->page_flip_event = NULL;
imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
spin_unlock_irqrestore(&drm->event_lock, flags);
@@ -349,7 +350,6 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
int dp = -EINVAL;
int ret;
- int id;
ret = ipu_get_resources(ipu_crtc, pdata);
if (ret) {
@@ -358,18 +358,23 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
return ret;
}
+ if (pdata->dp >= 0)
+ dp = IPU_DP_FLOW_SYNC_BG;
+ ipu_crtc->plane[0] = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0,
+ DRM_PLANE_TYPE_PRIMARY);
+ if (IS_ERR(ipu_crtc->plane[0])) {
+ ret = PTR_ERR(ipu_crtc->plane[0]);
+ goto err_put_resources;
+ }
+
ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc,
- &ipu_crtc_helper_funcs, ipu_crtc->dev->of_node);
+ &ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs,
+ ipu_crtc->dev->of_node);
if (ret) {
dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret);
goto err_put_resources;
}
- if (pdata->dp >= 0)
- dp = IPU_DP_FLOW_SYNC_BG;
- id = imx_drm_crtc_id(ipu_crtc->imx_crtc);
- ipu_crtc->plane[0] = ipu_plane_init(ipu_crtc->base.dev, ipu,
- pdata->dma[0], dp, BIT(id), true);
ret = ipu_plane_get_resources(ipu_crtc->plane[0]);
if (ret) {
dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n",
@@ -379,10 +384,10 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
/* If this crtc is using the DP, add an overlay plane */
if (pdata->dp >= 0 && pdata->dma[1] > 0) {
- ipu_crtc->plane[1] = ipu_plane_init(ipu_crtc->base.dev, ipu,
- pdata->dma[1],
- IPU_DP_FLOW_SYNC_FG,
- BIT(id), false);
+ ipu_crtc->plane[1] = ipu_plane_init(drm, ipu, pdata->dma[1],
+ IPU_DP_FLOW_SYNC_FG,
+ drm_crtc_mask(&ipu_crtc->base),
+ DRM_PLANE_TYPE_OVERLAY);
if (IS_ERR(ipu_crtc->plane[1]))
ipu_crtc->plane[1] = NULL;
}
@@ -407,28 +412,6 @@ err_put_resources:
return ret;
}
-static struct device_node *ipu_drm_get_port_by_id(struct device_node *parent,
- int port_id)
-{
- struct device_node *port;
- int id, ret;
-
- port = of_get_child_by_name(parent, "port");
- while (port) {
- ret = of_property_read_u32(port, "reg", &id);
- if (!ret && id == port_id)
- return port;
-
- do {
- port = of_get_next_child(parent, port);
- if (!port)
- return NULL;
- } while (of_node_cmp(port->name, "port"));
- }
-
- return NULL;
-}
-
static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
{
struct ipu_client_platformdata *pdata = dev->platform_data;
@@ -470,23 +453,11 @@ static const struct component_ops ipu_crtc_ops = {
static int ipu_drm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct ipu_client_platformdata *pdata = dev->platform_data;
int ret;
if (!dev->platform_data)
return -EINVAL;
- if (!dev->of_node) {
- /* Associate crtc device with the corresponding DI port node */
- dev->of_node = ipu_drm_get_port_by_id(dev->parent->of_node,
- pdata->di + 2);
- if (!dev->of_node) {
- dev_err(dev, "missing port@%d node in %s\n",
- pdata->di + 2, dev->parent->of_node->full_name);
- return -ENODEV;
- }
- }
-
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
if (ret)
return ret;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 575f4c84388f..e2ff410bab74 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -381,7 +381,7 @@ static struct drm_plane_funcs ipu_plane_funcs = {
struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
int dma, int dp, unsigned int possible_crtcs,
- bool priv)
+ enum drm_plane_type type)
{
struct ipu_plane *ipu_plane;
int ret;
@@ -399,10 +399,9 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
ipu_plane->dma = dma;
ipu_plane->dp_flow = dp;
- ret = drm_plane_init(dev, &ipu_plane->base, possible_crtcs,
- &ipu_plane_funcs, ipu_plane_formats,
- ARRAY_SIZE(ipu_plane_formats),
- priv);
+ ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs,
+ &ipu_plane_funcs, ipu_plane_formats,
+ ARRAY_SIZE(ipu_plane_formats), type);
if (ret) {
DRM_ERROR("failed to initialize plane\n");
kfree(ipu_plane);
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index 9b5eff18f5b8..3a443b413c60 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -34,7 +34,7 @@ struct ipu_plane {
struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
int dma, int dp, unsigned int possible_crtcs,
- bool priv);
+ enum drm_plane_type type);
/* Init IDMAC, DMFC, DP */
int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index b4deb9cf9d71..2e9b9f1b5cd2 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -54,7 +54,11 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
if (imxpd->panel && imxpd->panel->funcs &&
imxpd->panel->funcs->get_modes) {
+ struct drm_display_info *di = &connector->display_info;
+
num_modes = imxpd->panel->funcs->get_modes(imxpd->panel);
+ if (!imxpd->bus_format && di->num_bus_formats)
+ imxpd->bus_format = di->bus_formats[0];
if (num_modes > 0)
return num_modes;
}
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 8f760002e401..913192c94876 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -159,7 +159,6 @@ struct nvkm_device_func {
struct nvkm_device_quirk {
u8 tv_pin_mask;
u8 tv_gpio;
- bool War00C800_0;
};
struct nvkm_device_chip {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index db6bc6760545..64c8d932d5f1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -829,7 +829,6 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
struct drm_device *dev = drm->dev;
struct nouveau_page_flip_state *s;
unsigned long flags;
- int crtcid = -1;
spin_lock_irqsave(&dev->event_lock, flags);
@@ -841,15 +840,19 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
if (s->event) {
- /* Vblank timestamps/counts are only correct on >= NV-50 */
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
- crtcid = s->crtc;
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+ drm_arm_vblank_event(dev, s->crtc, s->event);
+ } else {
- drm_send_vblank_event(dev, crtcid, s->event);
+ drm_send_vblank_event(dev, s->crtc, s->event);
+
+ /* Give up ownership of vblank for page-flipped crtc */
+ drm_vblank_put(dev, s->crtc);
+ }
+ } else {
+ /* Give up ownership of vblank for page-flipped crtc */
+ drm_vblank_put(dev, s->crtc);
}
-
- /* Give up ownership of vblank for page-flipped crtc */
- drm_vblank_put(dev, s->crtc);
list_del(&s->head);
if (ps)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index caf22b589edc..62ad0300cfa5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -259,12 +259,6 @@ nvkm_device_pci_10de_0df4[] = {
};
static const struct nvkm_device_pci_vendor
-nvkm_device_pci_10de_0fcd[] = {
- { 0x17aa, 0x3801, NULL, { .War00C800_0 = true } }, /* Lenovo Y510P */
- {}
-};
-
-static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0fd2[] = {
{ 0x1028, 0x0595, "GeForce GT 640M LE" },
{ 0x1028, 0x05b2, "GeForce GT 640M LE" },
@@ -279,12 +273,6 @@ nvkm_device_pci_10de_0fe3[] = {
};
static const struct nvkm_device_pci_vendor
-nvkm_device_pci_10de_0fe4[] = {
- { 0x144d, 0xc740, NULL, { .War00C800_0 = true } },
- {}
-};
-
-static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_104b[] = {
{ 0x1043, 0x844c, "GeForce GT 625" },
{ 0x1043, 0x846b, "GeForce GT 625" },
@@ -690,13 +678,6 @@ nvkm_device_pci_10de_1189[] = {
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1199[] = {
{ 0x1458, 0xd001, "GeForce GTX 760" },
- { 0x1462, 0x1106, "GeForce GTX 780M", { .War00C800_0 = true } }, /* Medion Erazer X7827 */
- {}
-};
-
-static const struct nvkm_device_pci_vendor
-nvkm_device_pci_10de_11e0[] = {
- { 0x1558, 0x5106, NULL, { .War00C800_0 = true } },
{}
};
@@ -707,14 +688,6 @@ nvkm_device_pci_10de_11e3[] = {
};
static const struct nvkm_device_pci_vendor
-nvkm_device_pci_10de_11fc[] = {
- { 0x1179, 0x0001, NULL, { .War00C800_0 = true } }, /* Toshiba Tecra W50 */
- { 0x17aa, 0x2211, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
- { 0x17aa, 0x221e, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
- {}
-};
-
-static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1247[] = {
{ 0x1043, 0x212a, "GeForce GT 635M" },
{ 0x1043, 0x212b, "GeForce GT 635M" },
@@ -1368,7 +1341,7 @@ nvkm_device_pci_10de[] = {
{ 0x0fc6, "GeForce GTX 650" },
{ 0x0fc8, "GeForce GT 740" },
{ 0x0fc9, "GeForce GT 730" },
- { 0x0fcd, "GeForce GT 755M", nvkm_device_pci_10de_0fcd },
+ { 0x0fcd, "GeForce GT 755M" },
{ 0x0fce, "GeForce GT 640M LE" },
{ 0x0fd1, "GeForce GT 650M" },
{ 0x0fd2, "GeForce GT 640M", nvkm_device_pci_10de_0fd2 },
@@ -1382,7 +1355,7 @@ nvkm_device_pci_10de[] = {
{ 0x0fe1, "GeForce GT 730M" },
{ 0x0fe2, "GeForce GT 745M" },
{ 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 },
- { 0x0fe4, "GeForce GT 750M", nvkm_device_pci_10de_0fe4 },
+ { 0x0fe4, "GeForce GT 750M" },
{ 0x0fe9, "GeForce GT 750M" },
{ 0x0fea, "GeForce GT 755M" },
{ 0x0fec, "GeForce 710A" },
@@ -1497,12 +1470,12 @@ nvkm_device_pci_10de[] = {
{ 0x11c6, "GeForce GTX 650 Ti" },
{ 0x11c8, "GeForce GTX 650" },
{ 0x11cb, "GeForce GT 740" },
- { 0x11e0, "GeForce GTX 770M", nvkm_device_pci_10de_11e0 },
+ { 0x11e0, "GeForce GTX 770M" },
{ 0x11e1, "GeForce GTX 765M" },
{ 0x11e2, "GeForce GTX 765M" },
{ 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 },
{ 0x11fa, "Quadro K4000" },
- { 0x11fc, "Quadro K2100M", nvkm_device_pci_10de_11fc },
+ { 0x11fc, "Quadro K2100M" },
{ 0x1200, "GeForce GTX 560 Ti" },
{ 0x1201, "GeForce GTX 560" },
{ 0x1203, "GeForce GTX 460 SE v2" },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index d942fa7b9f18..86f9f3b13f71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -81,9 +81,7 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
nvkm_rd32(device, 0x000200);
- if ( nvkm_boolopt(device->cfgopt, "War00C800_0",
- device->quirk ? device->quirk->War00C800_0 : false)) {
- nvkm_info(&pmu->subdev, "hw bug workaround enabled\n");
+ if (nvkm_boolopt(device->cfgopt, "War00C800_0", true)) {
switch (device->chipset) {
case 0xe4:
magic(device, 0x04000000);
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index b8e4cdec28c3..24f92bea39c7 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -112,11 +112,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
dma_addr_t paddr;
int ret;
- /* only doing ARGB32 since this is what is needed to alpha-blend
- * with video overlays:
- */
sizes->surface_bpp = 32;
- sizes->surface_depth = 32;
+ sizes->surface_depth = 24;
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
sizes->surface_height, sizes->surface_bpp,
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 248953d2fdb7..f81fb2641097 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -4173,11 +4173,7 @@ void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
control |= ib->length_dw | (vm_id << 24);
radeon_ring_write(ring, header);
- radeon_ring_write(ring,
-#ifdef __BIG_ENDIAN
- (2 << 0) |
-#endif
- (ib->gpu_addr & 0xFFFFFFFC));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
radeon_ring_write(ring, control);
}
@@ -8472,7 +8468,7 @@ restart_ih:
if (queue_dp)
schedule_work(&rdev->dp_work);
if (queue_hotplug)
- schedule_work(&rdev->hotplug_work);
+ schedule_delayed_work(&rdev->hotplug_work, 0);
if (queue_reset) {
rdev->needs_reset = true;
wake_up_all(&rdev->fence_queue);
@@ -9630,6 +9626,9 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
(rdev->disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
}
+
+ /* Save number of lines the linebuffer leads before the scanout */
+ radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
}
/* select wm A */
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7f33767d7ed6..2ad462896896 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2372,6 +2372,9 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
c.full = dfixed_div(c, a);
priority_b_mark = dfixed_trunc(c);
priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+
+ /* Save number of lines the linebuffer leads before the scanout */
+ radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
}
/* select wm A */
@@ -5344,7 +5347,7 @@ restart_ih:
if (queue_dp)
schedule_work(&rdev->dp_work);
if (queue_hotplug)
- schedule_work(&rdev->hotplug_work);
+ schedule_delayed_work(&rdev->hotplug_work, 0);
if (queue_hdmi)
schedule_work(&rdev->audio_work);
if (queue_thermal && rdev->pm.dpm_enabled)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 238b13f045c1..9e7e2bf03b81 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -806,7 +806,7 @@ int r100_irq_process(struct radeon_device *rdev)
status = r100_irq_ack(rdev);
}
if (queue_hotplug)
- schedule_work(&rdev->hotplug_work);
+ schedule_delayed_work(&rdev->hotplug_work, 0);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS400:
@@ -3217,6 +3217,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
uint32_t pixel_bytes1 = 0;
uint32_t pixel_bytes2 = 0;
+ /* Guess line buffer size to be 8192 pixels */
+ u32 lb_size = 8192;
+
if (!rdev->mode_info.mode_config_initialized)
return;
@@ -3631,6 +3634,13 @@ void r100_bandwidth_update(struct radeon_device *rdev)
DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
(unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
}
+
+ /* Save number of lines the linebuffer leads before the scanout */
+ if (mode1)
+ rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay);
+
+ if (mode2)
+ rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay);
}
int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
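The lb_vblank_lead_lines bookkeeping added in these hunks is plain ceiling division: with the guessed 8192-pixel line buffer, the buffer holds DIV_ROUND_UP(lb_size, crtc_hdisplay) scanlines, and that is how far the line buffer read position leads the scanout. A minimal standalone sketch of the arithmetic (values illustrative, macro re-derived locally):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned int lb_size = 8192;  /* guessed line buffer size, in pixels */
    unsigned int hdisplay[] = { 1024, 1280, 1680, 1920 };
    unsigned int i;

    for (i = 0; i < sizeof(hdisplay) / sizeof(hdisplay[0]); i++)
        printf("hdisplay %4u -> lead lines %u\n",
               hdisplay[i], DIV_ROUND_UP(lb_size, hdisplay[i]));
    return 0;
}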
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 4ea5b10ff5f4..cc2fdf0be37a 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -4276,7 +4276,7 @@ restart_ih:
WREG32(IH_RB_RPTR, rptr);
}
if (queue_hotplug)
- schedule_work(&rdev->hotplug_work);
+ schedule_delayed_work(&rdev->hotplug_work, 0);
if (queue_hdmi)
schedule_work(&rdev->audio_work);
if (queue_thermal && rdev->pm.dpm_enabled)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b6cbd816537e..87db64983ea8 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2414,7 +2414,7 @@ struct radeon_device {
struct r600_ih ih; /* r6/700 interrupt ring */
struct radeon_rlc rlc;
struct radeon_mec mec;
- struct work_struct hotplug_work;
+ struct delayed_work hotplug_work;
struct work_struct dp_work;
struct work_struct audio_work;
int num_crtc; /* number of crtcs */
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index fe994aac3b04..c77d349c561c 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -54,6 +54,9 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
/* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */
{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50,
PCI_VENDOR_ID_IBM, 0x0550, 1},
+ /* Intel 82855PM host bridge / RV250/M9 GL [Mobility FireGL 9000/Radeon 9000] needs AGPMode 1 (Thinkpad T40p) */
+ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
+ PCI_VENDOR_ID_IBM, 0x054d, 1},
/* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */
{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57,
PCI_VENDOR_ID_IBM, 0x0530, 1},
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 5a2cafb4f1bc..340f3f549f29 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1234,13 +1234,32 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if (r < 0)
return connector_status_disconnected;
+ if (radeon_connector->detected_hpd_without_ddc) {
+ force = true;
+ radeon_connector->detected_hpd_without_ddc = false;
+ }
+
if (!force && radeon_check_hpd_status_unchanged(connector)) {
ret = connector->status;
goto exit;
}
- if (radeon_connector->ddc_bus)
+ if (radeon_connector->ddc_bus) {
dret = radeon_ddc_probe(radeon_connector, false);
+
+ /* Sometimes the pins required for the DDC probe on DVI
+ * connectors don't make contact at the same time that the ones
+ * for HPD do. If the DDC probe fails even though we had an HPD
+ * signal, try again later */
+ if (!dret && !force &&
+ connector->status != connector_status_connected) {
+ DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n");
+ radeon_connector->detected_hpd_without_ddc = true;
+ schedule_delayed_work(&rdev->hotplug_work,
+ msecs_to_jiffies(1000));
+ goto exit;
+ }
+ }
if (dret) {
radeon_connector->detected_by_load = false;
radeon_connector_free_edid(connector);
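The retry added above reduces to a small handshake: remember that HPD fired while the DDC pins were not yet readable, schedule a delayed re-detect, and force a full probe when it runs. A compressed userspace sketch of that handshake, with an illustrative detect() standing in for the driver path:

#include <stdbool.h>
#include <stdio.h>

static bool detected_hpd_without_ddc;

/* one detect pass; ddc_ok stands in for radeon_ddc_probe() */
static int detect(bool ddc_ok)
{
    bool force = false;

    if (detected_hpd_without_ddc) {
        force = true;                   /* this is the 1 s retry */
        detected_hpd_without_ddc = false;
    }

    if (!ddc_ok && !force) {
        /* HPD seen but pins not ready: flag it and retry later */
        detected_hpd_without_ddc = true;
        return -1;                      /* keep previous status */
    }
    return ddc_ok ? 1 : 0;              /* connected / disconnected */
}

int main(void)
{
    int st = detect(false);             /* HPD fired, DDC not up yet */
    printf("first pass: %d (retry flagged: %d)\n",
           st, detected_hpd_without_ddc);
    printf("retry pass: %d\n", detect(true));
    return 0;
}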
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index a8d9927ed9eb..1eca0acac016 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -322,7 +322,9 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
* to complete in this vblank?
*/
if (update_pending &&
- (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0,
+ (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev,
+ crtc_id,
+ USE_REAL_VBLANKSTART,
&vpos, &hpos, NULL, NULL,
&rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
@@ -401,6 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
struct drm_crtc *crtc = &radeon_crtc->base;
unsigned long flags;
int r;
+ int vpos, hpos, stat, min_udelay;
+ struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
down_read(&rdev->exclusive_lock);
if (work->fence) {
@@ -437,6 +441,41 @@ static void radeon_flip_work_func(struct work_struct *__work)
/* set the proper interrupt */
radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+ /* If this happens to execute within the "virtually extended" vblank
+ * interval before the start of the real vblank interval then it needs
+ * to delay programming the mmio flip until the real vblank is entered.
+ * This prevents completing a flip too early due to the way we fudge
+ * our vblank counter and vblank timestamps in order to work around the
+ * problem that the hw fires vblank interrupts before actual start of
+ * vblank (when line buffer refilling is done for a frame). It
+ * complements the fudging logic in radeon_get_crtc_scanoutpos() for
+ * timestamping and radeon_get_vblank_counter_kms() for vblank counts.
+ *
+ * In practice this won't execute very often unless on very fast
+ * machines because the time window for this to happen is very small.
+ */
+ for (;;) {
+ /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
+ * start in hpos, and to the "fudged earlier" vblank start in
+ * vpos.
+ */
+ stat = radeon_get_crtc_scanoutpos(rdev->ddev, work->crtc_id,
+ GET_DISTANCE_TO_VBLANKSTART,
+ &vpos, &hpos, NULL, NULL,
+ &crtc->hwmode);
+
+ if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
+ !(vpos >= 0 && hpos <= 0))
+ break;
+
+ /* Sleep at least until estimated real start of hw vblank */
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+ usleep_range(min_udelay, 2 * min_udelay);
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ };
+
/* do the flip (mmio) */
radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
@@ -1768,6 +1807,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
* \param dev Device to query.
* \param crtc Crtc to query.
* \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ * For driver internal use only also supports these flags:
+ *
+ * USE_REAL_VBLANKSTART to use the real start of vblank instead
+ * of a fudged earlier start of vblank.
+ *
+ * GET_DISTANCE_TO_VBLANKSTART to return distance to the
+ * fudged earlier start of vblank in *vpos and the distance
+ * to true start of vblank in *hpos.
+ *
* \param *vpos Location where vertical scanout position should be stored.
* \param *hpos Location where horizontal scanout position should go.
* \param *stime Target location for timestamp taken immediately before
@@ -1911,10 +1959,40 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
vbl_end = 0;
}
+ /* Called from driver internal vblank counter query code? */
+ if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+ /* Caller wants distance from real vbl_start in *hpos */
+ *hpos = *vpos - vbl_start;
+ }
+
+ /* Fudge vblank to start a few scanlines earlier to handle the
+ * problem that vblank irqs fire a few scanlines before start
+ * of vblank. Some driver internal callers need the true vblank
+ * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
+ *
+ * The cause of the "early" vblank irq is that the irq is triggered
+ * by the line buffer logic when the line buffer read position enters
+ * the vblank, whereas our crtc scanout position naturally lags the
+ * line buffer read position.
+ */
+ if (!(flags & USE_REAL_VBLANKSTART))
+ vbl_start -= rdev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;
+
/* Test scanout position against vblank region. */
if ((*vpos < vbl_start) && (*vpos >= vbl_end))
in_vbl = false;
+ /* In vblank? */
+ if (in_vbl)
+ ret |= DRM_SCANOUTPOS_IN_VBLANK;
+
+ /* Called from driver internal vblank counter query code? */
+ if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+ /* Caller wants distance from fudged earlier vbl_start */
+ *vpos -= vbl_start;
+ return ret;
+ }
+
/* Check if inside vblank area and apply corrective offsets:
* vpos will then be >=0 in video scanout area, but negative
* within vblank area, counting down the number of lines until
@@ -1930,31 +2008,5 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
/* Correct for shifted end of vbl at vbl_end. */
*vpos = *vpos - vbl_end;
- /* In vblank? */
- if (in_vbl)
- ret |= DRM_SCANOUTPOS_IN_VBLANK;
-
- /* Is vpos outside nominal vblank area, but less than
- * 1/100 of a frame height away from start of vblank?
- * If so, assume this isn't a massively delayed vblank
- * interrupt, but a vblank interrupt that fired a few
- * microseconds before true start of vblank. Compensate
- * by adding a full frame duration to the final timestamp.
- * Happens, e.g., on ATI R500, R600.
- *
- * We only do this if DRM_CALLED_FROM_VBLIRQ.
- */
- if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
- vbl_start = mode->crtc_vdisplay;
- vtotal = mode->crtc_vtotal;
-
- if (vbl_start - *vpos < vtotal / 100) {
- *vpos -= vtotal;
-
- /* Signal this correction as "applied". */
- ret |= 0x8;
- }
- }
-
return ret;
}
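The two driver-internal flags documented above only change what the query reports. A toy model of the flag handling (line numbers illustrative) that mirrors the ordering in the hunks, where the distance to the real vblank start is taken before the fudge is applied and the distance to the fudged start after it:

#include <stdio.h>

#define USE_REAL_VBLANKSTART        (1u << 30)
#define GET_DISTANCE_TO_VBLANKSTART (1u << 31)

static void scanoutpos(unsigned int flags, int *vpos, int *hpos)
{
    int vbl_start = 1080;   /* true first line of vblank */
    int lead = 4;           /* lb_vblank_lead_lines */

    *vpos = 1078;           /* current scanline, inside the lead */

    if (flags & GET_DISTANCE_TO_VBLANKSTART)
        *hpos = *vpos - vbl_start;      /* to the real vblank start */

    if (!(flags & USE_REAL_VBLANKSTART))
        vbl_start -= lead;              /* fudge vblank earlier */

    if (flags & GET_DISTANCE_TO_VBLANKSTART)
        *vpos -= vbl_start;             /* to the fudged vblank start */
}

int main(void)
{
    int vpos, hpos;

    scanoutpos(GET_DISTANCE_TO_VBLANKSTART, &vpos, &hpos);
    /* vpos >= 0 && hpos <= 0: inside the "virtually extended"
     * interval, so the flip worker above would keep sleeping */
    printf("fudged dist %d, real dist %d\n", vpos, hpos);
    return 0;
}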
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 171d3e43c30c..979f3bf65f2c 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -74,7 +74,7 @@ irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg)
static void radeon_hotplug_work_func(struct work_struct *work)
{
struct radeon_device *rdev = container_of(work, struct radeon_device,
- hotplug_work);
+ hotplug_work.work);
struct drm_device *dev = rdev->ddev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
@@ -302,7 +302,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
}
}
- INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+ INIT_DELAYED_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
INIT_WORK(&rdev->dp_work, radeon_dp_work_func);
INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
@@ -310,7 +310,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
if (r) {
rdev->irq.installed = false;
- flush_work(&rdev->hotplug_work);
+ flush_delayed_work(&rdev->hotplug_work);
return r;
}
@@ -333,7 +333,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
rdev->irq.installed = false;
if (rdev->msi_enabled)
pci_disable_msi(rdev->pdev);
- flush_work(&rdev->hotplug_work);
+ flush_delayed_work(&rdev->hotplug_work);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 0ec6fcca16d3..d290a8a09036 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -755,6 +755,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
*/
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
+ int vpos, hpos, stat;
+ u32 count;
struct radeon_device *rdev = dev->dev_private;
if (crtc < 0 || crtc >= rdev->num_crtc) {
@@ -762,7 +764,53 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
return -EINVAL;
}
- return radeon_get_vblank_counter(rdev, crtc);
+ /* The hw increments its frame counter at start of vsync, not at start
+ * of vblank, as is required by DRM core vblank counter handling.
+ * Cook the hw count here to make it appear to the caller as if it
+ * incremented at start of vblank. We measure distance to start of
+ * vblank in vpos. vpos therefore will be >= 0 between start of vblank
+ * and start of vsync, so vpos >= 0 means to bump the hw frame counter
+ * result by 1 to give the proper appearance to caller.
+ */
+ if (rdev->mode_info.crtcs[crtc]) {
+ /* Repeat readout if needed to provide stable result if
+ * we cross start of vsync during the queries.
+ */
+ do {
+ count = radeon_get_vblank_counter(rdev, crtc);
+ /* Ask radeon_get_crtc_scanoutpos to return vpos as
+ * distance to start of vblank, instead of regular
+ * vertical scanout pos.
+ */
+ stat = radeon_get_crtc_scanoutpos(
+ dev, crtc, GET_DISTANCE_TO_VBLANKSTART,
+ &vpos, &hpos, NULL, NULL,
+ &rdev->mode_info.crtcs[crtc]->base.hwmode);
+ } while (count != radeon_get_vblank_counter(rdev, crtc));
+
+ if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
+ DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
+ }
+ else {
+ DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
+ crtc, vpos);
+
+ /* Bump counter if we are at >= leading edge of vblank,
+ * but before vsync where vpos would turn negative and
+ * the hw counter really increments.
+ */
+ if (vpos >= 0)
+ count++;
+ }
+ }
+ else {
+ /* Fallback to use value as is. */
+ count = radeon_get_vblank_counter(rdev, crtc);
+ DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
+ }
+
+ return count;
}
/**
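The repeat-readout above is a seqlock-style pattern: the counter and the position sample are only consistent if the counter reads the same before and after. A userspace analog, with the hardware counter replaced by an atomic and the scanout query stubbed out:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint frame_counter;       /* stands in for the hw counter */

static int query_distance_to_vblank(void)
{
    return 2;                           /* stub scanout query */
}

int main(void)
{
    unsigned int count;
    int vpos;

    do {
        count = atomic_load(&frame_counter);
        vpos = query_distance_to_vblank();
    } while (count != atomic_load(&frame_counter));

    /* past the (fudged) leading edge of vblank but before vsync:
     * the hw counter has not ticked yet, so present count + 1 */
    if (vpos >= 0)
        count++;

    printf("cooked count %u\n", count);
    return 0;
}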
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 830e171c3a9e..bba112628b47 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -367,6 +367,7 @@ struct radeon_crtc {
u32 line_time;
u32 wm_low;
u32 wm_high;
+ u32 lb_vblank_lead_lines;
struct drm_display_mode hw_mode;
enum radeon_output_csc output_csc;
};
@@ -553,6 +554,7 @@ struct radeon_connector {
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
+ bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */
uint16_t connector_object_id;
struct radeon_hpd hpd;
struct radeon_router router;
@@ -686,6 +688,9 @@ struct atom_voltage_table
struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES];
};
+/* Driver internal use only flags of radeon_get_crtc_scanoutpos() */
+#define USE_REAL_VBLANKSTART (1 << 30)
+#define GET_DISTANCE_TO_VBLANKSTART (1 << 31)
extern void
radeon_add_atom_connector(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index f4f03dcc1530..59abebd6b5dc 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1756,7 +1756,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
*/
for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
if (rdev->pm.active_crtcs & (1 << crtc)) {
- vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0,
+ vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
+ crtc,
+ USE_REAL_VBLANKSTART,
&vpos, &hpos, NULL, NULL,
&rdev->mode_info.crtcs[crtc]->base.hwmode);
if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 574f62bbd215..7eb1ae758906 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -361,31 +361,31 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
/* stitch together an VCE create msg */
ib.length_dw = 0;
- ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
- ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
- ib.ptr[ib.length_dw++] = handle;
-
- ib.ptr[ib.length_dw++] = 0x00000030; /* len */
- ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
- ib.ptr[ib.length_dw++] = 0x00000000;
- ib.ptr[ib.length_dw++] = 0x00000042;
- ib.ptr[ib.length_dw++] = 0x0000000a;
- ib.ptr[ib.length_dw++] = 0x00000001;
- ib.ptr[ib.length_dw++] = 0x00000080;
- ib.ptr[ib.length_dw++] = 0x00000060;
- ib.ptr[ib.length_dw++] = 0x00000100;
- ib.ptr[ib.length_dw++] = 0x00000100;
- ib.ptr[ib.length_dw++] = 0x0000000c;
- ib.ptr[ib.length_dw++] = 0x00000000;
-
- ib.ptr[ib.length_dw++] = 0x00000014; /* len */
- ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
- ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
- ib.ptr[ib.length_dw++] = dummy;
- ib.ptr[ib.length_dw++] = 0x00000001;
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
+ ib.ptr[ib.length_dw++] = cpu_to_le32(handle);
+
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000030); /* len */
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x01000001); /* create cmd */
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000);
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000042);
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000a);
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000080);
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000060);
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100);
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100);
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c);
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000);
+
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */
+ ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy));
+ ib.ptr[ib.length_dw++] = cpu_to_le32(dummy);
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
for (i = ib.length_dw; i < ib_size_dw; ++i)
- ib.ptr[i] = 0x0;
+ ib.ptr[i] = cpu_to_le32(0x0);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
@@ -428,21 +428,21 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
/* stitch together an VCE destroy msg */
ib.length_dw = 0;
- ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
- ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
- ib.ptr[ib.length_dw++] = handle;
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
+ ib.ptr[ib.length_dw++] = cpu_to_le32(handle);
- ib.ptr[ib.length_dw++] = 0x00000014; /* len */
- ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
- ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
- ib.ptr[ib.length_dw++] = dummy;
- ib.ptr[ib.length_dw++] = 0x00000001;
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */
+ ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy));
+ ib.ptr[ib.length_dw++] = cpu_to_le32(dummy);
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
- ib.ptr[ib.length_dw++] = 0x00000008; /* len */
- ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000008); /* len */
+ ib.ptr[ib.length_dw++] = cpu_to_le32(0x02000001); /* destroy cmd */
for (i = ib.length_dw; i < ib_size_dw; ++i)
- ib.ptr[i] = 0x0;
+ ib.ptr[i] = cpu_to_le32(0x0);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
@@ -699,12 +699,12 @@ bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
{
uint64_t addr = semaphore->gpu_addr;
- radeon_ring_write(ring, VCE_CMD_SEMAPHORE);
- radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
- radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
- radeon_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
+ radeon_ring_write(ring, cpu_to_le32(VCE_CMD_SEMAPHORE));
+ radeon_ring_write(ring, cpu_to_le32((addr >> 3) & 0x000FFFFF));
+ radeon_ring_write(ring, cpu_to_le32((addr >> 23) & 0x000FFFFF));
+ radeon_ring_write(ring, cpu_to_le32(0x01003000 | (emit_wait ? 1 : 0)));
if (!emit_wait)
- radeon_ring_write(ring, VCE_CMD_END);
+ radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
return true;
}
@@ -719,10 +719,10 @@ bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
- radeon_ring_write(ring, VCE_CMD_IB);
- radeon_ring_write(ring, ib->gpu_addr);
- radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
- radeon_ring_write(ring, ib->length_dw);
+ radeon_ring_write(ring, cpu_to_le32(VCE_CMD_IB));
+ radeon_ring_write(ring, cpu_to_le32(ib->gpu_addr));
+ radeon_ring_write(ring, cpu_to_le32(upper_32_bits(ib->gpu_addr)));
+ radeon_ring_write(ring, cpu_to_le32(ib->length_dw));
}
/**
@@ -738,12 +738,12 @@ void radeon_vce_fence_emit(struct radeon_device *rdev,
struct radeon_ring *ring = &rdev->ring[fence->ring];
uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
- radeon_ring_write(ring, VCE_CMD_FENCE);
- radeon_ring_write(ring, addr);
- radeon_ring_write(ring, upper_32_bits(addr));
- radeon_ring_write(ring, fence->seq);
- radeon_ring_write(ring, VCE_CMD_TRAP);
- radeon_ring_write(ring, VCE_CMD_END);
+ radeon_ring_write(ring, cpu_to_le32(VCE_CMD_FENCE));
+ radeon_ring_write(ring, cpu_to_le32(addr));
+ radeon_ring_write(ring, cpu_to_le32(upper_32_bits(addr)));
+ radeon_ring_write(ring, cpu_to_le32(fence->seq));
+ radeon_ring_write(ring, cpu_to_le32(VCE_CMD_TRAP));
+ radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
}
/**
@@ -765,7 +765,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
ring->idx, r);
return r;
}
- radeon_ring_write(ring, VCE_CMD_END);
+ radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) {
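The VCE engine parses its command stream as little-endian dwords, so each word must go through cpu_to_le32(); on little-endian hosts the conversion compiles away. The same check in userspace via glibc's htole32() (command value taken from the message above):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t cmd = 0x0000000c;          /* "len" word from the msg */
    uint32_t wire = htole32(cmd);       /* userspace ~ cpu_to_le32() */
    unsigned char *b = (unsigned char *)&wire;

    /* prints "0c 00 00 00" wherever it runs: the byte order the
     * engine sees no longer depends on host endianness */
    printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
    return 0;
}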
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 97a904835759..6244f4e44e9a 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -813,7 +813,7 @@ int rs600_irq_process(struct radeon_device *rdev)
status = rs600_irq_ack(rdev);
}
if (queue_hotplug)
- schedule_work(&rdev->hotplug_work);
+ schedule_delayed_work(&rdev->hotplug_work, 0);
if (queue_hdmi)
schedule_work(&rdev->audio_work);
if (rdev->msi_enabled) {
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 516ca27cfa12..6bc44c24e837 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -207,6 +207,9 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
{
u32 tmp;
+ /* Guess line buffer size to be 8192 pixels */
+ u32 lb_size = 8192;
+
/*
* Line Buffer Setup
* There is a single line buffer shared by both display controllers.
@@ -243,6 +246,13 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
}
WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
+
+ /* Save number of lines the linebuffer leads before the scanout */
+ if (mode1)
+ rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay);
+
+ if (mode2)
+ rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay);
}
struct rs690_watermark {
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 07037e32dea3..f878d6962da5 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2376,6 +2376,9 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
c.full = dfixed_div(c, a);
priority_b_mark = dfixed_trunc(c);
priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+
+ /* Save number of lines the linebuffer leads before the scanout */
+ radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
}
/* select wm A */
@@ -6848,7 +6851,7 @@ restart_ih:
if (queue_dp)
schedule_work(&rdev->dp_work);
if (queue_hotplug)
- schedule_work(&rdev->hotplug_work);
+ schedule_delayed_work(&rdev->hotplug_work, 0);
if (queue_thermal && rdev->pm.dpm_enabled)
schedule_work(&rdev->pm.dpm.thermal.work);
rdev->ih.rptr = rptr;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 8caea0a33dd8..d908321b94ce 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -67,6 +67,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_pgoff = 0;
ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
obj->size, &rk_obj->dma_attrs);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 5d8ae5e49c44..03c47eeadc81 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -374,6 +374,7 @@ static const struct of_device_id vop_driver_dt_match[] = {
.data = &rk3288_vop },
{},
};
+MODULE_DEVICE_TABLE(of, vop_driver_dt_match);
static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
{
@@ -959,8 +960,8 @@ static int vop_update_plane_event(struct drm_plane *plane,
val = (dest.y2 - dest.y1 - 1) << 16;
val |= (dest.x2 - dest.x1 - 1) & 0xffff;
VOP_WIN_SET(vop, win, dsp_info, val);
- val = (dsp_sty - 1) << 16;
- val |= (dsp_stx - 1) & 0xffff;
+ val = dsp_sty << 16;
+ val |= dsp_stx & 0xffff;
VOP_WIN_SET(vop, win, dsp_st, val);
VOP_WIN_SET(vop, win, rb_swap, rb_swap);
@@ -1289,7 +1290,7 @@ static void vop_win_state_complete(struct vop_win *vop_win,
if (state->event) {
spin_lock_irqsave(&drm->event_lock, flags);
- drm_send_vblank_event(drm, -1, state->event);
+ drm_crtc_send_vblank_event(crtc, state->event);
spin_unlock_irqrestore(&drm->event_lock, flags);
}
@@ -1575,32 +1576,25 @@ static int vop_initial(struct vop *vop)
return PTR_ERR(vop->dclk);
}
- ret = clk_prepare(vop->hclk);
- if (ret < 0) {
- dev_err(vop->dev, "failed to prepare hclk\n");
- return ret;
- }
-
ret = clk_prepare(vop->dclk);
if (ret < 0) {
dev_err(vop->dev, "failed to prepare dclk\n");
- goto err_unprepare_hclk;
+ return ret;
}
- ret = clk_prepare(vop->aclk);
+ /* Enable both the hclk and aclk to setup the vop */
+ ret = clk_prepare_enable(vop->hclk);
if (ret < 0) {
- dev_err(vop->dev, "failed to prepare aclk\n");
+ dev_err(vop->dev, "failed to prepare/enable hclk\n");
goto err_unprepare_dclk;
}
- /*
- * enable hclk, so that we can config vop register.
- */
- ret = clk_enable(vop->hclk);
+ ret = clk_prepare_enable(vop->aclk);
if (ret < 0) {
- dev_err(vop->dev, "failed to prepare aclk\n");
- goto err_unprepare_aclk;
+ dev_err(vop->dev, "failed to prepare/enable aclk\n");
+ goto err_disable_hclk;
}
+
/*
* do hclk_reset, reset all vop registers.
*/
@@ -1608,7 +1602,7 @@ static int vop_initial(struct vop *vop)
if (IS_ERR(ahb_rst)) {
dev_err(vop->dev, "failed to get ahb reset\n");
ret = PTR_ERR(ahb_rst);
- goto err_disable_hclk;
+ goto err_disable_aclk;
}
reset_control_assert(ahb_rst);
usleep_range(10, 20);
@@ -1634,26 +1628,25 @@ static int vop_initial(struct vop *vop)
if (IS_ERR(vop->dclk_rst)) {
dev_err(vop->dev, "failed to get dclk reset\n");
ret = PTR_ERR(vop->dclk_rst);
- goto err_unprepare_aclk;
+ goto err_disable_aclk;
}
reset_control_assert(vop->dclk_rst);
usleep_range(10, 20);
reset_control_deassert(vop->dclk_rst);
clk_disable(vop->hclk);
+ clk_disable(vop->aclk);
vop->is_enabled = false;
return 0;
+err_disable_aclk:
+ clk_disable_unprepare(vop->aclk);
err_disable_hclk:
- clk_disable(vop->hclk);
-err_unprepare_aclk:
- clk_unprepare(vop->aclk);
+ clk_disable_unprepare(vop->hclk);
err_unprepare_dclk:
clk_unprepare(vop->dclk);
-err_unprepare_hclk:
- clk_unprepare(vop->hclk);
return ret;
}
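The relabelled error paths above follow the usual unwind rule: release in exact reverse order of acquisition, one goto label per live resource. A stripped-down sketch of that shape, with illustrative prepare()/unprepare() helpers standing in for the clk calls:

#include <stdio.h>

static int prepare(const char *name)
{
    printf("prepare %s\n", name);
    return 0;                           /* pretend it worked */
}

static void unprepare(const char *name)
{
    printf("unprepare %s\n", name);
}

static int vop_initial_sketch(void)
{
    int ret;

    ret = prepare("dclk");
    if (ret < 0)
        return ret;                     /* nothing to unwind yet */

    ret = prepare("hclk");
    if (ret < 0)
        goto err_unprepare_dclk;

    ret = prepare("aclk");
    if (ret < 0)
        goto err_disable_hclk;

    return 0;

err_disable_hclk:
    unprepare("hclk");
err_unprepare_dclk:
    unprepare("dclk");
    return ret;
}

int main(void)
{
    return vop_initial_sketch();
}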
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index 6a954544727f..f154fb1929bd 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -180,7 +180,7 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
spin_unlock(&lock->lock);
}
} else
- wait_event(lock->queue, __ttm_read_lock(lock));
+ wait_event(lock->queue, __ttm_write_lock(lock));
return ret;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index f545913a56c7..578fe0a9324c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -412,7 +412,7 @@ static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
.save = virtio_gpu_conn_save,
.restore = virtio_gpu_conn_restore,
.detect = virtio_gpu_conn_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
+ .fill_modes = drm_helper_probe_single_connector_modes_nomerge,
.destroy = virtio_gpu_conn_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index a09cf8529b9f..c49812b80dd0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1233,6 +1233,7 @@ static void vmw_master_drop(struct drm_device *dev,
vmw_fp->locked_master = drm_master_get(file_priv->master);
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
+ vmw_kms_legacy_hotspot_clear(dev_priv);
if (unlikely((ret != 0))) {
DRM_ERROR("Unable to lock TTM at VT switch.\n");
drm_master_put(&vmw_fp->locked_master);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index a8ae9dfb83b7..469cdd520615 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -925,6 +925,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
int vmw_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index a8baf5f5e765..b6a0806b06bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -390,7 +390,7 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
else if (ctx_id == SVGA3D_INVALID_ID)
ret = vmw_local_fifo_reserve(dev_priv, bytes);
else {
- WARN_ON("Command buffer has not been allocated.\n");
+ WARN(1, "Command buffer has not been allocated.\n");
ret = NULL;
}
if (IS_ERR_OR_NULL(ret)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 9fcd7f82995c..9b4bb9e74d73 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -133,13 +133,19 @@ void vmw_cursor_update_position(struct vmw_private *dev_priv,
vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}
-int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t handle, uint32_t width, uint32_t height)
+
+/*
+ * vmw_du_crtc_cursor_set2 - Driver cursor_set2 callback.
+ */
+int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height,
+ int32_t hot_x, int32_t hot_y)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *dmabuf = NULL;
+ s32 hotspot_x, hotspot_y;
int ret;
/*
@@ -151,6 +157,8 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
*/
drm_modeset_unlock_crtc(crtc);
drm_modeset_lock_all(dev_priv->dev);
+ hotspot_x = hot_x + du->hotspot_x;
+ hotspot_y = hot_y + du->hotspot_y;
/* A lot of the code assumes this */
if (handle && (width != 64 || height != 64)) {
@@ -187,31 +195,34 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
vmw_dmabuf_unreference(&du->cursor_dmabuf);
/* setup new image */
+ ret = 0;
if (surface) {
/* vmw_user_surface_lookup takes one reference */
du->cursor_surface = surface;
du->cursor_surface->snooper.crtc = crtc;
du->cursor_age = du->cursor_surface->snooper.age;
- vmw_cursor_update_image(dev_priv, surface->snooper.image,
- 64, 64, du->hotspot_x, du->hotspot_y);
+ ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
+ 64, 64, hotspot_x, hotspot_y);
} else if (dmabuf) {
/* vmw_user_surface_lookup takes one reference */
du->cursor_dmabuf = dmabuf;
ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
- du->hotspot_x, du->hotspot_y);
+ hotspot_x, hotspot_y);
} else {
vmw_cursor_update_position(dev_priv, false, 0, 0);
- ret = 0;
goto out;
}
- vmw_cursor_update_position(dev_priv, true,
- du->cursor_x + du->hotspot_x,
- du->cursor_y + du->hotspot_y);
+ if (!ret) {
+ vmw_cursor_update_position(dev_priv, true,
+ du->cursor_x + hotspot_x,
+ du->cursor_y + hotspot_y);
+ du->core_hotspot_x = hot_x;
+ du->core_hotspot_y = hot_y;
+ }
- ret = 0;
out:
drm_modeset_unlock_all(dev_priv->dev);
drm_modeset_lock_crtc(crtc, crtc->cursor);
@@ -239,8 +250,10 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
drm_modeset_lock_all(dev_priv->dev);
vmw_cursor_update_position(dev_priv, shown,
- du->cursor_x + du->hotspot_x,
- du->cursor_y + du->hotspot_y);
+ du->cursor_x + du->hotspot_x +
+ du->core_hotspot_x,
+ du->cursor_y + du->hotspot_y +
+ du->core_hotspot_y);
drm_modeset_unlock_all(dev_priv->dev);
drm_modeset_lock_crtc(crtc, crtc->cursor);
@@ -334,6 +347,29 @@ err_unreserve:
ttm_bo_unreserve(bo);
}
+/**
+ * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
+ *
+ * @dev_priv: Pointer to the device private struct.
+ *
+ * Clears all legacy hotspots.
+ */
+void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct vmw_display_unit *du;
+ struct drm_crtc *crtc;
+
+ drm_modeset_lock_all(dev);
+ drm_for_each_crtc(crtc, dev) {
+ du = vmw_crtc_to_du(crtc);
+
+ du->hotspot_x = 0;
+ du->hotspot_y = 0;
+ }
+ drm_modeset_unlock_all(dev);
+}
+
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
@@ -351,7 +387,9 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
du->cursor_age = du->cursor_surface->snooper.age;
vmw_cursor_update_image(dev_priv,
du->cursor_surface->snooper.image,
- 64, 64, du->hotspot_x, du->hotspot_y);
+ 64, 64,
+ du->hotspot_x + du->core_hotspot_x,
+ du->hotspot_y + du->core_hotspot_y);
}
mutex_unlock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 782df7ca9794..edd81503516d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -159,6 +159,8 @@ struct vmw_display_unit {
int hotspot_x;
int hotspot_y;
+ s32 core_hotspot_x;
+ s32 core_hotspot_y;
unsigned unit;
@@ -193,8 +195,9 @@ void vmw_du_crtc_restore(struct drm_crtc *crtc);
void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
uint32_t start, uint32_t size);
-int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t handle, uint32_t width, uint32_t height);
+int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height,
+ int32_t hot_x, int32_t hot_y);
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
int vmw_du_connector_dpms(struct drm_connector *connector, int mode);
void vmw_du_connector_save(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index bb63e4d795fa..52caecb4502e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -297,7 +297,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.save = vmw_du_crtc_save,
.restore = vmw_du_crtc_restore,
- .cursor_set = vmw_du_crtc_cursor_set,
+ .cursor_set2 = vmw_du_crtc_cursor_set2,
.cursor_move = vmw_du_crtc_cursor_move,
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_ldu_crtc_destroy,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index b96d1ab610c5..13926ff192e3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -533,7 +533,7 @@ out_no_fence:
static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.save = vmw_du_crtc_save,
.restore = vmw_du_crtc_restore,
- .cursor_set = vmw_du_crtc_cursor_set,
+ .cursor_set2 = vmw_du_crtc_cursor_set2,
.cursor_move = vmw_du_crtc_cursor_move,
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_sou_crtc_destroy,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index b1fc1c02792d..f823fc3efed7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1043,7 +1043,7 @@ out_finish:
static struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
.save = vmw_du_crtc_save,
.restore = vmw_du_crtc_restore,
- .cursor_set = vmw_du_crtc_cursor_set,
+ .cursor_set2 = vmw_du_crtc_cursor_set2,
.cursor_move = vmw_du_crtc_cursor_move,
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_stdu_crtc_destroy,
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index ba47b30d28fa..f2e13eb8339f 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -28,6 +28,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_device.h>
+#include <linux/of_graph.h>
#include <drm/drm_fourcc.h>
@@ -993,12 +994,26 @@ static void platform_device_unregister_children(struct platform_device *pdev)
struct ipu_platform_reg {
struct ipu_client_platformdata pdata;
const char *name;
- int reg_offset;
};
+/* These must be in the order of the corresponding device tree port nodes */
static const struct ipu_platform_reg client_reg[] = {
{
.pdata = {
+ .csi = 0,
+ .dma[0] = IPUV3_CHANNEL_CSI0,
+ .dma[1] = -EINVAL,
+ },
+ .name = "imx-ipuv3-camera",
+ }, {
+ .pdata = {
+ .csi = 1,
+ .dma[0] = IPUV3_CHANNEL_CSI1,
+ .dma[1] = -EINVAL,
+ },
+ .name = "imx-ipuv3-camera",
+ }, {
+ .pdata = {
.di = 0,
.dc = 5,
.dp = IPU_DP_FLOW_SYNC_BG,
@@ -1015,22 +1030,6 @@ static const struct ipu_platform_reg client_reg[] = {
.dma[1] = -EINVAL,
},
.name = "imx-ipuv3-crtc",
- }, {
- .pdata = {
- .csi = 0,
- .dma[0] = IPUV3_CHANNEL_CSI0,
- .dma[1] = -EINVAL,
- },
- .reg_offset = IPU_CM_CSI0_REG_OFS,
- .name = "imx-ipuv3-camera",
- }, {
- .pdata = {
- .csi = 1,
- .dma[0] = IPUV3_CHANNEL_CSI1,
- .dma[1] = -EINVAL,
- },
- .reg_offset = IPU_CM_CSI1_REG_OFS,
- .name = "imx-ipuv3-camera",
},
};
@@ -1051,22 +1050,30 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
const struct ipu_platform_reg *reg = &client_reg[i];
struct platform_device *pdev;
- struct resource res;
-
- if (reg->reg_offset) {
- memset(&res, 0, sizeof(res));
- res.flags = IORESOURCE_MEM;
- res.start = ipu_base + ipu->devtype->cm_ofs + reg->reg_offset;
- res.end = res.start + PAGE_SIZE - 1;
- pdev = platform_device_register_resndata(dev, reg->name,
- id++, &res, 1, &reg->pdata, sizeof(reg->pdata));
- } else {
- pdev = platform_device_register_data(dev, reg->name,
- id++, &reg->pdata, sizeof(reg->pdata));
+
+ pdev = platform_device_alloc(reg->name, id++);
+ if (!pdev) {
+ ret = -ENOMEM;
+ goto err_register;
+ }
+
+ pdev->dev.parent = dev;
+
+ /* Associate subdevice with the corresponding port node */
+ pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, i);
+ if (!pdev->dev.of_node) {
+ dev_err(dev, "missing port@%d node in %s\n", i,
+ dev->of_node->full_name);
+ ret = -ENODEV;
+ goto err_register;
}
- if (IS_ERR(pdev)) {
- ret = PTR_ERR(pdev);
+ ret = platform_device_add_data(pdev, &reg->pdata,
+ sizeof(reg->pdata));
+ if (!ret)
+ ret = platform_device_add(pdev);
+ if (ret) {
+ platform_device_put(pdev);
goto err_register;
}
}
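The reordered client table depends on position: entry i is now bound to port@i of the IPU node, which is why the camera entries had to move ahead of the CRTC ones. A trivial sketch of that index-based pairing (port layout assumed):

#include <stdio.h>

int main(void)
{
    /* entry i of the client table is bound to DT port@i, so the
     * two tables must stay in the same order (names from above,
     * port layout assumed) */
    static const char *ports[] = {
        "port@0 (CSI0)", "port@1 (CSI1)",
        "port@2 (DI0)",  "port@3 (DI1)",
    };
    static const char *clients[] = {
        "imx-ipuv3-camera", "imx-ipuv3-camera",
        "imx-ipuv3-crtc",   "imx-ipuv3-crtc",
    };
    int i;

    for (i = 0; i < 4; i++)
        printf("%-18s -> %s\n", clients[i], ports[i]);
    return 0;
}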
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 3166e4bc4eb6..9abcaa53bd25 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -395,8 +395,10 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
set_current_state(interruptible ?
TASK_INTERRUPTIBLE :
TASK_UNINTERRUPTIBLE);
- if (signal_pending(current)) {
- rc = -EINTR;
+ if (interruptible && signal_pending(current)) {
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&vga_wait_queue, &wait);
+ rc = -ERESTARTSYS;
break;
}
schedule();
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 9024a3de4032..8b78a7f1f779 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -316,11 +316,6 @@
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
#define USB_VENDOR_ID_ELAN 0x04f3
-#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089
-#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b
-#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103 0x0103
-#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c 0x010c
-#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 2324520b006d..7dd0953cd70f 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -72,11 +72,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
@@ -340,7 +336,8 @@ static const struct hid_blacklist *usbhid_exists_squirk(const u16 idVendor,
for (; hid_blacklist[n].idVendor; n++)
if (hid_blacklist[n].idVendor == idVendor &&
- hid_blacklist[n].idProduct == idProduct)
+ (hid_blacklist[n].idProduct == (__u16) HID_ANY_ID ||
+ hid_blacklist[n].idProduct == idProduct))
bl_entry = &hid_blacklist[n];
if (bl_entry != NULL)
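Collapsing the per-device ELAN entries into one row works because the lookup above now treats the product id as a wildcard when the table says so. A self-contained model of that match (wildcard value and ids illustrative):

#include <stdint.h>
#include <stdio.h>

#define ANY_ID 0xffff                   /* illustrative wildcard */

struct quirk {
    uint16_t vendor, product;
    unsigned int flags;
};

static const struct quirk quirks[] = {
    { 0x04f3, ANY_ID, 0x1 },            /* whole-vendor entry */
    { 0x056e, 0x0061, 0x2 },            /* exact-match entry */
    { 0, 0, 0 }
};

static unsigned int lookup(uint16_t vendor, uint16_t product)
{
    int i;

    for (i = 0; quirks[i].vendor; i++)
        if (quirks[i].vendor == vendor &&
            (quirks[i].product == ANY_ID ||
             quirks[i].product == product))
            return quirks[i].flags;
    return 0;
}

int main(void)
{
    /* any product from vendor 0x04f3 hits the wildcard row */
    printf("flags %#x\n", lookup(0x04f3, 0x0089));
    return 0;
}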
diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
index 0c4618b4d515..c2babe50a0d8 100644
--- a/drivers/iio/adc/qcom-spmi-vadc.c
+++ b/drivers/iio/adc/qcom-spmi-vadc.c
@@ -839,8 +839,10 @@ static int vadc_get_dt_data(struct vadc_priv *vadc, struct device_node *node)
for_each_available_child_of_node(node, child) {
ret = vadc_get_dt_channel_data(vadc->dev, &prop, child);
- if (ret)
+ if (ret) {
+ of_node_put(child);
return ret;
+ }
vadc->chan_props[index] = prop;
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index d7e908acb480..0f6f63b20263 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -302,7 +302,7 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
if (trialmask == NULL)
return -ENOMEM;
if (!indio_dev->masklength) {
- WARN_ON("Trying to set scanmask prior to registering buffer\n");
+ WARN(1, "Trying to set scanmask prior to registering buffer\n");
goto err_invalid_mask;
}
bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 208358f9e7e3..159ede61f793 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -655,7 +655,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
break;
case IIO_SEPARATE:
if (!chan->indexed) {
- WARN_ON("Differential channels must be indexed\n");
+ WARN(1, "Differential channels must be indexed\n");
ret = -EINVAL;
goto error_free_full_postfix;
}
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index 7d269ef9e062..f6a07dc32ae4 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -453,6 +453,7 @@ static int apds9960_set_power_state(struct apds9960_data *data, bool on)
usleep_range(data->als_adc_int_us,
APDS9960_MAX_INT_TIME_IN_US);
} else {
+ pm_runtime_mark_last_busy(dev);
ret = pm_runtime_put_autosuspend(dev);
}
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 961f9f990faf..e544fcfd5ced 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -130,10 +130,10 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
if (ret < 0)
break;
- /* return 0 since laser is likely pointed out of range */
+ /* return -EINVAL since laser is likely pointed out of range */
if (ret & LIDAR_REG_STATUS_INVALID) {
*reg = 0;
- ret = 0;
+ ret = -EINVAL;
break;
}
@@ -197,7 +197,7 @@ static irqreturn_t lidar_trigger_handler(int irq, void *private)
if (!ret) {
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
iio_get_time_ns());
- } else {
+ } else if (ret != -EINVAL) {
dev_err(&data->client->dev, "cannot read LIDAR measurement");
}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 944cd90417bc..d2d5d004f16d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1126,10 +1126,7 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev,
rcu_read_lock();
err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
- if (err)
- return false;
-
- ret = FIB_RES_DEV(res) == net_dev;
+ ret = err == 0 && FIB_RES_DEV(res) == net_dev;
rcu_read_unlock();
return ret;
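Besides folding the check into one expression, the rewrite above fixes an exit taken with rcu_read_lock() still held; keeping a single unlock path makes that mistake impossible. The same shape with a POSIX rwlock standing in for RCU:

/* build with: cc -pthread demo.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t tbl_lock = PTHREAD_RWLOCK_INITIALIZER;

static bool lookup_matches(int err, int found_dev, int want_dev)
{
    bool ret;

    pthread_rwlock_rdlock(&tbl_lock);
    /* no early return while the lock is held: fold the error
     * into the result and leave through the one exit below */
    ret = err == 0 && found_dev == want_dev;
    pthread_rwlock_unlock(&tbl_lock);

    return ret;
}

int main(void)
{
    printf("%d %d\n", lookup_matches(0, 3, 3), lookup_matches(-1, 3, 3));
    return 0;
}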
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 8d8af7a41a30..2281de122038 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1811,6 +1811,11 @@ static int validate_mad(const struct ib_mad_hdr *mad_hdr,
if (qp_num == 0)
valid = 1;
} else {
+ /* CM attributes other than ClassPortInfo only use Send method */
+ if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
+ (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
+ (mad_hdr->method != IB_MGMT_METHOD_SEND))
+ goto out;
/* Filter GSI packets sent to QP0 */
if (qp_num != 0)
valid = 1;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 2aba774f835b..a95a32ba596e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -512,7 +512,7 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
return len;
}
-static int ib_nl_send_msg(struct ib_sa_query *query)
+static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
{
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
@@ -526,7 +526,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
if (len <= 0)
return -EMSGSIZE;
- skb = nlmsg_new(len, GFP_KERNEL);
+ skb = nlmsg_new(len, gfp_mask);
if (!skb)
return -ENOMEM;
@@ -544,7 +544,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
/* Repair the nlmsg header length */
nlmsg_end(skb, nlh);
- ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL);
+ ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
if (!ret)
ret = len;
else
@@ -553,7 +553,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
return ret;
}
-static int ib_nl_make_request(struct ib_sa_query *query)
+static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
unsigned long flags;
unsigned long delay;
@@ -562,25 +562,27 @@ static int ib_nl_make_request(struct ib_sa_query *query)
INIT_LIST_HEAD(&query->list);
query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
+ /* Put the request on the list first. */
spin_lock_irqsave(&ib_nl_request_lock, flags);
- ret = ib_nl_send_msg(query);
- if (ret <= 0) {
- ret = -EIO;
- goto request_out;
- } else {
- ret = 0;
- }
-
delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
query->timeout = delay + jiffies;
list_add_tail(&query->list, &ib_nl_request_list);
/* Start the timeout if this is the only request */
if (ib_nl_request_list.next == &query->list)
queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
-
-request_out:
spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+ ret = ib_nl_send_msg(query, gfp_mask);
+ if (ret <= 0) {
+ ret = -EIO;
+ /* Remove the request */
+ spin_lock_irqsave(&ib_nl_request_lock, flags);
+ list_del(&query->list);
+ spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+ } else {
+ ret = 0;
+ }
+
return ret;
}
@@ -1108,7 +1110,7 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
- if (!ib_nl_make_request(query))
+ if (!ib_nl_make_request(query, gfp_mask))
return id;
}
ib_sa_disable_local_svc(query);
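The reordering above closes a window in which a fast reply could arrive before the request was on the list, leaving the response handler nothing to match; send failures are now unlinked after the fact instead. The same register-before-send discipline in a runnable pthread miniature, where the responder thread plays the netlink reply:

/* build with: cc -pthread demo.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int request_listed, reply_handled;

static void *responder(void *arg)
{
    pthread_mutex_lock(&lock);
    /* a reply only counts if the request is already registered */
    if (request_listed)
        reply_handled = 1;
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
    return arg;
}

int main(void)
{
    pthread_t t;

    pthread_mutex_lock(&lock);
    request_listed = 1;                 /* 1: put request on the list */
    pthread_mutex_unlock(&lock);

    pthread_create(&t, NULL, responder, NULL);  /* 2: then send it */

    pthread_mutex_lock(&lock);
    while (!reply_handled)
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);
    pthread_join(t, NULL);

    puts("reply matched a listed request");
    return 0;
}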
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 94816aeb95a0..1c02deab068f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -62,9 +62,11 @@ static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
* The ib_uobject locking scheme is as follows:
*
* - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
- * needs to be held during all idr operations. When an object is
+ * needs to be held during all idr write operations. When an object is
* looked up, a reference must be taken on the object's kref before
- * dropping this lock.
+ * dropping this lock. For read operations, rcu_read_lock()
+ * is used instead, but similarly the kref reference is grabbed
+ * before the rcu_read_unlock().
*
* - Each object also has an rwsem. This rwsem must be held for
* reading while an operation that uses the object is performed.
@@ -96,7 +98,7 @@ static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
static void release_uobj(struct kref *kref)
{
- kfree(container_of(kref, struct ib_uobject, ref));
+ kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
}
static void put_uobj(struct ib_uobject *uobj)
@@ -145,7 +147,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
{
struct ib_uobject *uobj;
- spin_lock(&ib_uverbs_idr_lock);
+ rcu_read_lock();
uobj = idr_find(idr, id);
if (uobj) {
if (uobj->context == context)
@@ -153,7 +155,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
else
uobj = NULL;
}
- spin_unlock(&ib_uverbs_idr_lock);
+ rcu_read_unlock();
return uobj;
}
@@ -2446,6 +2448,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
int i, sg_ind;
int is_ud;
ssize_t ret = -EINVAL;
+ size_t next_size;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
@@ -2490,7 +2493,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
goto out_put;
}
- ud = alloc_wr(sizeof(*ud), user_wr->num_sge);
+ next_size = sizeof(*ud);
+ ud = alloc_wr(next_size, user_wr->num_sge);
if (!ud) {
ret = -ENOMEM;
goto out_put;
@@ -2511,7 +2515,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
user_wr->opcode == IB_WR_RDMA_READ) {
struct ib_rdma_wr *rdma;
- rdma = alloc_wr(sizeof(*rdma), user_wr->num_sge);
+ next_size = sizeof(*rdma);
+ rdma = alloc_wr(next_size, user_wr->num_sge);
if (!rdma) {
ret = -ENOMEM;
goto out_put;
@@ -2525,7 +2530,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
struct ib_atomic_wr *atomic;
- atomic = alloc_wr(sizeof(*atomic), user_wr->num_sge);
+ next_size = sizeof(*atomic);
+ atomic = alloc_wr(next_size, user_wr->num_sge);
if (!atomic) {
ret = -ENOMEM;
goto out_put;
@@ -2540,7 +2546,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
} else if (user_wr->opcode == IB_WR_SEND ||
user_wr->opcode == IB_WR_SEND_WITH_IMM ||
user_wr->opcode == IB_WR_SEND_WITH_INV) {
- next = alloc_wr(sizeof(*next), user_wr->num_sge);
+ next_size = sizeof(*next);
+ next = alloc_wr(next_size, user_wr->num_sge);
if (!next) {
ret = -ENOMEM;
goto out_put;
@@ -2572,7 +2579,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
if (next->num_sge) {
next->sg_list = (void *) next +
- ALIGN(sizeof *next, sizeof (struct ib_sge));
+ ALIGN(next_size, sizeof(struct ib_sge));
if (copy_from_user(next->sg_list,
buf + sizeof cmd +
cmd.wr_count * cmd.wqe_size +
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 043a60ee6836..545906dec26d 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1516,7 +1516,7 @@ EXPORT_SYMBOL(ib_map_mr_sg);
* @sg_nents: number of entries in sg
* @set_page: driver page assignment function pointer
*
- * Core service helper for drivers to covert the largest
+ * Core service helper for drivers to convert the largest
* prefix of given sg list to a page vector. The sg list
* prefix converted is the prefix that meet the requirements
* of ib_map_mr_sg.
@@ -1533,7 +1533,7 @@ int ib_sg_to_pages(struct ib_mr *mr,
u64 last_end_dma_addr = 0, last_page_addr = 0;
unsigned int last_page_off = 0;
u64 page_mask = ~((u64)mr->page_size - 1);
- int i;
+ int i, ret;
mr->iova = sg_dma_address(&sgl[0]);
mr->length = 0;
@@ -1544,27 +1544,29 @@ int ib_sg_to_pages(struct ib_mr *mr,
u64 end_dma_addr = dma_addr + dma_len;
u64 page_addr = dma_addr & page_mask;
- if (i && page_addr != dma_addr) {
- if (last_end_dma_addr != dma_addr) {
- /* gap */
- goto done;
-
- } else if (last_page_off + dma_len <= mr->page_size) {
- /* chunk this fragment with the last */
- mr->length += dma_len;
- last_end_dma_addr += dma_len;
- last_page_off += dma_len;
- continue;
- } else {
- /* map starting from the next page */
- page_addr = last_page_addr + mr->page_size;
- dma_len -= mr->page_size - last_page_off;
- }
+ /*
+ * For the second and later elements, check whether either the
+ * end of element i-1 or the start of element i is not aligned
+ * on a page boundary.
+ */
+ if (i && (last_page_off != 0 || page_addr != dma_addr)) {
+ /* Stop mapping if there is a gap. */
+ if (last_end_dma_addr != dma_addr)
+ break;
+
+ /*
+ * Coalesce this element with the last. If it is small
+ * enough just update mr->length. Otherwise start
+ * mapping from the next page.
+ */
+ goto next_page;
}
do {
- if (unlikely(set_page(mr, page_addr)))
- goto done;
+ ret = set_page(mr, page_addr);
+ if (unlikely(ret < 0))
+ return i ? : ret;
+next_page:
page_addr += mr->page_size;
} while (page_addr < end_dma_addr);
@@ -1574,7 +1576,6 @@ int ib_sg_to_pages(struct ib_mr *mr,
last_page_off = end_dma_addr & ~page_mask;
}
-done:
return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
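The rewritten walk distinguishes three cases for each element after the first: a real gap ends the mapping, a continuation that stays within the last mapped page only grows the length, and anything longer resumes at the next page boundary. The page accounting in standalone form (addresses and lengths illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t page_size = 4096;
    uint64_t page_mask = ~(page_size - 1);
    struct { uint64_t addr, len; } sg[] = {
        { 0x10000, 6000 },              /* ends mid-page */
        { 0x10000 + 6000, 3000 },       /* contiguous continuation */
    };
    uint64_t last_end = 0;
    int i, pages = 0;

    for (i = 0; i < 2; i++) {
        uint64_t addr = sg[i].addr;
        uint64_t end = addr + sg[i].len;
        uint64_t page = addr & page_mask;

        if (i && ((last_end & ~page_mask) || page != addr)) {
            if (last_end != addr)
                break;                  /* gap: stop mapping */
            /* coalesce: resume from the next page; if the
             * fragment fits inside the last mapped page, no
             * new page is mapped at all */
            page = (last_end & page_mask) + page_size;
        }
        for (; page < end; page += page_size)
            pages++;
        last_end = end;
    }
    printf("mapped %d pages\n", pages);  /* 3 */
    return 0;
}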
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f567160a4a56..97d6878f9938 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -456,7 +456,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
props->max_sge = min(dev->dev->caps.max_sq_sg,
dev->dev->caps.max_rq_sg);
- props->max_sge_rd = props->max_sge;
+ props->max_sge_rd = MLX4_MAX_SGE_RD;
props->max_cq = dev->dev->quotas.cq;
props->max_cqe = dev->dev->caps.max_cqes;
props->max_mr = dev->dev->quotas.mpt;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a2e4ca56da44..13eaaf45288f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -34,6 +34,7 @@
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
@@ -795,8 +796,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (err)
goto err_mtt;
- qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), gfp);
- qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), gfp);
+ qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(u64), gfp);
+ if (!qp->sq.wrid)
+ qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64),
+ gfp, PAGE_KERNEL);
+ qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(u64), gfp);
+ if (!qp->rq.wrid)
+ qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64),
+ gfp, PAGE_KERNEL);
if (!qp->sq.wrid || !qp->rq.wrid) {
err = -ENOMEM;
goto err_wrid;
@@ -886,8 +893,8 @@ err_wrid:
if (qp_has_rq(init_attr))
mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
} else {
- kfree(qp->sq.wrid);
- kfree(qp->rq.wrid);
+ kvfree(qp->sq.wrid);
+ kvfree(qp->rq.wrid);
}
err_mtt:
@@ -1062,8 +1069,8 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
&qp->db);
ib_umem_release(qp->umem);
} else {
- kfree(qp->sq.wrid);
- kfree(qp->rq.wrid);
+ kvfree(qp->sq.wrid);
+ kvfree(qp->rq.wrid);
if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
free_proxy_bufs(&dev->ib_dev, qp);
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index dce5dfe3a70e..8d133c40fa0e 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -34,6 +34,7 @@
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include "mlx4_ib.h"
#include "user.h"
@@ -172,8 +173,12 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
if (!srq->wrid) {
- err = -ENOMEM;
- goto err_mtt;
+ srq->wrid = __vmalloc(srq->msrq.max * sizeof(u64),
+ GFP_KERNEL, PAGE_KERNEL);
+ if (!srq->wrid) {
+ err = -ENOMEM;
+ goto err_mtt;
+ }
}
}
@@ -204,7 +209,7 @@ err_wrid:
if (pd->uobject)
mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
else
- kfree(srq->wrid);
+ kvfree(srq->wrid);
err_mtt:
mlx4_mtt_cleanup(dev->dev, &srq->mtt);
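Both mlx4 hunks above apply the same fallback: try kmalloc() for the work-request ID array, fall back to __vmalloc() when contiguous memory is unavailable, and free with kvfree(), which handles either kind. A hypothetical helper capturing the pattern (wrid_alloc() is not in the tree; later kernels added kvmalloc() for exactly this):

#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Not part of this patch: first try a physically contiguous buffer,
 * then fall back to vmalloc space when memory is fragmented. Callers
 * release the result with kvfree() in both cases. */
static void *wrid_alloc(size_t size, gfp_t gfp)
{
	void *buf = kmalloc(size, gfp);

	if (!buf)
		buf = __vmalloc(size, gfp, PAGE_KERNEL);
	return buf;
}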
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index ec8993a7b3be..6000f7aeede9 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -381,7 +381,19 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
}
}
} else if (ent->cur > 2 * ent->limit) {
- if (!someone_adding(cache) &&
+ /*
+ * The remove_keys() logic is performed as a garbage collection
+ * task. Such a task is intended to run when no other active
+ * processes are running.
+ *
+ * need_resched() returns TRUE if there are user tasks to be
+ * activated in the near future.
+ *
+ * In that case we don't execute remove_keys() and postpone the
+ * garbage collection work to the next cycle, in order to free
+ * CPU resources for other tasks.
+ */
+ if (!need_resched() && !someone_adding(cache) &&
time_after(jiffies, cache->last_add + 300 * HZ)) {
remove_keys(dev, i, 1);
if (ent->cur > ent->limit)
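The mlx5 change gates MR-cache shrinking on need_resched(), so the garbage collector yields whenever runnable tasks are waiting for the CPU. A hedged kernel-style sketch of that shape (gc_wq, gc_work and do_garbage_collection() are invented names, not mlx5 symbols):

#include <linux/sched.h>
#include <linux/workqueue.h>

static struct workqueue_struct *gc_wq;
static struct delayed_work gc_work;
static void do_garbage_collection(void);

static void gc_work_fn(struct work_struct *work)
{
	/* Runnable tasks want this CPU: postpone the expensive cleanup
	 * one cycle rather than compete with them. */
	if (need_resched()) {
		queue_delayed_work(gc_wq, &gc_work, HZ);
		return;
	}
	do_garbage_collection();
}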
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index 5e27f76805e2..4c7c3c84a741 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -292,7 +292,7 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
qib_dev_porterr(ppd->dd, ppd->port,
"QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]);
- if ((peek[2] & 2) == 0) {
+ if ((peek[2] & 4) == 0) {
/*
* If cable is paged, rather than "flat memory", we need to
* set the page to zero, Even if it already appears to be zero.
@@ -538,7 +538,7 @@ int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
QSFP_DATE_LEN, cd.date);
sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
- QSFP_LOT_LEN, cd.date);
+ QSFP_LOT_LEN, cd.lot);
while (bidx < QSFP_DEFAULT_HDR_CNT) {
int iidx;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 2baf5ad251ed..bc803f33d5f6 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -329,9 +329,9 @@ struct qib_sge {
struct qib_mr {
struct ib_mr ibmr;
struct ib_umem *umem;
- struct qib_mregion mr; /* must be last */
u64 *pages;
u32 npages;
+ struct qib_mregion mr; /* must be last */
};
/*
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index a93070210109..42f4da620f2e 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1293,7 +1293,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
sector_t sector_off = mr_status.sig_err.sig_err_offset;
- do_div(sector_off, sector_size + 8);
+ sector_div(sector_off, sector_size + 8);
*sector = scsi_get_lba(iser_task->sc) + sector_off;
pr_err("PI error found type %d at sector %llx "
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index dfbbbb28090b..8a51c3b5d657 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -157,16 +157,9 @@ isert_create_qp(struct isert_conn *isert_conn,
attr.recv_cq = comp->cq;
attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
- /*
- * FIXME: Use devattr.max_sge - 2 for max_send_sge as
- * work-around for RDMA_READs with ConnectX-2.
- *
- * Also, still make sure to have at least two SGEs for
- * outgoing control PDU responses.
- */
- attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
- isert_conn->max_sge = attr.cap.max_send_sge;
-
+ attr.cap.max_send_sge = device->dev_attr.max_sge;
+ isert_conn->max_sge = min(device->dev_attr.max_sge,
+ device->dev_attr.max_sge_rd);
attr.cap.max_recv_sge = 1;
attr.sq_sig_type = IB_SIGNAL_REQ_WR;
attr.qp_type = IB_QPT_RC;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 9909022dc6c3..3db9a659719b 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -488,7 +488,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
struct ib_qp *qp;
struct ib_fmr_pool *fmr_pool = NULL;
struct srp_fr_pool *fr_pool = NULL;
- const int m = 1 + dev->use_fast_reg;
+ const int m = dev->use_fast_reg ? 3 : 1;
struct ib_cq_init_attr cq_attr = {};
int ret;
@@ -994,16 +994,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
ret = srp_lookup_path(ch);
if (ret)
- return ret;
+ goto out;
while (1) {
init_completion(&ch->done);
ret = srp_send_req(ch, multich);
if (ret)
- return ret;
+ goto out;
ret = wait_for_completion_interruptible(&ch->done);
if (ret < 0)
- return ret;
+ goto out;
/*
* The CM event handling code will set status to
@@ -1011,15 +1011,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
* back, or SRP_DLID_REDIRECT if we get a lid/qp
* redirect REJ back.
*/
- switch (ch->status) {
+ ret = ch->status;
+ switch (ret) {
case 0:
ch->connected = true;
- return 0;
+ goto out;
case SRP_PORT_REDIRECT:
ret = srp_lookup_path(ch);
if (ret)
- return ret;
+ goto out;
break;
case SRP_DLID_REDIRECT:
@@ -1028,13 +1029,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
case SRP_STALE_CONN:
shost_printk(KERN_ERR, target->scsi_host, PFX
"giving up on stale connection\n");
- ch->status = -ECONNRESET;
- return ch->status;
+ ret = -ECONNRESET;
+ goto out;
default:
- return ch->status;
+ goto out;
}
}
+
+out:
+ return ret <= 0 ? ret : -ENODEV;
}
static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
@@ -1309,7 +1313,7 @@ reset_state:
}
static int srp_map_finish_fr(struct srp_map_state *state,
- struct srp_rdma_ch *ch)
+ struct srp_rdma_ch *ch, int sg_nents)
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
@@ -1324,10 +1328,10 @@ static int srp_map_finish_fr(struct srp_map_state *state,
WARN_ON_ONCE(!dev->use_fast_reg);
- if (state->sg_nents == 0)
+ if (sg_nents == 0)
return 0;
- if (state->sg_nents == 1 && target->global_mr) {
+ if (sg_nents == 1 && target->global_mr) {
srp_map_desc(state, sg_dma_address(state->sg),
sg_dma_len(state->sg),
target->global_mr->rkey);
@@ -1341,8 +1345,7 @@ static int srp_map_finish_fr(struct srp_map_state *state,
rkey = ib_inc_rkey(desc->mr->rkey);
ib_update_fast_reg_key(desc->mr, rkey);
- n = ib_map_mr_sg(desc->mr, state->sg, state->sg_nents,
- dev->mr_page_size);
+ n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, dev->mr_page_size);
if (unlikely(n < 0))
return n;
@@ -1448,16 +1451,15 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
state->fr.next = req->fr_list;
state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
state->sg = scat;
- state->sg_nents = scsi_sg_count(req->scmnd);
- while (state->sg_nents) {
+ while (count) {
int i, n;
- n = srp_map_finish_fr(state, ch);
+ n = srp_map_finish_fr(state, ch, count);
if (unlikely(n < 0))
return n;
- state->sg_nents -= n;
+ count -= n;
for (i = 0; i < n; i++)
state->sg = sg_next(state->sg);
}
@@ -1517,10 +1519,12 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
if (dev->use_fast_reg) {
state.sg = idb_sg;
- state.sg_nents = 1;
sg_set_buf(idb_sg, req->indirect_desc, idb_len);
idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
- ret = srp_map_finish_fr(&state, ch);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ idb_sg->dma_length = idb_sg->length; /* hack^2 */
+#endif
+ ret = srp_map_finish_fr(&state, ch, 1);
if (ret < 0)
return ret;
} else if (dev->use_fmr) {
@@ -1655,7 +1659,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
return ret;
req->nmdesc++;
} else {
- idb_rkey = target->global_mr->rkey;
+ idb_rkey = cpu_to_be32(target->global_mr->rkey);
}
indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 87a2a919dc43..f6af531f9f32 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -300,10 +300,7 @@ struct srp_map_state {
dma_addr_t base_dma_addr;
u32 dma_len;
u32 total_len;
- union {
- unsigned int npages;
- int sg_nents;
- };
+ unsigned int npages;
unsigned int nmdesc;
unsigned int ndesc;
};
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f1042daef9ad..ac7387686ddc 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2159,7 +2159,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
sg_res = aligned_nrpages(sg->offset, sg->length);
sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
sg->dma_length = sg->length;
- pteval = (sg_phys(sg) & PAGE_MASK) | prot;
+ pteval = page_to_phys(sg_page(sg)) | prot;
phys_pfn = pteval >> VTD_PAGE_SHIFT;
}
@@ -3704,7 +3704,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
for_each_sg(sglist, sg, nelems, i) {
BUG_ON(!sg_page(sg));
- sg->dma_address = sg_phys(sg);
+ sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
sg->dma_length = sg->length;
}
return nelems;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index abae363c7b9b..0e3b0092ec92 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1430,7 +1430,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
for_each_sg(sg, s, nents, i) {
- phys_addr_t phys = sg_phys(s);
+ phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
/*
* We are mapping on IOMMU page boundaries, so offset within
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 598ab3f0e0ac..cadf104e3074 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -210,7 +210,12 @@ int __init fpga_irq_of_init(struct device_node *node,
parent_irq = -1;
}
+#ifdef CONFIG_ARCH_VERSATILE
+ fpga_irq_init(base, node->name, IRQ_SIC_START, parent_irq, valid_mask,
+ node);
+#else
fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
+#endif
writel(clear_mask, base + IRQ_ENABLE_CLEAR);
writel(clear_mask, base + FIQ_ENABLE_CLEAR);
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 375be509e95f..2a506fe0c8a4 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -67,8 +67,7 @@ static int write_modem(struct cardstate *cs)
struct sk_buff *skb = bcs->tx_skb;
int sent = -EOPNOTSUPP;
- if (!tty || !tty->driver || !skb)
- return -EINVAL;
+ WARN_ON(!tty || !tty->ops || !skb);
if (!skb->len) {
dev_kfree_skb_any(skb);
@@ -109,8 +108,7 @@ static int send_cb(struct cardstate *cs)
unsigned long flags;
int sent = 0;
- if (!tty || !tty->driver)
- return -EFAULT;
+ WARN_ON(!tty || !tty->ops);
cb = cs->cmdbuf;
if (!cb)
@@ -370,19 +368,18 @@ static void gigaset_freecshw(struct cardstate *cs)
tasklet_kill(&cs->write_tasklet);
if (!cs->hw.ser)
return;
- dev_set_drvdata(&cs->hw.ser->dev.dev, NULL);
platform_device_unregister(&cs->hw.ser->dev);
- kfree(cs->hw.ser);
- cs->hw.ser = NULL;
}
static void gigaset_device_release(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
+ struct cardstate *cs = dev_get_drvdata(dev);
- /* adapted from platform_device_release() in drivers/base/platform.c */
- kfree(dev->platform_data);
- kfree(pdev->resource);
+ if (!cs)
+ return;
+ dev_set_drvdata(dev, NULL);
+ kfree(cs->hw.ser);
+ cs->hw.ser = NULL;
}
/*
@@ -432,7 +429,9 @@ static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
struct tty_struct *tty = cs->hw.ser->tty;
unsigned int set, clear;
- if (!tty || !tty->driver || !tty->ops->tiocmset)
+ WARN_ON(!tty || !tty->ops);
+ /* tiocmset is an optional tty driver method */
+ if (!tty->ops->tiocmset)
return -EINVAL;
set = new_state & ~old_state;
clear = old_state & ~new_state;
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index a77eea594b69..cb428b9ee441 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -1170,7 +1170,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
if (ipac->type & IPAC_TYPE_IPACX) {
ista = ReadIPAC(ipac, ISACX_ISTA);
- while (ista && cnt--) {
+ while (ista && --cnt) {
pr_debug("%s: ISTA %02x\n", ipac->name, ista);
if (ista & IPACX__ICA)
ipac_irq(&ipac->hscx[0], ista);
@@ -1182,7 +1182,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
}
} else if (ipac->type & IPAC_TYPE_IPAC) {
ista = ReadIPAC(ipac, IPAC_ISTA);
- while (ista && cnt--) {
+ while (ista && --cnt) {
pr_debug("%s: ISTA %02x\n", ipac->name, ista);
if (ista & (IPAC__ICD | IPAC__EXD)) {
istad = ReadISAC(isac, ISAC_ISTA);
@@ -1200,7 +1200,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
ista = ReadIPAC(ipac, IPAC_ISTA);
}
} else if (ipac->type & IPAC_TYPE_HSCX) {
- while (cnt) {
+ while (--cnt) {
ista = ReadIPAC(ipac, IPAC_ISTAB + ipac->hscx[1].off);
pr_debug("%s: B2 ISTA %02x\n", ipac->name, ista);
if (ista)
@@ -1211,7 +1211,6 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
mISDNisac_irq(isac, istad);
if (0 == (ista | istad))
break;
- cnt--;
}
}
if (cnt > maxloop) /* only for ISAC/HSCX without PCI IRQ test */
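The mISDNipac fix swaps post-decrement for pre-decrement in the loop budgets: 'cnt--' decrements even on the final, failing test, so the counter escapes the loop one lower than intended. A self-contained demonstration of the difference:

#include <stdio.h>

int main(void)
{
	int cnt;

	/* post-decrement: the test with cnt == 1 passes, the body runs,
	 * and the failing test with cnt == 0 still decrements. */
	cnt = 1;
	while (cnt--)
		;
	printf("post-decrement leaves cnt = %d\n", cnt);	/* -1 */

	/* pre-decrement: the loop never enters once the budget is spent
	 * and the counter bottoms out at 0. */
	cnt = 1;
	while (--cnt)
		;
	printf("pre-decrement leaves cnt = %d\n", cnt);	/* 0 */
	return 0;
}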
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index a16bf56d3f28..85a339030e4b 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -18,6 +18,7 @@ if NVM
config NVM_DEBUG
bool "Open-Channel SSD debugging support"
+ default n
---help---
Exposes a debug management interface to create/remove targets at:
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 86ce887b2ed6..8f41b245cd55 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(nvm_unregister_target);
void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
dma_addr_t *dma_handler)
{
- return dev->ops->dev_dma_alloc(dev->q, dev->ppalist_pool, mem_flags,
+ return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags,
dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);
@@ -97,15 +97,47 @@ static struct nvmm_type *nvm_find_mgr_type(const char *name)
return NULL;
}
+struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
+{
+ struct nvmm_type *mt;
+ int ret;
+
+ lockdep_assert_held(&nvm_lock);
+
+ list_for_each_entry(mt, &nvm_mgrs, list) {
+ ret = mt->register_mgr(dev);
+ if (ret < 0) {
+ pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
+ ret, dev->name);
+ return NULL; /* initialization failed */
+ } else if (ret > 0)
+ return mt;
+ }
+
+ return NULL;
+}
+
int nvm_register_mgr(struct nvmm_type *mt)
{
+ struct nvm_dev *dev;
int ret = 0;
down_write(&nvm_lock);
- if (nvm_find_mgr_type(mt->name))
+ if (nvm_find_mgr_type(mt->name)) {
ret = -EEXIST;
- else
+ goto finish;
+ } else {
list_add(&mt->list, &nvm_mgrs);
+ }
+
+ /* try to register a media manager if any device has none configured */
+ list_for_each_entry(dev, &nvm_devices, devices) {
+ if (dev->mt)
+ continue;
+
+ dev->mt = nvm_init_mgr(dev);
+ }
+finish:
up_write(&nvm_lock);
return ret;
@@ -123,26 +155,6 @@ void nvm_unregister_mgr(struct nvmm_type *mt)
}
EXPORT_SYMBOL(nvm_unregister_mgr);
-/* register with device with a supported manager */
-static int register_mgr(struct nvm_dev *dev)
-{
- struct nvmm_type *mt;
- int ret = 0;
-
- list_for_each_entry(mt, &nvm_mgrs, list) {
- ret = mt->register_mgr(dev);
- if (ret > 0) {
- dev->mt = mt;
- break; /* successfully initialized */
- }
- }
-
- if (!ret)
- pr_info("nvm: no compatible nvm manager found.\n");
-
- return ret;
-}
-
static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
struct nvm_dev *dev;
@@ -246,7 +258,7 @@ static int nvm_init(struct nvm_dev *dev)
if (!dev->q || !dev->ops)
return ret;
- if (dev->ops->identity(dev->q, &dev->identity)) {
+ if (dev->ops->identity(dev, &dev->identity)) {
pr_err("nvm: device could not be identified\n");
goto err;
}
@@ -271,14 +283,6 @@ static int nvm_init(struct nvm_dev *dev)
goto err;
}
- down_write(&nvm_lock);
- ret = register_mgr(dev);
- up_write(&nvm_lock);
- if (ret < 0)
- goto err;
- if (!ret)
- return 0;
-
pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
dev->name, dev->sec_per_pg, dev->nr_planes,
dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
@@ -326,8 +330,7 @@ int nvm_register(struct request_queue *q, char *disk_name,
}
if (dev->ops->max_phys_sect > 1) {
- dev->ppalist_pool = dev->ops->create_dma_pool(dev->q,
- "ppalist");
+ dev->ppalist_pool = dev->ops->create_dma_pool(dev, "ppalist");
if (!dev->ppalist_pool) {
pr_err("nvm: could not create ppa pool\n");
ret = -ENOMEM;
@@ -335,7 +338,9 @@ int nvm_register(struct request_queue *q, char *disk_name,
}
}
+ /* register device with a supported media manager */
down_write(&nvm_lock);
+ dev->mt = nvm_init_mgr(dev);
list_add(&dev->devices, &nvm_devices);
up_write(&nvm_lock);
@@ -380,19 +385,13 @@ static int nvm_create_target(struct nvm_dev *dev,
struct nvm_tgt_type *tt;
struct nvm_target *t;
void *targetdata;
- int ret = 0;
- down_write(&nvm_lock);
if (!dev->mt) {
- ret = register_mgr(dev);
- if (!ret)
- ret = -ENODEV;
- if (ret < 0) {
- up_write(&nvm_lock);
- return ret;
- }
+ pr_info("nvm: device has no media manager registered.\n");
+ return -ENODEV;
}
+ down_write(&nvm_lock);
tt = nvm_find_target_type(create->tgttype);
if (!tt) {
pr_err("nvm: target type %s not found\n", create->tgttype);
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 35dde84b71e9..f434e89e1c7a 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -195,7 +195,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
}
if (dev->ops->get_l2p_tbl) {
- ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages,
+ ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
gennvm_block_map, dev);
if (ret) {
pr_err("gennvm: could not read L2P table.\n");
@@ -219,6 +219,9 @@ static int gennvm_register(struct nvm_dev *dev)
struct gen_nvm *gn;
int ret;
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
if (!gn)
return -ENOMEM;
@@ -242,12 +245,14 @@ static int gennvm_register(struct nvm_dev *dev)
return 1;
err:
gennvm_free(dev);
+ module_put(THIS_MODULE);
return ret;
}
static void gennvm_unregister(struct nvm_dev *dev)
{
gennvm_free(dev);
+ module_put(THIS_MODULE);
}
static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
@@ -262,14 +267,11 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
if (list_empty(&lun->free_list)) {
pr_err_ratelimited("gennvm: lun %u have no free pages available",
lun->vlun.id);
- spin_unlock(&vlun->lock);
goto out;
}
- while (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks) {
- spin_unlock(&vlun->lock);
+ if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks)
goto out;
- }
blk = list_first_entry(&lun->free_list, struct nvm_block, list);
list_move_tail(&blk->list, &lun->used_list);
@@ -278,8 +280,8 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
lun->vlun.nr_free_blocks--;
lun->vlun.nr_inuse_blocks++;
- spin_unlock(&vlun->lock);
out:
+ spin_unlock(&vlun->lock);
return blk;
}
@@ -349,7 +351,7 @@ static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
gennvm_generic_to_addr_mode(dev, rqd);
rqd->dev = dev;
- return dev->ops->submit_io(dev->q, rqd);
+ return dev->ops->submit_io(dev, rqd);
}
static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
@@ -385,7 +387,7 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
if (!dev->ops->set_bb_tbl)
return;
- if (dev->ops->set_bb_tbl(dev->q, rqd, 1))
+ if (dev->ops->set_bb_tbl(dev, rqd, 1))
return;
gennvm_addr_to_generic_mode(dev, rqd);
@@ -453,7 +455,7 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
gennvm_generic_to_addr_mode(dev, &rqd);
- ret = dev->ops->erase_block(dev->q, &rqd);
+ ret = dev->ops->erase_block(dev, &rqd);
if (plane_cnt)
nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 75e59c3a3f96..134e4faba482 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -182,7 +182,7 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
struct nvm_block *blk;
struct rrpc_block *rblk;
- blk = nvm_get_blk(rrpc->dev, rlun->parent, 0);
+ blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
if (!blk)
return NULL;
@@ -202,6 +202,20 @@ static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
nvm_put_blk(rrpc->dev, rblk->parent);
}
+static void rrpc_put_blks(struct rrpc *rrpc)
+{
+ struct rrpc_lun *rlun;
+ int i;
+
+ for (i = 0; i < rrpc->nr_luns; i++) {
+ rlun = &rrpc->luns[i];
+ if (rlun->cur)
+ rrpc_put_blk(rrpc, rlun->cur);
+ if (rlun->gc_cur)
+ rrpc_put_blk(rrpc, rlun->gc_cur);
+ }
+}
+
static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
{
int next = atomic_inc_return(&rrpc->next_lun);
@@ -1002,7 +1016,7 @@ static int rrpc_map_init(struct rrpc *rrpc)
return 0;
/* Bring up the mapping table from device */
- ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages,
+ ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
rrpc_l2p_update, rrpc);
if (ret) {
pr_err("nvm: rrpc: could not read L2P table.\n");
@@ -1224,18 +1238,21 @@ static int rrpc_luns_configure(struct rrpc *rrpc)
rblk = rrpc_get_blk(rrpc, rlun, 0);
if (!rblk)
- return -EINVAL;
+ goto err;
rrpc_set_lun_cur(rlun, rblk);
/* Emergency gc block */
rblk = rrpc_get_blk(rrpc, rlun, 1);
if (!rblk)
- return -EINVAL;
+ goto err;
rlun->gc_cur = rblk;
}
return 0;
+err:
+ rrpc_put_blks(rrpc);
+ return -EINVAL;
}
static struct nvm_tgt_type tt_rrpc;
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 1fa45695b68a..c219a053c7f6 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1207,6 +1207,12 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
dm_block_t held_root;
/*
+ * We commit to ensure that the btree roots we are about to
+ * increment are up to date.
+ */
+ __commit_transaction(pmd);
+
+ /*
* Copy the superblock.
*/
dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
@@ -1538,7 +1544,7 @@ static int __remove(struct dm_thin_device *td, dm_block_t block)
static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
{
int r;
- unsigned count;
+ unsigned count, total_count = 0;
struct dm_pool_metadata *pmd = td->pmd;
dm_block_t keys[1] = { td->id };
__le64 value;
@@ -1561,11 +1567,29 @@ static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_
if (r)
return r;
- r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count);
- if (r)
- return r;
+ /*
+ * dm_btree_remove_leaves() stops at the first unmapped entry, so
+ * we have to loop round finding the mapped ranges.
+ */
+ while (begin < end) {
+ r = dm_btree_lookup_next(&pmd->bl_info, mapping_root, &begin, &begin, &value);
+ if (r == -ENODATA)
+ break;
+
+ if (r)
+ return r;
+
+ if (begin >= end)
+ break;
+
+ r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count);
+ if (r)
+ return r;
+
+ total_count += count;
+ }
- td->mapped_blocks -= count;
+ td->mapped_blocks -= total_count;
td->changed = 1;
/*
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index c573402033b2..b1ced58eb5e1 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -63,6 +63,11 @@ int lower_bound(struct btree_node *n, uint64_t key)
return bsearch(n, key, 0);
}
+static int upper_bound(struct btree_node *n, uint64_t key)
+{
+ return bsearch(n, key, 1);
+}
+
void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
struct dm_btree_value_type *vt)
{
@@ -252,6 +257,16 @@ static void pop_frame(struct del_stack *s)
dm_tm_unlock(s->tm, f->b);
}
+static void unlock_all_frames(struct del_stack *s)
+{
+ struct frame *f;
+
+ while (unprocessed_frames(s)) {
+ f = s->spine + s->top--;
+ dm_tm_unlock(s->tm, f->b);
+ }
+}
+
int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
{
int r;
@@ -308,9 +323,13 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
pop_frame(s);
}
}
-
out:
+ if (r) {
+ /* clean up all remaining frames of the del_stack */
+ unlock_all_frames(s);
+ }
kfree(s);
+
return r;
}
EXPORT_SYMBOL_GPL(dm_btree_del);
@@ -392,6 +411,82 @@ int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
}
EXPORT_SYMBOL_GPL(dm_btree_lookup);
+static int dm_btree_lookup_next_single(struct dm_btree_info *info, dm_block_t root,
+ uint64_t key, uint64_t *rkey, void *value_le)
+{
+ int r, i;
+ uint32_t flags, nr_entries;
+ struct dm_block *node;
+ struct btree_node *n;
+
+ r = bn_read_lock(info, root, &node);
+ if (r)
+ return r;
+
+ n = dm_block_data(node);
+ flags = le32_to_cpu(n->header.flags);
+ nr_entries = le32_to_cpu(n->header.nr_entries);
+
+ if (flags & INTERNAL_NODE) {
+ i = lower_bound(n, key);
+ if (i < 0 || i >= nr_entries) {
+ r = -ENODATA;
+ goto out;
+ }
+
+ r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
+ if (r == -ENODATA && i < (nr_entries - 1)) {
+ i++;
+ r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
+ }
+
+ } else {
+ i = upper_bound(n, key);
+ if (i < 0 || i >= nr_entries) {
+ r = -ENODATA;
+ goto out;
+ }
+
+ *rkey = le64_to_cpu(n->keys[i]);
+ memcpy(value_le, value_ptr(n, i), info->value_type.size);
+ }
+out:
+ dm_tm_unlock(info->tm, node);
+ return r;
+}
+
+int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, uint64_t *rkey, void *value_le)
+{
+ unsigned level;
+ int r = -ENODATA;
+ __le64 internal_value_le;
+ struct ro_spine spine;
+
+ init_ro_spine(&spine, info);
+ for (level = 0; level < info->levels - 1u; level++) {
+ r = btree_lookup_raw(&spine, root, keys[level],
+ lower_bound, rkey,
+ &internal_value_le, sizeof(uint64_t));
+ if (r)
+ goto out;
+
+ if (*rkey != keys[level]) {
+ r = -ENODATA;
+ goto out;
+ }
+
+ root = le64_to_cpu(internal_value_le);
+ }
+
+ r = dm_btree_lookup_next_single(info, root, keys[level], rkey, value_le);
+out:
+ exit_ro_spine(&spine);
+ return r;
+}
+
+EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
+
/*
* Splits a node by creating a sibling node and shifting half the nodes
* contents across. Assumes there is a parent node, and it has room for
@@ -473,8 +568,10 @@ static int btree_split_sibling(struct shadow_spine *s, unsigned parent_index,
r = insert_at(sizeof(__le64), pn, parent_index + 1,
le64_to_cpu(rn->keys[0]), &location);
- if (r)
+ if (r) {
+ unlock_block(s->info, right);
return r;
+ }
if (key < le64_to_cpu(rn->keys[0])) {
unlock_block(s->info, right);
diff --git a/drivers/md/persistent-data/dm-btree.h b/drivers/md/persistent-data/dm-btree.h
index 11d8cf78621d..c74301fa5a37 100644
--- a/drivers/md/persistent-data/dm-btree.h
+++ b/drivers/md/persistent-data/dm-btree.h
@@ -110,6 +110,13 @@ int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, void *value_le);
/*
+ * Tries to find the first key where the bottom level key is >= the
+ * one given. Useful for skipping empty sections of the btree.
+ */
+int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, uint64_t *rkey, void *value_le);
+
+/*
* Insertion (or overwrite an existing value). O(ln(n))
*/
int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
@@ -135,9 +142,10 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, dm_block_t *new_root);
/*
- * Removes values between 'keys' and keys2, where keys2 is keys with the
- * final key replaced with 'end_key'. 'end_key' is the one-past-the-end
- * value. 'keys' may be altered.
+ * Removes a _contiguous_ run of values starting from 'keys' and not
+ * reaching keys2 (where keys2 is keys with the final key replaced with
+ * 'end_key'). 'end_key' is the one-past-the-end value. 'keys' may be
+ * altered.
*/
int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, uint64_t end_key,
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 53091295fce9..fca6dbcf9a47 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -136,7 +136,7 @@ static int brb_push(struct bop_ring_buffer *brb,
return 0;
}
-static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
+static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
{
struct block_op *bop;
@@ -147,6 +147,17 @@ static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
result->type = bop->type;
result->block = bop->block;
+ return 0;
+}
+
+static int brb_pop(struct bop_ring_buffer *brb)
+{
+ struct block_op *bop;
+
+ if (brb_empty(brb))
+ return -ENODATA;
+
+ bop = brb->bops + brb->begin;
brb->begin = brb_next(brb, brb->begin);
return 0;
@@ -211,7 +222,7 @@ static int apply_bops(struct sm_metadata *smm)
while (!brb_empty(&smm->uncommitted)) {
struct block_op bop;
- r = brb_pop(&smm->uncommitted, &bop);
+ r = brb_peek(&smm->uncommitted, &bop);
if (r) {
DMERR("bug in bop ring buffer");
break;
@@ -220,6 +231,8 @@ static int apply_bops(struct sm_metadata *smm)
r = commit_bop(smm, &bop);
if (r)
break;
+
+ brb_pop(&smm->uncommitted);
}
return r;
@@ -683,7 +696,6 @@ static struct dm_space_map bootstrap_ops = {
static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
int r, i;
- enum allocation_event ev;
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
dm_block_t old_len = smm->ll.nr_blocks;
@@ -705,11 +717,12 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
* allocate any new blocks.
*/
do {
- for (i = old_len; !r && i < smm->begin; i++) {
- r = sm_ll_inc(&smm->ll, i, &ev);
- if (r)
- goto out;
- }
+ for (i = old_len; !r && i < smm->begin; i++)
+ r = add_bop(smm, BOP_INC, i);
+
+ if (r)
+ goto out;
+
old_len = smm->begin;
r = apply_bops(smm);
@@ -754,7 +767,6 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
{
int r;
dm_block_t i;
- enum allocation_event ev;
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
smm->begin = superblock + 1;
@@ -782,7 +794,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
* allocated blocks that they were built from.
*/
for (i = superblock; !r && i < smm->begin; i++)
- r = sm_ll_inc(&smm->ll, i, &ev);
+ r = add_bop(smm, BOP_INC, i);
if (r)
return r;
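Splitting brb_pop() into brb_peek() plus brb_pop() lets apply_bops() commit a block operation before consuming it: an op whose commit fails stays queued for a later retry instead of being silently dropped. A minimal userspace model of the peek-then-pop discipline (struct ring and its int payload are placeholders for the bop ring buffer):

#include <stdio.h>

#define RING_SIZE 8

struct ring {
	int begin, end;
	int ops[RING_SIZE];
};

static int ring_empty(const struct ring *r) { return r->begin == r->end; }
static int ring_next(int i) { return (i + 1) % RING_SIZE; }

static int ring_push(struct ring *r, int op)
{
	int nend = ring_next(r->end);

	if (nend == r->begin)
		return -1;	/* full */
	r->ops[r->end] = op;
	r->end = nend;
	return 0;
}

/* peek leaves the entry in place, so a failed commit can retry it */
static int ring_peek(const struct ring *r, int *op)
{
	if (ring_empty(r))
		return -1;
	*op = r->ops[r->begin];
	return 0;
}

static void ring_pop(struct ring *r)
{
	if (!ring_empty(r))
		r->begin = ring_next(r->begin);
}

int main(void)
{
	struct ring r = { 0, 0, { 0 } };
	int op;

	ring_push(&r, 42);
	ring_peek(&r, &op);	/* commit the op... */
	ring_pop(&r);		/* ...and only then consume it */
	printf("committed %d, empty = %d\n", op, ring_empty(&r));
	return 0;
}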
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index d2e75c88f4d2..f40909793490 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -497,6 +497,7 @@ static u64 calculate_sr(struct cxl_context *ctx)
{
u64 sr = 0;
+ set_endian(sr);
if (ctx->master)
sr |= CXL_PSL_SR_An_MP;
if (mfspr(SPRN_LPCR) & LPCR_TC)
@@ -506,7 +507,6 @@ static u64 calculate_sr(struct cxl_context *ctx)
sr |= CXL_PSL_SR_An_HV;
} else {
sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
- set_endian(sr);
sr &= ~(CXL_PSL_SR_An_HV);
if (!test_tsk_thread_flag(current, TIF_32BIT))
sr |= CXL_PSL_SR_An_SF;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 970781a9e677..f6a7161e3b85 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1849,7 +1849,7 @@ static int xgbe_exit(struct xgbe_prv_data *pdata)
usleep_range(10, 15);
/* Poll Until Poll Condition */
- while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
+ while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
usleep_range(500, 600);
if (!count)
@@ -1873,7 +1873,7 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
/* Poll Until Poll Condition */
for (i = 0; i < pdata->tx_q_count; i++) {
count = 2000;
- while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
+ while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
MTL_Q_TQOMR, FTQ))
usleep_range(500, 600);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 2394191ad28e..a4799c1fc7d4 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -289,6 +289,7 @@ static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
struct sk_buff *skb)
{
struct device *dev = ndev_to_dev(tx_ring->ndev);
+ struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
struct xgene_enet_raw_desc *raw_desc;
__le64 *exp_desc = NULL, *exp_bufs = NULL;
dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
@@ -419,6 +420,7 @@ out:
raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
SET_VAL(USERINFO, tx_ring->tail));
tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
+ pdata->tx_level += count;
tx_ring->tail = tail;
return count;
@@ -429,14 +431,13 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
- struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
- u32 tx_level, cq_level;
+ u32 tx_level = pdata->tx_level;
int count;
- tx_level = pdata->ring_ops->len(tx_ring);
- cq_level = pdata->ring_ops->len(cp_ring);
- if (unlikely(tx_level > pdata->tx_qcnt_hi ||
- cq_level > pdata->cp_qcnt_hi)) {
+ if (tx_level < pdata->txc_level)
+ tx_level += ((typeof(pdata->tx_level))~0U);
+
+ if ((tx_level - pdata->txc_level) > pdata->tx_qcnt_hi) {
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
}
@@ -539,10 +540,13 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
struct xgene_enet_raw_desc *raw_desc, *exp_desc;
u16 head = ring->head;
u16 slots = ring->slots - 1;
- int ret, count = 0, processed = 0;
+ int ret, desc_count, count = 0, processed = 0;
+ bool is_completion;
do {
raw_desc = &ring->raw_desc[head];
+ desc_count = 0;
+ is_completion = false;
exp_desc = NULL;
if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
break;
@@ -559,18 +563,24 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
}
dma_rmb();
count++;
+ desc_count++;
}
- if (is_rx_desc(raw_desc))
+ if (is_rx_desc(raw_desc)) {
ret = xgene_enet_rx_frame(ring, raw_desc);
- else
+ } else {
ret = xgene_enet_tx_completion(ring, raw_desc);
+ is_completion = true;
+ }
xgene_enet_mark_desc_slot_empty(raw_desc);
if (exp_desc)
xgene_enet_mark_desc_slot_empty(exp_desc);
head = (head + 1) & slots;
count++;
+ desc_count++;
processed++;
+ if (is_completion)
+ pdata->txc_level += desc_count;
if (ret)
break;
@@ -580,10 +590,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
pdata->ring_ops->wr_cmd(ring, -count);
ring->head = head;
- if (netif_queue_stopped(ring->ndev)) {
- if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low)
- netif_wake_queue(ring->ndev);
- }
+ if (netif_queue_stopped(ring->ndev))
+ netif_start_queue(ring->ndev);
}
return processed;
@@ -1033,9 +1041,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
pdata->tx_ring->cp_ring = cp_ring;
pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
- pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
- pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
- pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;
+ pdata->tx_qcnt_hi = pdata->tx_ring->slots - 128;
return 0;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 054caf055f0a..70d5b62c125a 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -155,11 +155,11 @@ struct xgene_enet_pdata {
enum xgene_enet_id enet_id;
struct xgene_enet_desc_ring *tx_ring;
struct xgene_enet_desc_ring *rx_ring;
+ u16 tx_level;
+ u16 txc_level;
char *dev_name;
u32 rx_buff_cnt;
u32 tx_qcnt_hi;
- u32 cp_qcnt_hi;
- u32 cp_qcnt_low;
u32 rx_irq;
u32 txc_irq;
u8 cq_cnt;
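The xgene rewrite replaces ring-length reads with free-running 16-bit counters of enqueued (tx_level) and completed (txc_level) descriptors, widening tx_level by (u16)~0U when it has wrapped below txc_level before comparing the difference with tx_qcnt_hi. A sketch of the underlying arithmetic: with unsigned counters of the same width, modular subtraction alone yields the in-flight count across a wrap.

#include <stdint.h>
#include <stdio.h>

/* In-flight descriptors between two free-running 16-bit counters.
 * (The driver instead widens tx_level to 32 bits and adds (u16)~0U
 * when it detects a wrap, then subtracts.) */
static uint16_t pending(uint16_t tx_level, uint16_t txc_level)
{
	return (uint16_t)(tx_level - txc_level);
}

int main(void)
{
	printf("%u\n", pending(100, 50));	/* 50 in flight */
	printf("%u\n", pending(5, 65530));	/* wrapped: 11 in flight */
	return 0;
}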
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 2795d6db10e1..8b5988e210d5 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1016,13 +1016,12 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
8 * 4;
- ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
- &ring_header->dma);
+ ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size,
+ &ring_header->dma, GFP_KERNEL);
if (unlikely(!ring_header->desc)) {
- dev_err(&pdev->dev, "pci_alloc_consistend failed\n");
+ dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
goto err_nomem;
}
- memset(ring_header->desc, 0, ring_header->size);
/* init TPD ring */
tpd_ring[0].dma = roundup(ring_header->dma, 8);
diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig
index a3c7106fdf85..8ba7f8ff3434 100644
--- a/drivers/net/ethernet/aurora/Kconfig
+++ b/drivers/net/ethernet/aurora/Kconfig
@@ -13,6 +13,7 @@ if NET_VENDOR_AURORA
config AURORA_NB8800
tristate "Aurora AU-NB8800 support"
+ depends on HAS_DMA
select PHYLIB
help
Support for the AU-NB8800 gigabit Ethernet controller.
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 11446adc03cc..443ef92f9803 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2693,17 +2693,16 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
req.ver_upd = DRV_VER_UPD;
if (BNXT_PF(bp)) {
- unsigned long vf_req_snif_bmap[4];
+ DECLARE_BITMAP(vf_req_snif_bmap, 256);
u32 *data = (u32 *)vf_req_snif_bmap;
- memset(vf_req_snif_bmap, 0, 32);
+ memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
__set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
- for (i = 0; i < 8; i++) {
- req.vf_req_fwd[i] = cpu_to_le32(*data);
- data++;
- }
+ for (i = 0; i < 8; i++)
+ req.vf_req_fwd[i] = cpu_to_le32(data[i]);
+
req.enables |=
cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
}
@@ -4601,7 +4600,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bp->nge_port_cnt = 1;
}
- bp->state = BNXT_STATE_OPEN;
+ set_bit(BNXT_STATE_OPEN, &bp->state);
bnxt_enable_int(bp);
/* Enable TX queues */
bnxt_tx_enable(bp);
@@ -4677,8 +4676,10 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* Change device state to avoid TX queue wake up's */
bnxt_tx_disable(bp);
- bp->state = BNXT_STATE_CLOSED;
- cancel_work_sync(&bp->sp_task);
+ clear_bit(BNXT_STATE_OPEN, &bp->state);
+ smp_mb__after_atomic();
+ while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
+ msleep(20);
/* Flush rings before disabling interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
@@ -5028,8 +5029,10 @@ static void bnxt_dbg_dump_states(struct bnxt *bp)
static void bnxt_reset_task(struct bnxt *bp)
{
bnxt_dbg_dump_states(bp);
- if (netif_running(bp->dev))
- bnxt_tx_disable(bp); /* prevent tx timout again */
+ if (netif_running(bp->dev)) {
+ bnxt_close_nic(bp, false, false);
+ bnxt_open_nic(bp, false, false);
+ }
}
static void bnxt_tx_timeout(struct net_device *dev)
@@ -5079,8 +5082,12 @@ static void bnxt_sp_task(struct work_struct *work)
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
int rc;
- if (bp->state != BNXT_STATE_OPEN)
+ set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ smp_mb__after_atomic();
+ if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+ clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
return;
+ }
if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
bnxt_cfg_rx_mode(bp);
@@ -5104,8 +5111,19 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_hwrm_tunnel_dst_port_free(
bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
}
- if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
+ if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) {
+ /* bnxt_reset_task() calls bnxt_close_nic() which waits
+ * for BNXT_STATE_IN_SP_TASK to clear.
+ */
+ clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ rtnl_lock();
bnxt_reset_task(bp);
+ set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ rtnl_unlock();
+ }
+
+ smp_mb__before_atomic();
+ clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
@@ -5184,7 +5202,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->timer.function = bnxt_timer;
bp->current_interval = BNXT_TIMER_INTERVAL;
- bp->state = BNXT_STATE_CLOSED;
+ clear_bit(BNXT_STATE_OPEN, &bp->state);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 674bc5159b91..f199f4cc8ffe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -925,9 +925,9 @@ struct bnxt {
struct timer_list timer;
- int state;
-#define BNXT_STATE_CLOSED 0
-#define BNXT_STATE_OPEN 1
+ unsigned long state;
+#define BNXT_STATE_OPEN 0
+#define BNXT_STATE_IN_SP_TASK 1
struct bnxt_irq *irq_tbl;
u8 mac_addr[ETH_ALEN];
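Turning bp->state into an atomic bit field lets the slow-path worker and the close path coordinate without a lock: bnxt_sp_task() advertises BNXT_STATE_IN_SP_TASK and bails out unless BNXT_STATE_OPEN is set, while bnxt_close_nic() clears OPEN first and then waits for IN_SP_TASK to drain. A userspace model with C11 atomics standing in for set_bit()/clear_bit()/test_bit() and the smp_mb barriers:

#include <stdatomic.h>
#include <stdbool.h>

enum { STATE_OPEN, STATE_IN_SP_TASK };

static atomic_ulong state;

/* slow-path worker: run only while the device stays open */
static bool sp_task_enter(void)
{
	atomic_fetch_or(&state, 1UL << STATE_IN_SP_TASK);
	if (!(atomic_load(&state) & (1UL << STATE_OPEN))) {
		atomic_fetch_and(&state, ~(1UL << STATE_IN_SP_TASK));
		return false;
	}
	return true;
}

static void sp_task_exit(void)
{
	atomic_fetch_and(&state, ~(1UL << STATE_IN_SP_TASK));
}

/* close path: withdraw OPEN first, then wait for the worker to drain */
static void close_nic(void)
{
	atomic_fetch_and(&state, ~(1UL << STATE_OPEN));
	while (atomic_load(&state) & (1UL << STATE_IN_SP_TASK))
		;	/* the driver msleep()s 20 ms per iteration instead */
}

int main(void)
{
	atomic_fetch_or(&state, 1UL << STATE_OPEN);	/* device opened */
	if (sp_task_enter()) {
		/* ... slow-path work ... */
		sp_task_exit();
	}
	close_nic();
	return 0;
}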
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 7a9af2887d8e..ea044bbcd384 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -21,7 +21,7 @@
#ifdef CONFIG_BNXT_SRIOV
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
- if (bp->state != BNXT_STATE_OPEN) {
+ if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
netdev_err(bp->dev, "vf ndo called though PF is down\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 9f80de4d5016..4dded90076c8 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -37,7 +37,6 @@ struct nicpf {
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
u8 vf_lmac_map[MAX_LMAC];
- u8 lmac_cnt;
struct delayed_work dwork;
struct workqueue_struct *check_link;
u8 link[MAX_LMAC];
@@ -275,7 +274,6 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
u64 lmac_credit;
nic->num_vf_en = 0;
- nic->lmac_cnt = 0;
for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
if (!(bgx_map & (1 << bgx)))
@@ -285,7 +283,6 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
nic->vf_lmac_map[next_bgx_lmac++] =
NIC_SET_VF_LMAC_MAP(bgx, lmac);
nic->num_vf_en += lmac_cnt;
- nic->lmac_cnt += lmac_cnt;
/* Program LMAC credits */
lmac_credit = (1ull << 1); /* channel credit enable */
@@ -613,6 +610,21 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
return 0;
}
+static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
+{
+ int bgx, lmac;
+
+ nic->vf_enabled[vf] = enable;
+
+ if (vf >= nic->num_vf_en)
+ return;
+
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable);
+}
+
/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
@@ -712,29 +724,14 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
break;
case NIC_MBOX_MSG_CFG_DONE:
/* Last message of VF config msg sequence */
- nic->vf_enabled[vf] = true;
- if (vf >= nic->lmac_cnt)
- goto unlock;
-
- bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
- lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
-
- bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, true);
+ nic_enable_vf(nic, vf, true);
goto unlock;
case NIC_MBOX_MSG_SHUTDOWN:
/* First msg in VF teardown sequence */
- nic->vf_enabled[vf] = false;
if (vf >= nic->num_vf_en)
nic->sqs_used[vf - nic->num_vf_en] = false;
nic->pqs_vf[vf] = 0;
-
- if (vf >= nic->lmac_cnt)
- break;
-
- bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
- lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
-
- bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, false);
+ nic_enable_vf(nic, vf, false);
break;
case NIC_MBOX_MSG_ALLOC_SQS:
nic_alloc_sqs(nic, &mbx.sqs_alloc);
@@ -953,7 +950,7 @@ static void nic_poll_for_link(struct work_struct *work)
mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
- for (vf = 0; vf < nic->lmac_cnt; vf++) {
+ for (vf = 0; vf < nic->num_vf_en; vf++) {
/* Poll only if VF is UP */
if (!nic->vf_enabled[vf])
continue;
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 63c2bcf8031a..b1026689b78f 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -48,21 +48,15 @@ static void nps_enet_read_rx_fifo(struct net_device *ndev,
*reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
else { /* !dst_is_aligned */
for (i = 0; i < len; i++, reg++) {
- u32 buf =
- nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
-
- /* to accommodate word-unaligned address of "reg"
- * we have to do memcpy_toio() instead of simple "=".
- */
- memcpy_toio((void __iomem *)reg, &buf, sizeof(buf));
+ u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
+ put_unaligned(buf, reg);
}
}
/* copy last bytes (if any) */
if (last) {
u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
-
- memcpy_toio((void __iomem *)reg, &buf, last);
+ memcpy((u8*)reg, &buf, last);
}
}
@@ -367,7 +361,7 @@ static void nps_enet_send_frame(struct net_device *ndev,
struct nps_enet_tx_ctl tx_ctrl;
short length = skb->len;
u32 i, len = DIV_ROUND_UP(length, sizeof(u32));
- u32 *src = (u32 *)virt_to_phys(skb->data);
+ u32 *src = (void *)skb->data;
bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32));
tx_ctrl.value = 0;
@@ -375,17 +369,11 @@ static void nps_enet_send_frame(struct net_device *ndev,
if (src_is_aligned)
for (i = 0; i < len; i++, src++)
nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, *src);
- else { /* !src_is_aligned */
- for (i = 0; i < len; i++, src++) {
- u32 buf;
-
- /* to accommodate word-unaligned address of "src"
- * we have to do memcpy_fromio() instead of simple "="
- */
- memcpy_fromio(&buf, (void __iomem *)src, sizeof(buf));
- nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, buf);
- }
- }
+ else /* !src_is_aligned */
+ for (i = 0; i < len; i++, src++)
+ nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF,
+ get_unaligned(src));
+
/* Write the length of the Frame */
tx_ctrl.nt = length;
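The nps_enet change drops the memcpy_toio()/memcpy_fromio() detour: the skb data being copied is ordinary kernel memory, not I/O space, so put_unaligned()/get_unaligned() are the right helpers for word accesses at odd addresses. A portable userspace equivalent of what those helpers do on strict-alignment architectures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* memcpy-based accessors never fault on unaligned addresses; the
 * compiler lowers them to plain loads/stores where that is legal. */
static void put_unaligned_u32(uint32_t val, void *p)
{
	memcpy(p, &val, sizeof(val));
}

static uint32_t get_unaligned_u32(const void *p)
{
	uint32_t val;

	memcpy(&val, p, sizeof(val));
	return val;
}

int main(void)
{
	unsigned char buf[7] = { 0 };

	put_unaligned_u32(0x11223344u, buf + 1);	/* odd address, still safe */
	printf("0x%08x\n", get_unaligned_u32(buf + 1));
	return 0;
}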
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index 08f5b911d96b..52e0091b4fb2 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -552,7 +552,7 @@ static void tx_restart(struct net_device *dev)
cbd_t __iomem *prev_bd;
cbd_t __iomem *last_tx_bd;
- last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t));
+ last_tx_bd = fep->tx_bd_base + ((fpi->tx_ring - 1) * sizeof(cbd_t));
/* get the current bd held in TBPTR and scan back from this point */
recheck_bd = curr_tbptr = (cbd_t __iomem *)
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 55c36230e176..40071dad1c57 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -464,7 +464,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
* address). Print error message but continue anyway.
*/
if ((void *)tbipa > priv->map + resource_size(&res) - 4)
- dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n",
+ dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n",
((void *)tbipa - priv->map) + 4);
iowrite32be(be32_to_cpup(prop), tbipa);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4ce60e0c8341..4e394f75261e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -891,7 +891,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
FSL_GIANFAR_DEV_HAS_VLAN |
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
- FSL_GIANFAR_DEV_HAS_TIMER;
+ FSL_GIANFAR_DEV_HAS_TIMER |
+ FSL_GIANFAR_DEV_HAS_RX_FILER;
err = of_property_read_string(np, "phy-connection-type", &ctype);
@@ -1393,8 +1394,9 @@ static int gfar_probe(struct platform_device *ofdev)
priv->rx_queue[i]->rxic = DEFAULT_RXIC;
}
- /* always enable rx filer */
- priv->rx_filer_enable = 1;
+ /* Always enable rx filer if available */
+ priv->rx_filer_enable =
+ (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
/* Enable most messages by default */
priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
/* use pritority h/w tx queue scheduling for single queue devices */
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index f266b20f9ef5..cb77667971a7 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -923,6 +923,7 @@ struct gfar {
#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800
#define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000
+#define FSL_GIANFAR_DEV_HAS_RX_FILER 0x00002000
#if (MAXGROUPS == 2)
#define DEFAULT_MAPPING 0xAA
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 636b205a2366..1c33bd06bd5c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1376,12 +1376,8 @@ int hns_dsaf_set_mac_uc_entry(
if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
MAC_IS_BROADCAST(mac_entry->addr) ||
MAC_IS_MULTICAST(mac_entry->addr)) {
- dev_err(dsaf_dev->dev,
- "set_uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n",
- dsaf_dev->ae_dev.name, mac_entry->addr[0],
- mac_entry->addr[1], mac_entry->addr[2],
- mac_entry->addr[3], mac_entry->addr[4],
- mac_entry->addr[5]);
+ dev_err(dsaf_dev->dev, "set_uc %s Mac %pM err!\n",
+ dsaf_dev->ae_dev.name, mac_entry->addr);
return -EINVAL;
}
@@ -1448,12 +1444,8 @@ int hns_dsaf_set_mac_mc_entry(
/* mac addr check */
if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
- dev_err(dsaf_dev->dev,
- "set uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n",
- dsaf_dev->ae_dev.name, mac_entry->addr[0],
- mac_entry->addr[1], mac_entry->addr[2],
- mac_entry->addr[3],
- mac_entry->addr[4], mac_entry->addr[5]);
+ dev_err(dsaf_dev->dev, "set uc %s Mac %pM err!\n",
+ dsaf_dev->ae_dev.name, mac_entry->addr);
return -EINVAL;
}
@@ -1527,11 +1519,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
/*chechk mac addr */
if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
- dev_err(dsaf_dev->dev,
- "set_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
- mac_entry->addr[0], mac_entry->addr[1],
- mac_entry->addr[2], mac_entry->addr[3],
- mac_entry->addr[4], mac_entry->addr[5]);
+ dev_err(dsaf_dev->dev, "set_entry failed,addr %pM!\n",
+ mac_entry->addr);
return -EINVAL;
}
@@ -1614,9 +1603,8 @@ int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
/*check mac addr */
if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) {
- dev_err(dsaf_dev->dev,
- "del_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ dev_err(dsaf_dev->dev, "del_entry failed,addr %pM!\n",
+ addr);
return -EINVAL;
}
@@ -1680,11 +1668,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
/*check mac addr */
if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
- dev_err(dsaf_dev->dev,
- "del_port failed, addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
- mac_entry->addr[0], mac_entry->addr[1],
- mac_entry->addr[2], mac_entry->addr[3],
- mac_entry->addr[4], mac_entry->addr[5]);
+ dev_err(dsaf_dev->dev, "del_port failed, addr %pM!\n",
+ mac_entry->addr);
return -EINVAL;
}
@@ -1761,11 +1746,8 @@ int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
/* check macaddr */
if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
MAC_IS_BROADCAST(mac_entry->addr)) {
- dev_err(dsaf_dev->dev,
- "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n",
- mac_entry->addr[0], mac_entry->addr[1],
- mac_entry->addr[2], mac_entry->addr[3],
- mac_entry->addr[4], mac_entry->addr[5]);
+ dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n",
+ mac_entry->addr);
return -EINVAL;
}
@@ -1812,11 +1794,8 @@ int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
/*check mac addr */
if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
MAC_IS_BROADCAST(mac_entry->addr)) {
- dev_err(dsaf_dev->dev,
- "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n",
- mac_entry->addr[0], mac_entry->addr[1],
- mac_entry->addr[2], mac_entry->addr[3],
- mac_entry->addr[4], mac_entry->addr[5]);
+ dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n",
+ mac_entry->addr);
return -EINVAL;
}
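All of the hns_dsaf hunks above collapse hand-rolled six-way %02x formatting into the kernel's %pM printk extension, which renders a 6-byte buffer as a colon-separated MAC address. A kernel-style fragment (the address value is made up):

	u8 addr[ETH_ALEN] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

	/* open-coded: six arguments, easy to get wrong - the qib hunk
	 * earlier in this merge fixes exactly such a slip, cd.date
	 * having been passed for the lot number */
	pr_err("addr %02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	/* %pM: one argument, same 00:1b:21:aa:bb:cc output */
	pr_err("addr %pM\n", addr);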
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index c4d7c26952c4..5d1b746e141d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -956,7 +956,7 @@
#define XGMAC_PAUSE_CTL_RSP_MODE_B 2
#define XGMAC_PAUSE_CTL_TX_XOFF_B 3
-static inline void dsaf_write_reg(void *base, u32 reg, u32 value)
+static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
{
u8 __iomem *reg_addr = ACCESS_ONCE(base);
@@ -966,7 +966,7 @@ static inline void dsaf_write_reg(void *base, u32 reg, u32 value)
#define dsaf_write_dev(a, reg, value) \
dsaf_write_reg((a)->io_base, (reg), (value))
-static inline u32 dsaf_read_reg(u8 *base, u32 reg)
+static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
{
u8 __iomem *reg_addr = ACCESS_ONCE(base);
@@ -985,8 +985,8 @@ static inline u32 dsaf_read_reg(u8 *base, u32 reg)
#define dsaf_set_bit(origin, shift, val) \
dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
-static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
- u32 val)
+static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
+ u32 shift, u32 val)
{
u32 origin = dsaf_read_reg(base, reg);
@@ -1005,7 +1005,8 @@ static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
#define dsaf_get_bit(origin, shift) \
dsaf_get_field((origin), (1ull << (shift)), (shift))
-static inline u32 dsaf_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
+ u32 shift)
{
u32 origin;
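
The signature changes above are annotation-only: __iomem marks MMIO pointers so sparse can flag code that mixes them with regular pointers, while generated code is unchanged. A rough userspace model; the noderef/address_space attribute details mirror the kernel's historical definition and are assumptions here:

#include <stdint.h>
#include <stdio.h>

/* Rough model of __iomem: under sparse (__CHECKER__) the pointer lives
 * in a distinct address space, so mixing MMIO and regular pointers is
 * reported; otherwise it compiles away entirely.
 */
#ifdef __CHECKER__
#define __iomem __attribute__((noderef, address_space(2)))
#else
#define __iomem
#endif

static void write_reg(void __iomem *base, uint32_t reg, uint32_t value)
{
	/* a real driver would use writel(); a plain volatile store
	 * stands in for illustration (sparse would flag this cast)
	 */
	*(volatile uint32_t *)((char *)base + reg) = value;
}

int main(void)
{
	uint32_t fake_mmio[4] = { 0 };

	write_reg((void __iomem *)fake_mmio, 8, 0xdeadbeef);
	printf("reg at offset 8: 0x%x\n", (unsigned)fake_mmio[2]);
	return 0;
}
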
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 0ff8f01e57ee..1fd5ea82a9bc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -567,10 +567,6 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
goto init_adminq_exit;
}
- /* initialize locks */
- mutex_init(&hw->aq.asq_mutex);
- mutex_init(&hw->aq.arq_mutex);
-
/* Set up register offsets */
i40e_adminq_init_regs(hw);
@@ -664,8 +660,6 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
i40e_shutdown_asq(hw);
i40e_shutdown_arq(hw);
- /* destroy the locks */
-
if (hw->nvm_buff.va)
i40e_free_virt_mem(hw, &hw->nvm_buff);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 23211e08eecb..1598fb31477a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -10607,6 +10607,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* set up a default setting for link flow control */
pf->hw.fc.requested_mode = I40E_FC_NONE;
+ /* set up the locks for the AQ; do this only once in probe
+ * and destroy them only once in remove
+ */
+ mutex_init(&hw->aq.asq_mutex);
+ mutex_init(&hw->aq.arq_mutex);
+
err = i40e_init_adminq(hw);
if (err) {
if (err == I40E_ERR_FIRMWARE_API_VERSION)
@@ -11024,7 +11030,6 @@ static void i40e_remove(struct pci_dev *pdev)
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
- i40e_fdir_teardown(pf);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
@@ -11067,6 +11072,10 @@ static void i40e_remove(struct pci_dev *pdev)
"Failed to destroy the Admin Queue resources: %d\n",
ret_code);
+ /* destroy the locks only once, here */
+ mutex_destroy(&hw->aq.arq_mutex);
+ mutex_destroy(&hw->aq.asq_mutex);
+
/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
i40e_clear_interrupt_scheme(pf);
for (i = 0; i < pf->num_alloc_vsi; i++) {
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index fd123ca60761..3f65e39b3fe4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -551,10 +551,6 @@ i40e_status i40evf_init_adminq(struct i40e_hw *hw)
goto init_adminq_exit;
}
- /* initialize locks */
- mutex_init(&hw->aq.asq_mutex);
- mutex_init(&hw->aq.arq_mutex);
-
/* Set up register offsets */
i40e_adminq_init_regs(hw);
@@ -596,8 +592,6 @@ i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
i40e_shutdown_asq(hw);
i40e_shutdown_arq(hw);
- /* destroy the locks */
-
if (hw->nvm_buff.va)
i40e_free_virt_mem(hw, &hw->nvm_buff);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 4d05ff6f0423..a0bbb6b9f5aa 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -2666,6 +2666,12 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
+ /* set up the locks for the AQ; do this only once in probe
+ * and destroy them only once in remove
+ */
+ mutex_init(&hw->aq.asq_mutex);
+ mutex_init(&hw->aq.arq_mutex);
+
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
@@ -2822,6 +2828,10 @@ static void i40evf_remove(struct pci_dev *pdev)
if (hw->aq.asq.count)
i40evf_shutdown_adminq(hw);
+ /* destroy the locks only once, here */
+ mutex_destroy(&hw->aq.arq_mutex);
+ mutex_destroy(&hw->aq.asq_mutex);
+
iounmap(hw->hw_addr);
pci_release_regions(pdev);
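
Both i40e hunk pairs above enforce the same rule: a lock's lifetime must match the object that owns it, not the init/shutdown paths that may run repeatedly (for instance across resets), because re-initializing a live mutex is undefined behaviour. A minimal pthread sketch of that lifecycle; all names are illustrative:

#include <pthread.h>
#include <stdio.h>

struct fake_hw {
	pthread_mutex_t asq_mutex;
	pthread_mutex_t arq_mutex;
};

static void probe(struct fake_hw *hw)
{
	/* init exactly once, when the device appears */
	pthread_mutex_init(&hw->asq_mutex, NULL);
	pthread_mutex_init(&hw->arq_mutex, NULL);
}

static void reinit_adminq(struct fake_hw *hw)
{
	/* may run many times; only uses the locks, never recreates them */
	pthread_mutex_lock(&hw->asq_mutex);
	/* ...reprogram queue state... */
	pthread_mutex_unlock(&hw->asq_mutex);
}

static void remove_dev(struct fake_hw *hw)
{
	/* destroy exactly once, when the device goes away */
	pthread_mutex_destroy(&hw->arq_mutex);
	pthread_mutex_destroy(&hw->asq_mutex);
}

int main(void)
{
	struct fake_hw hw;

	probe(&hw);
	reinit_adminq(&hw);
	reinit_adminq(&hw);	/* safe: the locks survive AQ re-init */
	remove_dev(&hw);
	puts("ok");
	return 0;
}
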
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index fca35aa90d0f..c5c0fb4ddf9f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8131,6 +8131,9 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
*/
if (netif_running(dev))
ixgbe_close(dev);
+ else
+ ixgbe_reset(adapter);
+
ixgbe_clear_interrupt_scheme(adapter);
#ifdef CONFIG_IXGBE_DCB
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index d9884fd15b45..a4beccf1fd46 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3413,16 +3413,23 @@ static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
}
/* Free all buffers from the pool */
-static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
+static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
+ struct mvpp2_bm_pool *bm_pool)
{
int i;
for (i = 0; i < bm_pool->buf_num; i++) {
+ dma_addr_t buf_phys_addr;
u32 vaddr;
/* Get buffer virtual address (indirect access) */
- mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
+ buf_phys_addr = mvpp2_read(priv,
+ MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
+
+ dma_unmap_single(dev, buf_phys_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+
if (!vaddr)
break;
dev_kfree_skb_any((struct sk_buff *)vaddr);
@@ -3439,7 +3446,7 @@ static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
{
u32 val;
- mvpp2_bm_bufs_free(priv, bm_pool);
+ mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
if (bm_pool->buf_num) {
WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
return 0;
@@ -3692,7 +3699,8 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
MVPP2_BM_LONG_BUF_NUM :
MVPP2_BM_SHORT_BUF_NUM;
else
- mvpp2_bm_bufs_free(port->priv, new_pool);
+ mvpp2_bm_bufs_free(port->dev->dev.parent,
+ port->priv, new_pool);
new_pool->pkt_size = pkt_size;
@@ -3756,7 +3764,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
/* Update BM pool with new buffer size */
- mvpp2_bm_bufs_free(port->priv, port_pool);
+ mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
if (port_pool->buf_num) {
WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
return -EIO;
@@ -4401,11 +4409,10 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
mvpp2_txq_inc_get(txq_pcpu);
- if (!skb)
- continue;
-
dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
skb_headlen(skb), DMA_TO_DEVICE);
+ if (!skb)
+ continue;
dev_kfree_skb_any(skb);
}
}
@@ -5092,7 +5099,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
struct mvpp2_rx_queue *rxq)
{
struct net_device *dev = port->dev;
- int rx_received, rx_filled, i;
+ int rx_received;
+ int rx_done = 0;
u32 rcvd_pkts = 0;
u32 rcvd_bytes = 0;
@@ -5101,17 +5109,18 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
if (rx_todo > rx_received)
rx_todo = rx_received;
- rx_filled = 0;
- for (i = 0; i < rx_todo; i++) {
+ while (rx_done < rx_todo) {
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
struct mvpp2_bm_pool *bm_pool;
struct sk_buff *skb;
+ dma_addr_t phys_addr;
u32 bm, rx_status;
int pool, rx_bytes, err;
- rx_filled++;
+ rx_done++;
rx_status = rx_desc->status;
rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
+ phys_addr = rx_desc->buf_phys_addr;
bm = mvpp2_bm_cookie_build(rx_desc);
pool = mvpp2_bm_cookie_pool_get(bm);
@@ -5128,8 +5137,10 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
* comprised by the RX descriptor.
*/
if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
+ err_drop_frame:
dev->stats.rx_errors++;
mvpp2_rx_error(port, rx_desc);
+ /* Return the buffer to the pool */
mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
rx_desc->buf_cookie);
continue;
@@ -5137,6 +5148,15 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
skb = (struct sk_buff *)rx_desc->buf_cookie;
+ err = mvpp2_rx_refill(port, bm_pool, bm, 0);
+ if (err) {
+ netdev_err(port->dev, "failed to refill BM pools\n");
+ goto err_drop_frame;
+ }
+
+ dma_unmap_single(dev->dev.parent, phys_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+
rcvd_pkts++;
rcvd_bytes += rx_bytes;
atomic_inc(&bm_pool->in_use);
@@ -5147,12 +5167,6 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
mvpp2_rx_csum(port, rx_status, skb);
napi_gro_receive(&port->napi, skb);
-
- err = mvpp2_rx_refill(port, bm_pool, bm, 0);
- if (err) {
- netdev_err(port->dev, "failed to refill BM pools\n");
- rx_filled--;
- }
}
if (rcvd_pkts) {
@@ -5166,7 +5180,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
/* Update Rx queue management counters */
wmb();
- mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled);
+ mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
return rx_todo;
}
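
The reordered RX loop above secures a replacement buffer (mvpp2_rx_refill) and unmaps the received one before the skb is handed to the stack; on refill failure it jumps to err_drop_frame so the buffer returns to the pool and the ring can never run dry. A userspace sketch of that refill-before-consume discipline; names are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct ring_slot { void *buf; };

static int refill(struct ring_slot *slot)
{
	void *nbuf = malloc(64);

	if (!nbuf)
		return -1;
	slot->buf = nbuf;
	return 0;
}

static void rx_one(struct ring_slot *slot)
{
	void *rx_buf = slot->buf;

	if (refill(slot)) {
		/* drop the frame; the old buffer stays in the ring */
		fprintf(stderr, "refill failed, frame dropped\n");
		return;
	}
	/* only now is it safe to consume the received buffer */
	printf("consumed buffer at %p\n", rx_buf);
	free(rx_buf);
}

int main(void)
{
	struct ring_slot slot = { .buf = malloc(64) };

	rx_one(&slot);
	free(slot.buf);
	return 0;
}
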
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 2177e56ed0be..d48d5793407d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1010,7 +1010,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
smp->method == IB_MGMT_METHOD_GET) || network_view) {
mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
- slave, smp->method, smp->mgmt_class,
+ slave, smp->mgmt_class, smp->method,
network_view ? "Network" : "Host",
be16_to_cpu(smp->attr_id));
return -EPERM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index da7f578a3fe1..b46dbe29ef6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -4331,9 +4331,10 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
return -EOPNOTSUPP;
ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
- ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
- if (ctrl->port <= 0)
+ err = mlx4_slave_convert_port(dev, slave, ctrl->port);
+ if (err <= 0)
return -EINVAL;
+ ctrl->port = err;
qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index ac17d8669b1a..1292c360390c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -299,6 +299,7 @@ struct qed_hwfn {
/* Flag indicating whether interrupts are enabled or not*/
bool b_int_enabled;
+ bool b_int_requested;
struct qed_mcp_info *mcp_info;
@@ -491,6 +492,8 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
+int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
+
#define QED_ETH_INTERFACE_VERSION 300
#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 803b190ccada..817bbd5476ff 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1385,52 +1385,63 @@ err0:
return rc;
}
-static u32 qed_hw_bar_size(struct qed_dev *cdev,
- u8 bar_id)
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
+ u8 bar_id)
{
- u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0);
+ u32 bar_reg = (bar_id == 0 ? PGLUE_B_REG_PF_BAR0_SIZE
+ : PGLUE_B_REG_PF_BAR1_SIZE);
+ u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
- return size / cdev->num_hwfns;
+ /* Get the BAR size (in KB) from the value read from hardware */
+ return 1 << (val + 15);
}
int qed_hw_prepare(struct qed_dev *cdev,
int personality)
{
- int rc, i;
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ int rc;
/* Store the precompiled init data ptrs */
qed_init_iro_array(cdev);
/* Initialize the first hwfn - will learn number of hwfns */
- rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview,
+ rc = qed_hw_prepare_single(p_hwfn,
+ cdev->regview,
cdev->doorbells, personality);
if (rc)
return rc;
- personality = cdev->hwfns[0].hw_info.personality;
+ personality = p_hwfn->hw_info.personality;
/* Initialize the rest of the hwfns */
- for (i = 1; i < cdev->num_hwfns; i++) {
+ if (cdev->num_hwfns > 1) {
void __iomem *p_regview, *p_doorbell;
+ u8 __iomem *addr;
+
+ /* adjust bar offset for second engine */
+ addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2;
+ p_regview = addr;
- p_regview = cdev->regview +
- i * qed_hw_bar_size(cdev, 0);
- p_doorbell = cdev->doorbells +
- i * qed_hw_bar_size(cdev, 1);
- rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview,
+ /* adjust doorbell bar offset for second engine */
+ addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2;
+ p_doorbell = addr;
+
+ /* prepare second hw function */
+ rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
p_doorbell, personality);
+
+ /* in case of error, we need to free the previously
+ * initialized hwfn 0.
+ */
if (rc) {
- /* Cleanup previously initialized hwfns */
- while (--i >= 0) {
- qed_init_free(&cdev->hwfns[i]);
- qed_mcp_free(&cdev->hwfns[i]);
- qed_hw_hwfn_free(&cdev->hwfns[i]);
- }
- return rc;
+ qed_init_free(p_hwfn);
+ qed_mcp_free(p_hwfn);
+ qed_hw_hwfn_free(p_hwfn);
}
}
- return 0;
+ return rc;
}
void qed_hw_remove(struct qed_dev *cdev)
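
qed_hw_bar_size() now derives each PF's BAR span from a hardware-encoded exponent, expanded as 1 << (val + 15), instead of splitting pci_resource_len() evenly across functions. A quick check of the expansion (units follow the in-hunk comment; the sample encodings are illustrative):

#include <stdio.h>

/* Mirrors the expansion in qed_hw_bar_size(): an encoded value v
 * becomes 1 << (v + 15).
 */
int main(void)
{
	for (unsigned int v = 0; v < 4; v++)
		printf("encoding %u -> %u\n", v, 1u << (v + 15));
	return 0;
}
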
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index de50e84902af..9cc9d62c1fec 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -783,22 +783,16 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}
-void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum qed_int_mode int_mode)
+int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode)
{
- int i;
-
- p_hwfn->b_int_enabled = 1;
+ int rc, i;
/* Mask non-link attentions */
for (i = 0; i < 9; i++)
qed_wr(p_hwfn, p_ptt,
MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0);
- /* Enable interrupt Generation */
- qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
-
/* Configure AEU signal change to produce attentions for link */
qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
@@ -808,6 +802,19 @@ void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
/* Unmask AEU signals toward IGU */
qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+ if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_slowpath_irq_req(p_hwfn);
+ if (rc != 0) {
+ DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
+ return -EINVAL;
+ }
+ p_hwfn->b_int_requested = true;
+ }
+ /* Enable interrupt Generation */
+ qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
+ p_hwfn->b_int_enabled = 1;
+
+ return rc;
}
void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
@@ -1127,3 +1134,11 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
return info->igu_sb_cnt;
}
+
+void qed_int_disable_post_isr_release(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i)
+ cdev->hwfns[i].b_int_requested = false;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 16b57518e706..51e0b09a7f47 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -169,10 +169,14 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
int *p_iov_blks);
/**
- * @file
+ * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR
+ * release. The API needs to be called after releasing all slowpath IRQs
+ * of the device.
+ *
+ * @param cdev
*
- * @brief Interrupt handler
*/
+void qed_int_disable_post_isr_release(struct qed_dev *cdev);
#define QED_CAU_DEF_RX_TIMER_RES 0
#define QED_CAU_DEF_TX_TIMER_RES 0
@@ -366,10 +370,11 @@ void qed_int_setup(struct qed_hwfn *p_hwfn,
* @param p_hwfn
* @param p_ptt
* @param int_mode
+ *
+ * @return int
*/
-void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum qed_int_mode int_mode);
+int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode);
/**
* @brief - Initialize CAU status block entry
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 6b02e1134360..9d76ce249277 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -476,41 +476,22 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
return rc;
}
-static int qed_slowpath_irq_req(struct qed_dev *cdev)
+int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
- int i = 0, rc = 0;
+ struct qed_dev *cdev = hwfn->cdev;
+ int rc = 0;
+ u8 id;
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
- /* Request all the slowpath MSI-X vectors */
- for (i = 0; i < cdev->num_hwfns; i++) {
- snprintf(cdev->hwfns[i].name, NAME_SIZE,
- "sp-%d-%02x:%02x.%02x",
- i, cdev->pdev->bus->number,
- PCI_SLOT(cdev->pdev->devfn),
- cdev->hwfns[i].abs_pf_id);
-
- rc = request_irq(cdev->int_params.msix_table[i].vector,
- qed_msix_sp_int, 0,
- cdev->hwfns[i].name,
- cdev->hwfns[i].sp_dpc);
- if (rc)
- break;
-
- DP_VERBOSE(&cdev->hwfns[i],
- (NETIF_MSG_INTR | QED_MSG_SP),
+ id = hwfn->my_id;
+ snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
+ id, cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
+ rc = request_irq(cdev->int_params.msix_table[id].vector,
+ qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
+ if (!rc)
+ DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
"Requested slowpath MSI-X\n");
- }
-
- if (i != cdev->num_hwfns) {
- /* Free already request MSI-X vectors */
- for (i--; i >= 0; i--) {
- unsigned int vec =
- cdev->int_params.msix_table[i].vector;
- synchronize_irq(vec);
- free_irq(cdev->int_params.msix_table[i].vector,
- cdev->hwfns[i].sp_dpc);
- }
- }
} else {
unsigned long flags = 0;
@@ -534,13 +515,17 @@ static void qed_slowpath_irq_free(struct qed_dev *cdev)
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
for_each_hwfn(cdev, i) {
+ if (!cdev->hwfns[i].b_int_requested)
+ break;
synchronize_irq(cdev->int_params.msix_table[i].vector);
free_irq(cdev->int_params.msix_table[i].vector,
cdev->hwfns[i].sp_dpc);
}
} else {
- free_irq(cdev->pdev->irq, cdev);
+ if (QED_LEADING_HWFN(cdev)->b_int_requested)
+ free_irq(cdev->pdev->irq, cdev);
}
+ qed_int_disable_post_isr_release(cdev);
}
static int qed_nic_stop(struct qed_dev *cdev)
@@ -765,16 +750,11 @@ static int qed_slowpath_start(struct qed_dev *cdev,
if (rc)
goto err1;
- /* Request the slowpath IRQ */
- rc = qed_slowpath_irq_req(cdev);
- if (rc)
- goto err2;
-
/* Allocate stream for unzipping */
rc = qed_alloc_stream_mem(cdev);
if (rc) {
DP_NOTICE(cdev, "Failed to allocate stream memory\n");
- goto err3;
+ goto err2;
}
/* Start the slowpath */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 7a5ce5914ace..e8df12335a97 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -363,4 +363,8 @@
0x7 << 0)
#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
0
+#define PGLUE_B_REG_PF_BAR0_SIZE \
+ 0x2aae60UL
+#define PGLUE_B_REG_PF_BAR1_SIZE \
+ 0x2aae64UL
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 31a1f1eb4f56..287fadfab52d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -124,8 +124,12 @@ struct qed_spq {
dma_addr_t p_phys;
struct qed_spq_entry *p_virt;
- /* Used as index for completions (returns on EQ by FW) */
- u16 echo_idx;
+#define SPQ_RING_SIZE \
+ (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
+
+ /* Bitmap for handling out-of-order completions */
+ DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE);
+ u8 comp_bitmap_idx;
/* Statistics */
u32 unlimited_pending_count;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 7c0b8459666e..3dd548ab8df1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -112,8 +112,6 @@ static int
qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent)
{
- p_ent->elem.hdr.echo = 0;
- p_hwfn->p_spq->echo_idx++;
p_ent->flags = 0;
switch (p_ent->comp_mode) {
@@ -195,10 +193,12 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq,
struct qed_spq_entry *p_ent)
{
- struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
+ struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
+ u16 echo = qed_chain_get_prod_idx(p_chain);
struct slow_path_element *elem;
struct core_db_data db;
+ p_ent->elem.hdr.echo = cpu_to_le16(echo);
elem = qed_chain_produce(p_chain);
if (!elem) {
DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
@@ -437,7 +437,9 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
p_spq->comp_count = 0;
p_spq->comp_sent_count = 0;
p_spq->unlimited_pending_count = 0;
- p_spq->echo_idx = 0;
+
+ bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
+ p_spq->comp_bitmap_idx = 0;
/* SPQ cid, cannot fail */
qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
@@ -582,26 +584,32 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq = p_hwfn->p_spq;
if (p_ent->queue == &p_spq->unlimited_pending) {
- struct qed_spq_entry *p_en2;
if (list_empty(&p_spq->free_pool)) {
list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
p_spq->unlimited_pending_count++;
return 0;
- }
+ } else {
+ struct qed_spq_entry *p_en2;
- p_en2 = list_first_entry(&p_spq->free_pool,
- struct qed_spq_entry,
- list);
- list_del(&p_en2->list);
+ p_en2 = list_first_entry(&p_spq->free_pool,
+ struct qed_spq_entry,
+ list);
+ list_del(&p_en2->list);
+
+ /* Copy the ring element physical pointer to the new
+ * entry, since we are about to overwrite the entire ring
+ * entry and don't want to lose the pointer.
+ */
+ p_ent->elem.data_ptr = p_en2->elem.data_ptr;
- /* Strcut assignment */
- *p_en2 = *p_ent;
+ *p_en2 = *p_ent;
- kfree(p_ent);
+ kfree(p_ent);
- p_ent = p_en2;
+ p_ent = p_en2;
+ }
}
/* entry is to be placed in 'pending' queue */
@@ -777,13 +785,38 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
list) {
if (p_ent->elem.hdr.echo == echo) {
+ u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
+
list_del(&p_ent->list);
- qed_chain_return_produced(&p_spq->chain);
+ /* Avoid overwriting SPQ entries on out-of-order
+ * completions by marking completions in a bitmap and
+ * increasing the chain consumer only across the leading
+ * run of consecutively completed entries.
+ */
+ bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);
+
+ while (test_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap)) {
+ bitmap_clear(p_spq->p_comp_bitmap,
+ p_spq->comp_bitmap_idx,
+ SPQ_RING_SIZE);
+ p_spq->comp_bitmap_idx++;
+ qed_chain_return_produced(&p_spq->chain);
+ }
+
p_spq->comp_count++;
found = p_ent;
break;
}
+
+ /* This is relatively uncommon - depends on scenarios
+ * which have multiple ramrods sent per PF.
+ */
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
+ le16_to_cpu(echo),
+ le16_to_cpu(p_ent->elem.hdr.echo));
}
/* Release lock before callback, as callback may post
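
The completion path above marks each echo position in p_comp_bitmap and advances the chain consumer only across the leading run of set bits, so an out-of-order completion can never release a ring slot that an earlier, still-pending entry occupies. A compact userspace model of the technique; ring size and names are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 16u

static bool comp_bitmap[RING_SIZE];
static unsigned int comp_idx;	/* next in-order position to retire */
static unsigned int returned;	/* slots given back to the producer */

static void complete(unsigned int echo)
{
	comp_bitmap[echo % RING_SIZE] = true;

	/* retire only the leading run of consecutive completions */
	while (comp_bitmap[comp_idx % RING_SIZE]) {
		comp_bitmap[comp_idx % RING_SIZE] = false;
		comp_idx++;
		returned++;
	}
}

int main(void)
{
	complete(1);	/* out of order: nothing retired yet */
	printf("after echo 1: returned=%u\n", returned);	/* 0 */
	complete(0);	/* fills the gap: both 0 and 1 retire */
	printf("after echo 0: returned=%u\n", returned);	/* 2 */
	return 0;
}
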
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index be7d7a62cc0d..b1a452f291ee 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -246,7 +246,8 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
u32 state;
state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
- while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit--) {
+ while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit) {
+ idc->vnic_wait_limit--;
msleep(1000);
state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
}
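
This fix, and the similar --i/--tries changes in qlge_main.c and txc43128_phy.c below, all address the same post-decrement pitfall: with while (cond && limit--), a timeout leaves the counter at -1, so a later if (!limit) timeout test never fires. A small standalone demonstration:

#include <stdio.h>

static int wait_post_dec(int limit, int ready)
{
	while (!ready && limit--)
		;	/* poll */
	return limit;	/* -1 on timeout, not 0 */
}

static int wait_pre_dec(int limit, int ready)
{
	while (!ready && --limit)
		;	/* poll */
	return limit;	/* 0 on timeout */
}

int main(void)
{
	printf("post-decrement timeout leaves limit = %d\n",
	       wait_post_dec(3, 0));
	printf("pre-decrement timeout leaves limit = %d\n",
	       wait_pre_dec(3, 0));
	return 0;
}
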
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 02b7115b6aaa..997976426799 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4211,8 +4211,9 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
/* Wait for an outstanding reset to complete. */
if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
- int i = 3;
- while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+ int i = 4;
+
+ while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
netif_err(qdev, ifup, qdev->ndev,
"Waiting for adapter UP...\n");
ssleep(1);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index ddb2c6c6ec94..689a4a5c8dcf 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -736,9 +736,8 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
jiffies, jiffies - dev->trans_start);
qca->net_dev->stats.tx_errors++;
- /* wake the queue if there is room */
- if (qcaspi_tx_ring_has_space(&qca->txr))
- netif_wake_queue(dev);
+ /* Trigger tx queue flush and QCA7000 reset */
+ qca->sync = QCASPI_SYNC_UNKNOWN;
}
static int
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 3448eb0f8a4a..0e1ebb39ab46 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -924,6 +924,9 @@ static int ravb_phy_init(struct net_device *ndev)
netdev_info(ndev, "limited PHY to 100Mbit/s\n");
}
+ /* 10BASE is not supported */
+ phydev->supported &= ~PHY_10BT_FEATURES;
+
netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
phydev->addr, phydev->irq, phydev->drv->name);
@@ -1056,7 +1059,7 @@ static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
"rx_queue_1_mcast_packets",
"rx_queue_1_errors",
"rx_queue_1_crc_errors",
- "rx_queue_1_frame_errors_",
+ "rx_queue_1_frame_errors",
"rx_queue_1_length_errors",
"rx_queue_1_missed_errors",
"rx_queue_1_over_errors",
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 67cd24312c11..76c5ce04a271 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -52,6 +52,8 @@
NETIF_MSG_RX_ERR| \
NETIF_MSG_TX_ERR)
+#define SH_ETH_OFFSET_INVALID ((u16)~0)
+
#define SH_ETH_OFFSET_DEFAULTS \
[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
@@ -404,6 +406,28 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
+static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ u16 offset = mdp->reg_offset[enum_index];
+
+ if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
+ return;
+
+ iowrite32(data, mdp->addr + offset);
+}
+
+static u32 sh_eth_read(struct net_device *ndev, int enum_index)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ u16 offset = mdp->reg_offset[enum_index];
+
+ if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
+ return ~0U;
+
+ return ioread32(mdp->addr + offset);
+}
+
static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -1147,7 +1171,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
break;
}
mdp->rx_skbuff[i] = skb;
- rxdesc->addr = dma_addr;
+ rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
/* Rx descriptor address set */
@@ -1378,7 +1402,8 @@ static int sh_eth_txfree(struct net_device *ndev)
entry, edmac_to_cpu(mdp, txdesc->status));
/* Free the original skb. */
if (mdp->tx_skbuff[entry]) {
- dma_unmap_single(&ndev->dev, txdesc->addr,
+ dma_unmap_single(&ndev->dev,
+ edmac_to_cpu(mdp, txdesc->addr),
txdesc->buffer_length, DMA_TO_DEVICE);
dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
mdp->tx_skbuff[entry] = NULL;
@@ -1437,6 +1462,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (mdp->cd->shift_rd0)
desc_status >>= 16;
+ skb = mdp->rx_skbuff[entry];
if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
RD_RFS5 | RD_RFS6 | RD_RFS10)) {
ndev->stats.rx_errors++;
@@ -1452,16 +1478,16 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
ndev->stats.rx_missed_errors++;
if (desc_status & RD_RFS10)
ndev->stats.rx_over_errors++;
- } else {
+ } else if (skb) {
+ dma_addr = edmac_to_cpu(mdp, rxdesc->addr);
if (!mdp->cd->hw_swap)
sh_eth_soft_swap(
- phys_to_virt(ALIGN(rxdesc->addr, 4)),
+ phys_to_virt(ALIGN(dma_addr, 4)),
pkt_len + 2);
- skb = mdp->rx_skbuff[entry];
mdp->rx_skbuff[entry] = NULL;
if (mdp->cd->rpadir)
skb_reserve(skb, NET_IP_ALIGN);
- dma_unmap_single(&ndev->dev, rxdesc->addr,
+ dma_unmap_single(&ndev->dev, dma_addr,
ALIGN(mdp->rx_buf_sz, 32),
DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
@@ -1498,7 +1524,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
mdp->rx_skbuff[entry] = skb;
skb_checksum_none_assert(skb);
- rxdesc->addr = dma_addr;
+ rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
}
dma_wmb(); /* RACT bit must be set after all the above writes */
if (entry >= mdp->num_rx_ring - 1)
@@ -2306,8 +2332,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < mdp->num_rx_ring; i++) {
rxdesc = &mdp->rx_ring[i];
- rxdesc->status = 0;
- rxdesc->addr = 0xBADF00D0;
+ rxdesc->status = cpu_to_edmac(mdp, 0);
+ rxdesc->addr = cpu_to_edmac(mdp, 0xBADF00D0);
dev_kfree_skb(mdp->rx_skbuff[i]);
mdp->rx_skbuff[i] = NULL;
}
@@ -2325,6 +2351,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_txdesc *txdesc;
+ dma_addr_t dma_addr;
u32 entry;
unsigned long flags;
@@ -2347,14 +2374,14 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
txdesc = &mdp->tx_ring[entry];
/* soft swap. */
if (!mdp->cd->hw_swap)
- sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
- skb->len + 2);
- txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
+ sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
+ dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&ndev->dev, dma_addr)) {
kfree_skb(skb);
return NETDEV_TX_OK;
}
+ txdesc->addr = cpu_to_edmac(mdp, dma_addr);
txdesc->buffer_length = skb->len;
dma_wmb(); /* TACT bit must be set after all the above writes */
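
The sh_eth hunks above route every descriptor field access through cpu_to_edmac()/edmac_to_cpu(), because the DMA engine's descriptor byte order need not match the CPU's. A userspace sketch of that store/load discipline; the big-endian assumption and all names are purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* Every CPU-side value is converted before it is stored in a
 * descriptor, and converted back on load; here we pretend the DMA
 * engine is big-endian and the CPU is little-endian.
 */
static uint32_t cpu_to_desc(uint32_t v)
{
	return __builtin_bswap32(v);
}

static uint32_t desc_to_cpu(uint32_t v)
{
	return __builtin_bswap32(v);
}

struct rx_desc { uint32_t addr; };

int main(void)
{
	struct rx_desc d;
	uint32_t dma_addr = 0x12345678;

	d.addr = cpu_to_desc(dma_addr);		/* store: convert */
	printf("raw descriptor word: 0x%08x\n", (unsigned)d.addr);
	printf("recovered address:   0x%08x\n",
	       (unsigned)desc_to_cpu(d.addr));
	return 0;
}
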
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 50382b1c9ddc..26ad1cf0bcf1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -546,31 +546,6 @@ static inline void sh_eth_soft_swap(char *src, int len)
#endif
}
-#define SH_ETH_OFFSET_INVALID ((u16) ~0)
-
-static inline void sh_eth_write(struct net_device *ndev, u32 data,
- int enum_index)
-{
- struct sh_eth_private *mdp = netdev_priv(ndev);
- u16 offset = mdp->reg_offset[enum_index];
-
- if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
- return;
-
- iowrite32(data, mdp->addr + offset);
-}
-
-static inline u32 sh_eth_read(struct net_device *ndev, int enum_index)
-{
- struct sh_eth_private *mdp = netdev_priv(ndev);
- u16 offset = mdp->reg_offset[enum_index];
-
- if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
- return ~0U;
-
- return ioread32(mdp->addr + offset);
-}
-
static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
int enum_index)
{
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index c4a0e8a967dd..230fabd818ae 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3309,7 +3309,8 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
new_spec.priority = EFX_FILTER_PRI_AUTO;
new_spec.flags = (EFX_FILTER_FLAG_RX |
- EFX_FILTER_FLAG_RX_RSS);
+ (efx_rss_enabled(efx) ?
+ EFX_FILTER_FLAG_RX_RSS : 0));
new_spec.dmaq_id = 0;
new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
rc = efx_ef10_filter_push(efx, &new_spec,
@@ -3931,6 +3932,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_dev_addr *addr_list;
+ enum efx_filter_flags filter_flags;
struct efx_filter_spec spec;
u8 baddr[ETH_ALEN];
unsigned int i, j;
@@ -3945,11 +3947,11 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
addr_count = table->dev_uc_count;
}
+ filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
+
/* Insert/renew filters */
for (i = 0; i < addr_count; i++) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
- EFX_FILTER_FLAG_RX_RSS,
- 0);
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
addr_list[i].addr);
rc = efx_ef10_filter_insert(efx, &spec, true);
@@ -3978,9 +3980,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
if (multicast && rollback) {
/* Also need an Ethernet broadcast filter */
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
- EFX_FILTER_FLAG_RX_RSS,
- 0);
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
eth_broadcast_addr(baddr);
efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr);
rc = efx_ef10_filter_insert(efx, &spec, true);
@@ -4010,13 +4010,14 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ enum efx_filter_flags filter_flags;
struct efx_filter_spec spec;
u8 baddr[ETH_ALEN];
int rc;
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
- EFX_FILTER_FLAG_RX_RSS,
- 0);
+ filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
+
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
if (multicast)
efx_filter_set_mc_def(&spec);
@@ -4033,8 +4034,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
if (!nic_data->workaround_26807) {
/* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
- EFX_FILTER_FLAG_RX_RSS,
- 0);
+ filter_flags, 0);
eth_broadcast_addr(baddr);
efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
baddr);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 1aaf76c1ace8..10827476bc0b 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -76,6 +76,11 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \
EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
+static inline bool efx_rss_enabled(struct efx_nic *efx)
+{
+ return efx->rss_spread > 1;
+}
+
/* Filters */
void efx_mac_reconfigure(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 5a1c5a8f278a..133e9e35be9e 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2242,7 +2242,7 @@ efx_farch_filter_init_rx_auto(struct efx_nic *efx,
*/
spec->priority = EFX_FILTER_PRI_AUTO;
spec->flags = (EFX_FILTER_FLAG_RX |
- (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
+ (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) |
(efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
spec->dmaq_id = 0;
}
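
efx_rss_enabled() gives the sfc filter paths a single source of truth for whether RSS is actually spreading (rss_spread > 1); requesting EFX_FILTER_FLAG_RX_RSS on a single-queue configuration was the bug being fixed. A toy model of deriving the optional flag from device state; the flag values and struct are made up:

#include <stdbool.h>
#include <stdio.h>

#define FILTER_FLAG_RX		0x1u
#define FILTER_FLAG_RX_RSS	0x2u

struct fake_nic { unsigned int rss_spread; };

static bool rss_enabled(const struct fake_nic *nic)
{
	return nic->rss_spread > 1;
}

int main(void)
{
	struct fake_nic nic = { .rss_spread = 1 };
	unsigned int flags;

	/* only request RSS steering when it can actually spread */
	flags = FILTER_FLAG_RX |
		(rss_enabled(&nic) ? FILTER_FLAG_RX_RSS : 0);
	printf("filter flags: 0x%x\n", flags);
	return 0;
}
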
diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c
index 3d5ee3259885..194f67d9f3bf 100644
--- a/drivers/net/ethernet/sfc/txc43128_phy.c
+++ b/drivers/net/ethernet/sfc/txc43128_phy.c
@@ -418,7 +418,7 @@ static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
val |= (1 << TXC_GLCMD_LMTSWRST_LBN);
efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
- while (tries--) {
+ while (--tries) {
val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN)))
break;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 52b8ed9bd87c..adff46375a32 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -153,7 +153,11 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ sun7i_gmac_exit(pdev, plat_dat->bsp_priv);
+
+ return ret;
}
static const struct of_device_id sun7i_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6d4c33a7f0b4..c21015b68097 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3043,8 +3043,6 @@ int stmmac_suspend(struct net_device *ndev)
priv->hw->dma->stop_tx(priv->ioaddr);
priv->hw->dma->stop_rx(priv->ioaddr);
- stmmac_clear_descriptors(priv);
-
/* Enable Power down mode by programming the PMT regs */
if (device_may_wakeup(priv->device)) {
priv->hw->mac->pmt(priv->hw, priv->wolopts);
@@ -3102,7 +3100,12 @@ int stmmac_resume(struct net_device *ndev)
netif_device_attach(ndev);
- init_dma_desc_rings(ndev, GFP_ATOMIC);
+ priv->cur_rx = 0;
+ priv->dirty_rx = 0;
+ priv->dirty_tx = 0;
+ priv->cur_tx = 0;
+ stmmac_clear_descriptors(priv);
+
stmmac_hw_setup(ndev, false);
stmmac_init_tx_coalesce(priv);
stmmac_set_rx_mode(ndev);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 31b19fdf659d..e6e00924f8ef 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1009,8 +1009,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
&fl6.saddr, &fl6.daddr, prio, ttl,
sport, geneve->dst_port,
!!(flags & GENEVE_F_UDP_ZERO_CSUM6_TX));
-
- iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
return NETDEV_TX_OK;
tx_error:
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 908e8d486342..7f8e7662e28c 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -149,9 +149,14 @@ int mdio_mux_init(struct device *dev,
}
cb->bus_number = v;
cb->parent = pb;
+
cb->mii_bus = mdiobus_alloc();
+ if (!cb->mii_bus) {
+ ret_val = -ENOMEM;
+ of_node_put(child_bus_node);
+ break;
+ }
cb->mii_bus->priv = cb;
-
cb->mii_bus->irq = cb->phy_irq;
cb->mii_bus->name = "mdio_mux";
snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x",
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index cf6312fafea5..e13ad6cdcc22 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -339,9 +339,18 @@ static int ksz9021_config_init(struct phy_device *phydev)
{
const struct device *dev = &phydev->dev;
const struct device_node *of_node = dev->of_node;
+ const struct device *dev_walker;
- if (!of_node && dev->parent->of_node)
- of_node = dev->parent->of_node;
+ /* The Micrel driver has a deprecated option to place phy OF
+ * properties in the MAC node. Walk up the tree of devices to
+ * find a device with an OF node.
+ */
+ dev_walker = &phydev->dev;
+ do {
+ of_node = dev_walker->of_node;
+ dev_walker = dev_walker->parent;
+ } while (!of_node && dev_walker);
if (of_node) {
ksz9021_load_values_from_of(phydev, of_node,
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index b8da2eabac3e..f3c63022eb3c 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -550,6 +550,9 @@ static int pppoe_create(struct net *net, struct socket *sock, int kern)
sk->sk_family = PF_PPPOX;
sk->sk_protocol = PX_PROTO_OE;
+ INIT_WORK(&pppox_sk(sk)->proto.pppoe.padt_work,
+ pppoe_unbind_sock_work);
+
return 0;
}
@@ -614,8 +617,6 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
- INIT_WORK(&po->proto.pppoe.padt_work, pppoe_unbind_sock_work);
-
error = -EINVAL;
if (sp->sa_protocol != PX_PROTO_OE)
goto end;
@@ -645,8 +646,13 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
po->pppoe_dev = NULL;
}
- memset(sk_pppox(po) + 1, 0,
- sizeof(struct pppox_sock) - sizeof(struct sock));
+ po->pppoe_ifindex = 0;
+ memset(&po->pppoe_pa, 0, sizeof(po->pppoe_pa));
+ memset(&po->pppoe_relay, 0, sizeof(po->pppoe_relay));
+ memset(&po->chan, 0, sizeof(po->chan));
+ po->next = NULL;
+ po->num = 0;
+
sk->sk_state = PPPOX_NONE;
}
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index e18e0980bc61..90868ca5e341 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -419,6 +419,9 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
struct pptp_opt *opt = &po->proto.pptp;
int error = 0;
+ if (sockaddr_len < sizeof(struct sockaddr_pppox))
+ return -EINVAL;
+
lock_sock(sk);
opt->src_addr = sp->sa_addr.pptp;
@@ -440,6 +443,9 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
struct flowi4 fl4;
int error = 0;
+ if (sockaddr_len < sizeof(struct sockaddr_pppox))
+ return -EINVAL;
+
if (sp->sa_protocol != PX_PROTO_PPTP)
return -EINVAL;
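
Both pptp entry points now validate sockaddr_len before interpreting the user buffer as a struct sockaddr_pppox, closing an out-of-bounds read on short addresses. A minimal sketch of the check; the structure layout below is invented for illustration:

#include <stdio.h>

struct fake_sockaddr_pptp {
	unsigned short family;
	unsigned int call_id;
	unsigned int addr;
};

static int do_bind(const void *uaddr, int len)
{
	const struct fake_sockaddr_pptp *sp = uaddr;

	/* reject buffers shorter than the structure we will read */
	if (len < (int)sizeof(*sp))
		return -1;	/* -EINVAL in the driver */
	printf("binding family %d\n", sp->family);
	return 0;
}

int main(void)
{
	struct fake_sockaddr_pptp sa = { .family = 24 };

	printf("short buffer: %d\n", do_bind(&sa, 2));
	printf("full buffer:  %d\n", do_bind(&sa, (int)sizeof(sa)));
	return 0;
}
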
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index bbde9884ab8a..8973abdec9f6 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -158,7 +158,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
goto err;
- ret = cdc_ncm_bind_common(dev, intf, data_altsetting, 0);
+ ret = cdc_ncm_bind_common(dev, intf, data_altsetting, dev->driver_info->data);
if (ret)
goto err;
@@ -582,6 +582,26 @@ static const struct driver_info cdc_mbim_info_zlp = {
.tx_fixup = cdc_mbim_tx_fixup,
};
+/* The specification explicitly allows NDPs to be placed anywhere in the
+ * frame, but some devices fail unless the NDP is placed after the IP
+ * packets. Use the CDC_NCM_FLAG_NDP_TO_END flag to force this
+ * behaviour.
+ *
+ * Note: The current implementation of this feature restricts each NTB
+ * to a single NDP, implying that multiplexed sessions cannot share an
+ * NTB. This might affect performance for multiplexed sessions.
+ */
+static const struct driver_info cdc_mbim_info_ndp_to_end = {
+ .description = "CDC MBIM",
+ .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
+ .bind = cdc_mbim_bind,
+ .unbind = cdc_mbim_unbind,
+ .manage_power = cdc_mbim_manage_power,
+ .rx_fixup = cdc_mbim_rx_fixup,
+ .tx_fixup = cdc_mbim_tx_fixup,
+ .data = CDC_NCM_FLAG_NDP_TO_END,
+};
+
static const struct usb_device_id mbim_devs[] = {
/* This duplicate NCM entry is intentional. MBIM devices can
* be disguised as NCM by default, and this is necessary to
@@ -597,6 +617,10 @@ static const struct usb_device_id mbim_devs[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info,
},
+ /* Huawei E3372 fails unless NDP comes after the IP packets */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
+ },
/* default entry */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_zlp,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index b45e5cae2a6b..c48c7f8ca727 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -998,10 +998,18 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
* NTH16 header as we would normally do. NDP isn't written to the SKB yet, and
* the wNdpIndex field in the header is actually not consistent with reality. It will be later.
*/
- if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
if (ctx->delayed_ndp16->dwSignature == sign)
return ctx->delayed_ndp16;
+ /* We can only push a single NDP to the end. Return
+ * NULL to send what we've already got and queue this
+ * skb for later.
+ */
+ else if (ctx->delayed_ndp16->dwSignature)
+ return NULL;
+ }
+
/* follow the chain of NDPs, looking for a match */
while (ndpoffset) {
ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 34642a9583e0..e691c7df1ac2 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3067,17 +3067,6 @@ static int rtl8152_open(struct net_device *netdev)
mutex_lock(&tp->control);
- /* The WORK_ENABLE may be set when autoresume occurs */
- if (test_bit(WORK_ENABLE, &tp->flags)) {
- clear_bit(WORK_ENABLE, &tp->flags);
- usb_kill_urb(tp->intr_urb);
- cancel_delayed_work_sync(&tp->schedule);
-
- /* disable the tx/rx, if the workqueue has enabled them. */
- if (netif_carrier_ok(netdev))
- tp->rtl_ops.disable(tp);
- }
-
tp->rtl_ops.up(tp);
rtl8152_set_speed(tp, AUTONEG_ENABLE,
@@ -3124,12 +3113,6 @@ static int rtl8152_close(struct net_device *netdev)
} else {
mutex_lock(&tp->control);
- /* The autosuspend may have been enabled and wouldn't
- * be disable when autoresume occurs, because the
- * netif_running() would be false.
- */
- rtl_runtime_suspend_enable(tp, false);
-
tp->rtl_ops.down(tp);
mutex_unlock(&tp->control);
@@ -3512,7 +3495,7 @@ static int rtl8152_resume(struct usb_interface *intf)
netif_device_attach(tp->netdev);
}
- if (netif_running(tp->netdev)) {
+ if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
rtl_runtime_suspend_enable(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
@@ -3532,6 +3515,8 @@ static int rtl8152_resume(struct usb_interface *intf)
}
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
} else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ if (tp->netdev->flags & IFF_UP)
+ rtl_runtime_suspend_enable(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b1ae4cbf2453..767ab11a6e9f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -140,6 +140,12 @@ struct virtnet_info {
/* CPU hot plug notifier */
struct notifier_block nb;
+
+ /* Control VQ buffers: protected by the rtnl lock */
+ struct virtio_net_ctrl_hdr ctrl_hdr;
+ virtio_net_ctrl_ack ctrl_status;
+ u8 ctrl_promisc;
+ u8 ctrl_allmulti;
};
struct padded_vnet_hdr {
@@ -974,31 +980,30 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
struct scatterlist *out)
{
struct scatterlist *sgs[4], hdr, stat;
- struct virtio_net_ctrl_hdr ctrl;
- virtio_net_ctrl_ack status = ~0;
unsigned out_num = 0, tmp;
/* Caller should know better */
BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
- ctrl.class = class;
- ctrl.cmd = cmd;
+ vi->ctrl_status = ~0;
+ vi->ctrl_hdr.class = class;
+ vi->ctrl_hdr.cmd = cmd;
/* Add header */
- sg_init_one(&hdr, &ctrl, sizeof(ctrl));
+ sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
sgs[out_num++] = &hdr;
if (out)
sgs[out_num++] = out;
/* Add return status. */
- sg_init_one(&stat, &status, sizeof(status));
+ sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
sgs[out_num] = &stat;
BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
if (unlikely(!virtqueue_kick(vi->cvq)))
- return status == VIRTIO_NET_OK;
+ return vi->ctrl_status == VIRTIO_NET_OK;
/* Spin for a response, the kick causes an ioport write, trapping
* into the hypervisor, so the request should be handled immediately.
@@ -1007,7 +1012,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
!virtqueue_is_broken(vi->cvq))
cpu_relax();
- return status == VIRTIO_NET_OK;
+ return vi->ctrl_status == VIRTIO_NET_OK;
}
static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -1149,7 +1154,6 @@ static void virtnet_set_rx_mode(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg[2];
- u8 promisc, allmulti;
struct virtio_net_ctrl_mac *mac_data;
struct netdev_hw_addr *ha;
int uc_count;
@@ -1161,22 +1165,22 @@ static void virtnet_set_rx_mode(struct net_device *dev)
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
return;
- promisc = ((dev->flags & IFF_PROMISC) != 0);
- allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
+ vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
+ vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
- sg_init_one(sg, &promisc, sizeof(promisc));
+ sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_PROMISC, sg))
dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
- promisc ? "en" : "dis");
+ vi->ctrl_promisc ? "en" : "dis");
- sg_init_one(sg, &allmulti, sizeof(allmulti));
+ sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
- allmulti ? "en" : "dis");
+ vi->ctrl_allmulti ? "en" : "dis");
uc_count = netdev_uc_count(dev);
mc_count = netdev_mc_count(dev);
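
The virtio-net hunks above move the control command header, status byte, and mode flags from the stack into virtnet_info (serialized by the rtnl lock), since buffers handed to a virtqueue must stay valid and addressable after the submitting function returns. A userspace model of the lifetime hazard; names are illustrative:

#include <stdio.h>

struct ctrl_state {
	unsigned char hdr[2];
	unsigned char status;
};

struct fake_dev {
	struct ctrl_state ctrl;	/* long-lived, like vi->ctrl_* */
	unsigned char *pending;	/* what the "device" will read later */
};

static void submit_command(struct fake_dev *dev,
			   unsigned char cls, unsigned char cmd)
{
	dev->ctrl.hdr[0] = cls;
	dev->ctrl.hdr[1] = cmd;
	dev->ctrl.status = 0xff;
	/* a stack buffer here would dangle once we return */
	dev->pending = dev->ctrl.hdr;
}

int main(void)
{
	struct fake_dev dev;

	submit_command(&dev, 4, 1);
	/* the "device" consumes the buffer after submit returned */
	printf("class %d cmd %d\n", dev.pending[0], dev.pending[1]);
	return 0;
}
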
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 6369a5734d4c..ba363cedef80 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1158,7 +1158,6 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
struct pcpu_sw_netstats *stats;
union vxlan_addr saddr;
int err = 0;
- union vxlan_addr *remote_ip;
/* For flow based devices, map all packets to VNI 0 */
if (vs->flags & VXLAN_F_COLLECT_METADATA)
@@ -1169,7 +1168,6 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
if (!vxlan)
goto drop;
- remote_ip = &vxlan->default_dst.remote_ip;
skb_reset_mac_header(skb);
skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
skb->protocol = eth_type_trans(skb, vxlan->dev);
@@ -1179,8 +1177,8 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
goto drop;
- /* Re-examine inner Ethernet packet */
- if (remote_ip->sa.sa_family == AF_INET) {
+ /* Get data from the outer IP header */
+ if (vxlan_get_sk_family(vs) == AF_INET) {
oip = ip_hdr(skb);
saddr.sin.sin_addr.s_addr = oip->saddr;
saddr.sa.sa_family = AF_INET;
@@ -1848,6 +1846,34 @@ static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *sk
!(vxflags & VXLAN_F_UDP_CSUM));
}
+#if IS_ENABLED(CONFIG_IPV6)
+static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
+ struct sk_buff *skb, int oif,
+ const struct in6_addr *daddr,
+ struct in6_addr *saddr)
+{
+ struct dst_entry *ndst;
+ struct flowi6 fl6;
+ int err;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_oif = oif;
+ fl6.daddr = *daddr;
+ fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
+ fl6.flowi6_mark = skb->mark;
+ fl6.flowi6_proto = IPPROTO_UDP;
+
+ err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
+ vxlan->vn6_sock->sock->sk,
+ &ndst, &fl6);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ *saddr = fl6.saddr;
+ return ndst;
+}
+#endif
+
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
struct vxlan_dev *dst_vxlan)
@@ -2035,21 +2061,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct dst_entry *ndst;
- struct flowi6 fl6;
+ struct in6_addr saddr;
u32 rt6i_flags;
if (!vxlan->vn6_sock)
goto drop;
sk = vxlan->vn6_sock->sock->sk;
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0;
- fl6.daddr = dst->sin6.sin6_addr;
- fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
- fl6.flowi6_mark = skb->mark;
- fl6.flowi6_proto = IPPROTO_UDP;
-
- if (ipv6_stub->ipv6_dst_lookup(vxlan->net, sk, &ndst, &fl6)) {
+ ndst = vxlan6_get_route(vxlan, skb,
+ rdst ? rdst->remote_ifindex : 0,
+ &dst->sin6.sin6_addr, &saddr);
+ if (IS_ERR(ndst)) {
netdev_dbg(dev, "no route to %pI6\n",
&dst->sin6.sin6_addr);
dev->stats.tx_carrier_errors++;
@@ -2081,7 +2103,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
ttl = ttl ? : ip6_dst_hoplimit(ndst);
- err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr,
+ err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr,
0, ttl, src_port, dst_port, htonl(vni << 8), md,
!net_eq(vxlan->net, dev_net(vxlan->dev)),
flags);
@@ -2395,9 +2417,30 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
vxlan->cfg.port_max, true);
dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
- if (ip_tunnel_info_af(info) == AF_INET)
+ if (ip_tunnel_info_af(info) == AF_INET) {
+ if (!vxlan->vn4_sock)
+ return -EINVAL;
return egress_ipv4_tun_info(dev, skb, info, sport, dport);
- return -EINVAL;
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct dst_entry *ndst;
+
+ if (!vxlan->vn6_sock)
+ return -EINVAL;
+ ndst = vxlan6_get_route(vxlan, skb, 0,
+ &info->key.u.ipv6.dst,
+ &info->key.u.ipv6.src);
+ if (IS_ERR(ndst))
+ return PTR_ERR(ndst);
+ dst_release(ndst);
+
+ info->key.tp_src = sport;
+ info->key.tp_dst = dport;
+#else /* !CONFIG_IPV6 */
+ return -EPFNOSUPPORT;
+#endif
+ }
+ return 0;
}
static const struct net_device_ops vxlan_netdev_ops = {
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 06c336410235..15f2acb4d5cd 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -271,9 +271,9 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
return 0;
}
-static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
+static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
- struct nvme_ns *ns = q->queuedata;
+ struct nvme_ns *ns = nvmdev->q->queuedata;
struct nvme_dev *dev = ns->dev;
struct nvme_nvm_id *nvme_nvm_id;
struct nvme_nvm_command c = {};
@@ -308,10 +308,10 @@ out:
return ret;
}
-static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
+static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
nvm_l2p_update_fn *update_l2p, void *priv)
{
- struct nvme_ns *ns = q->queuedata;
+ struct nvme_ns *ns = nvmdev->q->queuedata;
struct nvme_dev *dev = ns->dev;
struct nvme_nvm_command c = {};
u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
@@ -415,10 +415,10 @@ out:
return ret;
}
-static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd,
+static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
int type)
{
- struct nvme_ns *ns = q->queuedata;
+ struct nvme_ns *ns = nvmdev->q->queuedata;
struct nvme_dev *dev = ns->dev;
struct nvme_nvm_command c = {};
int ret = 0;
@@ -455,7 +455,7 @@ static void nvme_nvm_end_io(struct request *rq, int error)
struct nvm_rq *rqd = rq->end_io_data;
struct nvm_dev *dev = rqd->dev;
- if (dev->mt->end_io(rqd, error))
+ if (dev->mt && dev->mt->end_io(rqd, error))
pr_err("nvme: err status: %x result: %lx\n",
rq->errors, (unsigned long)rq->special);
@@ -463,8 +463,9 @@ static void nvme_nvm_end_io(struct request *rq, int error)
blk_mq_free_request(rq);
}
-static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
+static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
+ struct request_queue *q = dev->q;
struct nvme_ns *ns = q->queuedata;
struct request *rq;
struct bio *bio = rqd->bio;
@@ -502,8 +503,9 @@ static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
return 0;
}
-static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd)
+static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
{
+ struct request_queue *q = dev->q;
struct nvme_ns *ns = q->queuedata;
struct nvme_nvm_command c = {};
@@ -515,9 +517,9 @@ static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd)
return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
}
-static void *nvme_nvm_create_dma_pool(struct request_queue *q, char *name)
+static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
- struct nvme_ns *ns = q->queuedata;
+ struct nvme_ns *ns = nvmdev->q->queuedata;
struct nvme_dev *dev = ns->dev;
return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0);
@@ -530,7 +532,7 @@ static void nvme_nvm_destroy_dma_pool(void *pool)
dma_pool_destroy(dma_pool);
}
-static void *nvme_nvm_dev_dma_alloc(struct request_queue *q, void *pool,
+static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
gfp_t mem_flags, dma_addr_t *dma_handler)
{
return dma_pool_alloc(pool, mem_flags, dma_handler);
diff --git a/drivers/of/address.c b/drivers/of/address.c
index cd53fe4a0c86..9582c5703b3c 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -485,9 +485,10 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
int rone;
u64 offset = OF_BAD_ADDR;
- /* Normally, an absence of a "ranges" property means we are
+ /*
+ * Normally, an absence of a "ranges" property means we are
* crossing a non-translatable boundary, and thus the addresses
- * below the current not cannot be converted to CPU physical ones.
+ * below the current cannot be converted to CPU physical ones.
* Unfortunately, while this is very clear in the spec, it's not
* what Apple understood, and they do have things like /uni-n or
* /ht nodes with no "ranges" property and a lot of perfectly
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index d2430298a309..655f79db7899 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/initrd.h>
#include <linux/memblock.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
@@ -436,6 +437,8 @@ static void *kernel_tree_alloc(u64 size, u64 align)
return kzalloc(size, GFP_KERNEL);
}
+static DEFINE_MUTEX(of_fdt_unflatten_mutex);
+
/**
* of_fdt_unflatten_tree - create tree of device_nodes from flat blob
*
@@ -447,7 +450,9 @@ static void *kernel_tree_alloc(u64 size, u64 align)
void of_fdt_unflatten_tree(const unsigned long *blob,
struct device_node **mynodes)
{
+ mutex_lock(&of_fdt_unflatten_mutex);
__unflatten_device_tree(blob, mynodes, &kernel_tree_alloc);
+ mutex_unlock(&of_fdt_unflatten_mutex);
}
EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
@@ -1041,7 +1046,7 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
phys_addr_t size, bool nomap)
{
- pr_err("Reserved memory not supported, ignoring range 0x%pa - 0x%pa%s\n",
+ pr_err("Reserved memory not supported, ignoring range %pa - %pa%s\n",
&base, &size, nomap ? " (nomap)" : "");
return -ENOSYS;
}
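
The of_fdt_unflatten_tree() hunk above uses the stock pattern for serializing a non-reentrant helper: a file-scope DEFINE_MUTEX plus a lock/unlock bracket around the call, so concurrent callers queue up instead of corrupting shared parser state. A minimal sketch of the same pattern, with hypothetical names (do_unflatten is not a real kernel symbol):

    #include <linux/mutex.h>

    static DEFINE_MUTEX(unflatten_lock);    /* file scope, statically initialized */

    /* Hypothetical non-reentrant worker. */
    static void do_unflatten(const void *blob)
    {
        /* ... parsing work that must not run concurrently ... */
    }

    void unflatten_serialized(const void *blob)
    {
        mutex_lock(&unflatten_lock);    /* may sleep; not for atomic context */
        do_unflatten(blob);
        mutex_unlock(&unflatten_lock);
    }
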
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 902b89be7217..4fa916dffc91 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -53,7 +53,7 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
* Returns a pointer to the interrupt parent node, or NULL if the interrupt
* parent could not be determined.
*/
-static struct device_node *of_irq_find_parent(struct device_node *child)
+struct device_node *of_irq_find_parent(struct device_node *child)
{
struct device_node *p;
const __be32 *parp;
@@ -77,6 +77,7 @@ static struct device_node *of_irq_find_parent(struct device_node *child)
return p;
}
+EXPORT_SYMBOL_GPL(of_irq_find_parent);
/**
* of_irq_parse_raw - Low level interrupt tree parsing
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index be77e75c587d..1a3556a9e9ea 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -206,7 +206,13 @@ static int __init __rmem_cmp(const void *a, const void *b)
{
const struct reserved_mem *ra = a, *rb = b;
- return ra->base - rb->base;
+ if (ra->base < rb->base)
+ return -1;
+
+ if (ra->base > rb->base)
+ return 1;
+
+ return 0;
}
static void __init __rmem_check_for_overlap(void)
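
The __rmem_cmp() fix above is the classic comparator pitfall: subtracting two wide unsigned values and letting the difference truncate to the comparator's int return type can yield the wrong sign, so sort() may mis-order entries whose bases differ by a multiple of 2^32. A standalone illustration with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    /* Broken: the u64 difference truncates to int; a difference of
     * exactly 1ULL << 32 becomes 0 ("equal") on common ABIs. */
    static int cmp_broken(uint64_t a, uint64_t b)
    {
        return a - b;
    }

    /* Fixed: explicit comparisons, as in the patch. */
    static int cmp_fixed(uint64_t a, uint64_t b)
    {
        return a < b ? -1 : (a > b ? 1 : 0);
    }

    int main(void)
    {
        uint64_t a = 1ULL << 32, b = 0;

        printf("broken=%d fixed=%d\n", cmp_broken(a, b), cmp_fixed(a, b));
        return 0;
    }
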
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
index 761e77bfce5d..e56f1569f6c3 100644
--- a/drivers/parisc/iommu-helpers.h
+++ b/drivers/parisc/iommu-helpers.h
@@ -104,7 +104,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
struct scatterlist *contig_sg; /* contig chunk head */
unsigned long dma_offset, dma_len; /* start/len of DMA stream */
unsigned int n_mappings = 0;
- unsigned int max_seg_size = dma_get_max_seg_size(dev);
+ unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
+ (unsigned)DMA_CHUNK_SIZE);
+ unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;
+ if (max_seg_boundary) /* check if the addition above didn't overflow */
+ max_seg_size = min(max_seg_size, max_seg_boundary);
while (nents > 0) {
@@ -138,14 +142,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
/*
** First make sure current dma stream won't
- ** exceed DMA_CHUNK_SIZE if we coalesce the
+ ** exceed max_seg_size if we coalesce the
** next entry.
*/
- if(unlikely(ALIGN(dma_len + dma_offset + startsg->length,
- IOVP_SIZE) > DMA_CHUNK_SIZE))
- break;
-
- if (startsg->length + dma_len > max_seg_size)
+ if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
+ max_seg_size))
break;
/*
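
Two things happen in the iommu-helpers.h hunk: the coalescing limit now honors dma_get_max_seg_size() and DMA_CHUNK_SIZE together, and the segment boundary mask is folded in with an overflow guard, since dma_get_seg_boundary() can return ~0UL and the +1 then wraps to 0, meaning "no extra limit". A sketch of just the clamping logic, with hypothetical values:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Fold a boundary mask into a max segment size; skip the clamp
     * when mask + 1 wrapped to 0 (an all-ones mask is unlimited). */
    static unsigned long clamp_seg(unsigned long max_seg_size,
                                   unsigned long boundary_mask)
    {
        unsigned long max_seg_boundary = boundary_mask + 1;

        if (max_seg_boundary)
            max_seg_size = MIN(max_seg_size, max_seg_boundary);
        return max_seg_size;
    }

    int main(void)
    {
        printf("%#lx\n", clamp_seg(0x10000, ~0UL));  /* 0x10000: unchanged */
        printf("%#lx\n", clamp_seg(0x10000, 0xfff)); /* 0x1000: clamped */
        return 0;
    }
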
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index e5dda38bdde5..99da549d5d06 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -55,8 +55,10 @@
#define TLP_CFG_DW2(bus, devfn, offset) \
(((bus) << 24) | ((devfn) << 16) | (offset))
#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
+#define TLP_COMP_STATUS(s) (((s) >> 12) & 7)
#define TLP_HDR_SIZE 3
#define TLP_LOOP 500
+#define RP_DEVFN 0
#define INTX_NUM 4
@@ -166,34 +168,41 @@ static bool altera_pcie_valid_config(struct altera_pcie *pcie,
static int tlp_read_packet(struct altera_pcie *pcie, u32 *value)
{
- u8 loop;
+ int i;
bool sop = 0;
u32 ctrl;
u32 reg0, reg1;
+ u32 comp_status = 1;
/*
* Minimum 2 loops to read TLP headers and 1 loop to read data
* payload.
*/
- for (loop = 0; loop < TLP_LOOP; loop++) {
+ for (i = 0; i < TLP_LOOP; i++) {
ctrl = cra_readl(pcie, RP_RXCPL_STATUS);
if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) {
reg0 = cra_readl(pcie, RP_RXCPL_REG0);
reg1 = cra_readl(pcie, RP_RXCPL_REG1);
- if (ctrl & RP_RXCPL_SOP)
+ if (ctrl & RP_RXCPL_SOP) {
sop = true;
+ comp_status = TLP_COMP_STATUS(reg1);
+ }
if (ctrl & RP_RXCPL_EOP) {
+ if (comp_status)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
if (value)
*value = reg0;
+
return PCIBIOS_SUCCESSFUL;
}
}
udelay(5);
}
- return -ENOENT;
+ return PCIBIOS_DEVICE_NOT_FOUND;
}
static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
@@ -233,7 +242,7 @@ static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
else
headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1);
- headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, devfn),
+ headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
TLP_READ_TAG, byte_en);
headers[2] = TLP_CFG_DW2(bus, devfn, where);
@@ -253,7 +262,7 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
else
headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR1);
- headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, devfn),
+ headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
TLP_WRITE_TAG, byte_en);
headers[2] = TLP_CFG_DW2(bus, devfn, where);
@@ -458,7 +467,7 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
struct device_node *node = dev->of_node;
/* Setup INTx */
- pcie->irq_domain = irq_domain_add_linear(node, INTX_NUM,
+ pcie->irq_domain = irq_domain_add_linear(node, INTX_NUM + 1,
&intx_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
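
TLP_COMP_STATUS() extracts the 3-bit completion-status field from bits 14:12 of the second completion header dword, and the read path now fails with PCIBIOS_DEVICE_NOT_FOUND on any non-zero status instead of handing back stale data. Checking the shift-and-mask against made-up header words:

    #include <stdint.h>
    #include <stdio.h>

    #define TLP_COMP_STATUS(s) (((s) >> 12) & 7)

    int main(void)
    {
        uint32_t ok = 0x00000000; /* status 0: Successful Completion */
        uint32_t ur = 0x00001000; /* status 1: Unsupported Request */

        printf("%u %u\n", TLP_COMP_STATUS(ok), TLP_COMP_STATUS(ur));
        return 0;
    }
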
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 53e463244bb7..7eaa4c87fec7 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -54,7 +54,7 @@ static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
struct irq_domain *domain;
domain = pci_msi_get_domain(dev);
- if (domain)
+ if (domain && irq_domain_is_hierarchy(domain))
return pci_msi_domain_alloc_irqs(domain, dev, nvec, type);
return arch_setup_msi_irqs(dev, nvec, type);
@@ -65,7 +65,7 @@ static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
struct irq_domain *domain;
domain = pci_msi_get_domain(dev);
- if (domain)
+ if (domain && irq_domain_is_hierarchy(domain))
pci_msi_domain_free_irqs(domain, dev);
else
arch_teardown_msi_irqs(dev);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 4446fcb5effd..d7ffd66814bb 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1146,9 +1146,21 @@ static int pci_pm_runtime_suspend(struct device *dev)
pci_dev->state_saved = false;
pci_dev->no_d3cold = false;
error = pm->runtime_suspend(dev);
- suspend_report_result(pm->runtime_suspend, error);
- if (error)
+ if (error) {
+ /*
+ * -EBUSY and -EAGAIN are used to request the runtime PM core
+ * to schedule a new suspend, so log the event only with debug
+ * log level.
+ */
+ if (error == -EBUSY || error == -EAGAIN)
+ dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
+ pm->runtime_suspend, error);
+ else
+ dev_err(dev, "can't suspend (%pf returned %d)\n",
+ pm->runtime_suspend, error);
+
return error;
+ }
if (!pci_dev->d3cold_allowed)
pci_dev->no_d3cold = true;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 5f692ae40749..64eed87d34a8 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -364,6 +364,7 @@ config SCSI_HPSA
tristate "HP Smart Array SCSI driver"
depends on PCI && SCSI
select CHECK_SIGNATURE
+ select SCSI_SAS_ATTRS
help
This driver supports HP Smart Array Controllers (circa 2009).
It is a SCSI alternative to the cciss driver, which is a block
@@ -499,6 +500,7 @@ config SCSI_ADVANSYS
tristate "AdvanSys SCSI support"
depends on SCSI
depends on ISA || EISA || PCI
+ depends on ISA_DMA_API || !ISA
help
This is a driver for all SCSI host adapters manufactured by
AdvanSys. It is documented in the kernel source in
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 519f9a4b3dad..febbd83e2ecd 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -7803,7 +7803,7 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
return ASC_BUSY;
}
scsiqp->sense_addr = cpu_to_le32(sense_addr);
- scsiqp->sense_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
+ scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE;
/* Build ADV_SCSI_REQ_Q */
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 323982fd00c3..82ac1cd818ac 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -333,6 +333,17 @@ static void scsi_host_dev_release(struct device *dev)
kfree(queuedata);
}
+ if (shost->shost_state == SHOST_CREATED) {
+ /*
+ * Free the shost_dev device name here if scsi_host_alloc()
+ * and scsi_host_put() have been called but neither
+ * scsi_host_add() nor scsi_host_remove() has been called.
+ * This avoids leaking the memory allocated for the
+ * shost_dev name.
+ */
+ kfree(dev_name(&shost->shost_dev));
+ }
+
scsi_destroy_command_freelist(shost);
if (shost_use_blk_mq(shost)) {
if (shost->tag_set.tags)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 6a8f95808ee0..a3860367b568 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -8671,7 +8671,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
- if (*options && HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
+ if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
goto out;
errout:
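
The hpsa fix is the perennial '&&' versus '&' slip: with the logical operator the condition only tests that both operands are non-zero, so any non-zero *options looked like success even when the DISABLE_RLD_CACHING bit was clear. Demonstrated with a made-up flag value:

    #include <stdio.h>

    #define DISABLE_RLD_CACHING 0x4

    int main(void)
    {
        unsigned int options = 0x1; /* some other bit set, not ours */

        printf("&&: %d\n", options && DISABLE_RLD_CACHING);    /* 1: wrong */
        printf("&:  %d\n", !!(options & DISABLE_RLD_CACHING)); /* 0: right */
        return 0;
    }
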
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig
index 29061467cc17..b736dbc80485 100644
--- a/drivers/scsi/mpt3sas/Kconfig
+++ b/drivers/scsi/mpt3sas/Kconfig
@@ -71,3 +71,12 @@ config SCSI_MPT3SAS_MAX_SGE
MAX_PHYS_SEGMENTS in most kernels. However in SuSE kernels this
can be 256. However, it may be decreased down to 16. Decreasing this
parameter will reduce memory requirements per controller instance.
+
+config SCSI_MPT2SAS
+ tristate "Legacy MPT2SAS config option"
+ default n
+ select SCSI_MPT3SAS
+ depends on PCI && SCSI
+ ---help---
+ Dummy config option for backwards compatibility: configure the MPT3SAS
+ driver instead.
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index d95206b7e116..9ab77b06434d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3905,8 +3905,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
* We do not expose raid functionality to upper layer for warpdrive.
*/
if (!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev)
- && (sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
- scmd->cmd_len != 32)
+ && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 90fdf0e859e3..675e7fab0796 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -758,7 +758,7 @@ mvs_store_interrupt_coalescing(struct device *cdev,
struct device_attribute *attr,
const char *buffer, size_t size)
{
- int val = 0;
+ unsigned int val = 0;
struct mvs_info *mvi = NULL;
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
@@ -766,7 +766,7 @@ mvs_store_interrupt_coalescing(struct device *cdev,
if (buffer == NULL)
return size;
- if (sscanf(buffer, "%d", &val) != 1)
+ if (sscanf(buffer, "%u", &val) != 1)
return -EINVAL;
if (val >= 0x10000) {
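
The mvsas change keeps the variable's type and its sscanf() conversion in agreement: parsing into an unsigned int with %d is undefined behavior, and a signed int parsed from "-1" would slip past the >= 0x10000 range check as a negative value. A tiny demonstration of the corrected pairing (hypothetical input):

    #include <stdio.h>

    int main(void)
    {
        unsigned int val = 0;

        /* %u with unsigned int: "-1" wraps to UINT_MAX here and is
         * rejected by a >= 0x10000 check, whereas an int read with
         * %d would be -1 and pass it. */
        if (sscanf("-1", "%u", &val) == 1)
            printf("val = %u\n", val);
        return 0;
    }
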
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index eb0cc5475c45..b6b4cfdd7620 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -433,7 +433,7 @@ qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
if (off_in < QLA82XX_PCI_CRBSPACE)
return -1;
- *off_out = (void __iomem *)(off_in - QLA82XX_PCI_CRBSPACE);
+ off_in -= QLA82XX_PCI_CRBSPACE;
/* Try direct map */
m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)];
@@ -443,6 +443,7 @@ qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
return 0;
}
/* Not in direct map, use crb window */
+ *off_out = (void __iomem *)off_in;
return 1;
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index dfcc45bb03b1..d09d60293c27 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -465,8 +465,9 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
0} },
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */
- {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
+ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
+ 0, 0, 0, 0, 0, 0} },
{1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
@@ -477,8 +478,8 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
{10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
0} },
/* 20 */
- {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */
- {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
+ {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
{6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 83245391e956..054923e3393c 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -701,9 +701,12 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
* strings.
*/
if (sdev->inquiry_len < 36) {
- sdev_printk(KERN_INFO, sdev,
- "scsi scan: INQUIRY result too short (%d),"
- " using 36\n", sdev->inquiry_len);
+ if (!sdev->host->short_inquiry) {
+ shost_printk(KERN_INFO, sdev->host,
+ "scsi scan: INQUIRY result too short (%d),"
+ " using 36\n", sdev->inquiry_len);
+ sdev->host->short_inquiry = 1;
+ }
sdev->inquiry_len = 36;
}
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 8d2312239ae0..21930c9ac9cd 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1102,6 +1102,14 @@ void __scsi_remove_device(struct scsi_device *sdev)
{
struct device *dev = &sdev->sdev_gendev;
+ /*
+ * This cleanup path is not reentrant and while it is impossible
+ * to get a new reference with scsi_device_get() someone can still
+ * hold a previously acquired one.
+ */
+ if (sdev->sdev_state == SDEV_DEL)
+ return;
+
if (sdev->is_visible) {
if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
return;
@@ -1110,7 +1118,9 @@ void __scsi_remove_device(struct scsi_device *sdev)
device_unregister(&sdev->sdev_dev);
transport_remove_device(dev);
scsi_dh_remove_device(sdev);
- }
+ device_del(dev);
+ } else
+ put_device(&sdev->sdev_dev);
/*
* Stop accepting new requests and wait until all queuecommand() and
@@ -1121,16 +1131,6 @@ void __scsi_remove_device(struct scsi_device *sdev)
blk_cleanup_queue(sdev->request_queue);
cancel_work_sync(&sdev->requeue_work);
- /*
- * Remove the device after blk_cleanup_queue() has been called such
- * a possible bdi_register() call with the same name occurs after
- * blk_cleanup_queue() has called bdi_destroy().
- */
- if (sdev->is_visible)
- device_del(dev);
- else
- put_device(&sdev->sdev_dev);
-
if (sdev->host->hostt->slave_destroy)
sdev->host->hostt->slave_destroy(sdev);
transport_destroy_device(dev);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 54519804c46a..3d22fc3e3c1a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -638,11 +638,24 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
unsigned int max_blocks = 0;
q->limits.discard_zeroes_data = 0;
- q->limits.discard_alignment = sdkp->unmap_alignment *
- logical_block_size;
- q->limits.discard_granularity =
- max(sdkp->physical_block_size,
- sdkp->unmap_granularity * logical_block_size);
+
+ /*
+ * When LBPRZ is reported, discard alignment and granularity
+ * must be fixed to the logical block size. Otherwise the block
+ * layer will drop misaligned portions of the request which can
+ * lead to data corruption. If LBPRZ is not set, we honor the
+ * device preference.
+ */
+ if (sdkp->lbprz) {
+ q->limits.discard_alignment = 0;
+ q->limits.discard_granularity = 1;
+ } else {
+ q->limits.discard_alignment = sdkp->unmap_alignment *
+ logical_block_size;
+ q->limits.discard_granularity =
+ max(sdkp->physical_block_size,
+ sdkp->unmap_granularity * logical_block_size);
+ }
sdkp->provisioning_mode = mode;
@@ -2321,11 +2334,8 @@ got_data:
}
}
- if (sdkp->capacity > 0xffffffff) {
+ if (sdkp->capacity > 0xffffffff)
sdp->use_16_for_rw = 1;
- sdkp->max_xfer_blocks = SD_MAX_XFER_BLOCKS;
- } else
- sdkp->max_xfer_blocks = SD_DEF_XFER_BLOCKS;
/* Rescale capacity to 512-byte units */
if (sector_size == 4096)
@@ -2642,7 +2652,6 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
{
unsigned int sector_sz = sdkp->device->sector_size;
const int vpd_len = 64;
- u32 max_xfer_length;
unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
if (!buffer ||
@@ -2650,14 +2659,11 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
goto out;
- max_xfer_length = get_unaligned_be32(&buffer[8]);
- if (max_xfer_length)
- sdkp->max_xfer_blocks = max_xfer_length;
-
blk_queue_io_min(sdkp->disk->queue,
get_unaligned_be16(&buffer[6]) * sector_sz);
- blk_queue_io_opt(sdkp->disk->queue,
- get_unaligned_be32(&buffer[12]) * sector_sz);
+
+ sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
+ sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);
if (buffer[3] == 0x3c) {
unsigned int lba_count, desc_count;
@@ -2806,6 +2812,11 @@ static int sd_try_extended_inquiry(struct scsi_device *sdp)
return 0;
}
+static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
+{
+ return blocks << (ilog2(sdev->sector_size) - 9);
+}
+
/**
* sd_revalidate_disk - called the first time a new disk is seen,
* performs disk spin up, read_capacity, etc.
@@ -2815,8 +2826,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
{
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
+ struct request_queue *q = sdkp->disk->queue;
unsigned char *buffer;
- unsigned int max_xfer;
+ unsigned int dev_max, rw_max;
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
"sd_revalidate_disk\n"));
@@ -2864,11 +2876,26 @@ static int sd_revalidate_disk(struct gendisk *disk)
*/
sd_set_flush_flag(sdkp);
- max_xfer = sdkp->max_xfer_blocks;
- max_xfer <<= ilog2(sdp->sector_size) - 9;
+ /* Initial block count limit based on CDB TRANSFER LENGTH field size. */
+ dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
+
+ /* Some devices report a maximum block count for READ/WRITE requests. */
+ dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
+ q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
+
+ /*
+ * Use the device's preferred I/O size for reads and writes
+ * unless the reported value is unreasonably large (or garbage).
+ */
+ if (sdkp->opt_xfer_blocks && sdkp->opt_xfer_blocks <= dev_max &&
+ sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS)
+ rw_max = q->limits.io_opt =
+ logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
+ else
+ rw_max = BLK_DEF_MAX_SECTORS;
- sdkp->disk->queue->limits.max_sectors =
- min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
+ /* Combine with controller limits */
+ q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
set_capacity(disk, sdkp->capacity);
sd_config_write_same(sdkp);
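
logical_to_sectors() converts a count of logical blocks into 512-byte sectors by shifting left by ilog2(sector_size) - 9, so a 4096-byte-sector device multiplies by 8 and a 512-byte one by 1; sd_revalidate_disk() then combines the CDB-size and device-reported limits with min_not_zero() and clamps against the controller's max_hw_sectors. The shift arithmetic, spelled out with hypothetical sizes:

    #include <stdint.h>
    #include <stdio.h>

    /* sector_size must be a power of two >= 512. */
    static uint32_t logical_to_sectors(uint32_t sector_size, uint32_t blocks)
    {
        unsigned int log2 = 0;

        while ((sector_size >> log2) > 1) /* poor man's ilog2() */
            log2++;
        return blocks << (log2 - 9);
    }

    int main(void)
    {
        printf("%u\n", logical_to_sectors(4096, 100)); /* 800 */
        printf("%u\n", logical_to_sectors(512, 100));  /* 100 */
        return 0;
    }
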
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 63ba5ca7f9a1..5f2a84aff29f 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -67,6 +67,7 @@ struct scsi_disk {
atomic_t openers;
sector_t capacity; /* size in 512-byte sectors */
u32 max_xfer_blocks;
+ u32 opt_xfer_blocks;
u32 max_ws_blocks;
u32 max_unmap_blocks;
u32 unmap_granularity;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index e0a1e52a04e7..2e522951b619 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4083,6 +4083,7 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
}
cdev->owner = THIS_MODULE;
cdev->ops = &st_fops;
+ STm->cdevs[rew] = cdev;
error = cdev_add(cdev, cdev_devno, 1);
if (error) {
@@ -4091,7 +4092,6 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
pr_err("st%d: Device not attached.\n", dev_num);
goto out_free;
}
- STm->cdevs[rew] = cdev;
i = mode << (4 - ST_NBR_MODE_BITS);
snprintf(name, 10, "%s%s%s", rew ? "n" : "",
@@ -4110,8 +4110,9 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
return 0;
out_free:
cdev_del(STm->cdevs[rew]);
- STm->cdevs[rew] = NULL;
out:
+ STm->cdevs[rew] = NULL;
+ STm->devs[rew] = NULL;
return error;
}
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 195c41d7bd53..0813163f962f 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -81,7 +81,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
err:
sg = table->sgl;
for (i -= 1; i >= 0; i--) {
- gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
+ gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
sg->length);
sg = sg_next(sg);
}
@@ -109,7 +109,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
DMA_BIDIRECTIONAL);
for_each_sg(table->sgl, sg, table->nents, i) {
- gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
+ gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
sg->length);
}
chunk_heap->allocated -= allocated_size;
diff --git a/drivers/staging/iio/iio_simple_dummy_events.c b/drivers/staging/iio/iio_simple_dummy_events.c
index bfbf1c56bd22..6eb600ff7056 100644
--- a/drivers/staging/iio/iio_simple_dummy_events.c
+++ b/drivers/staging/iio/iio_simple_dummy_events.c
@@ -159,7 +159,7 @@ static irqreturn_t iio_simple_dummy_get_timestamp(int irq, void *private)
struct iio_dummy_state *st = iio_priv(indio_dev);
st->event_timestamp = iio_get_time_ns();
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
/**
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
index f5d741f25ffd..485ab2670918 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
@@ -110,7 +110,6 @@ struct libcfs_ioctl_handler {
#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, long)
#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, long)
#define IOC_LIBCFS_MEMHOG _IOWR('e', 36, long)
-#define IOC_LIBCFS_PING_TEST _IOWR('e', 37, long)
/* lnet ioctls */
#define IOC_LIBCFS_GET_NI _IOWR('e', 50, long)
#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, long)
diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
index 07a68594c279..e7c2b26156b9 100644
--- a/drivers/staging/lustre/lustre/libcfs/module.c
+++ b/drivers/staging/lustre/lustre/libcfs/module.c
@@ -274,23 +274,6 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd,
}
break;
- case IOC_LIBCFS_PING_TEST: {
- extern void (kping_client)(struct libcfs_ioctl_data *);
- void (*ping)(struct libcfs_ioctl_data *);
-
- CDEBUG(D_IOCTL, "doing %d pings to nid %s (%s)\n",
- data->ioc_count, libcfs_nid2str(data->ioc_nid),
- libcfs_nid2str(data->ioc_nid));
- ping = symbol_get(kping_client);
- if (!ping)
- CERROR("symbol_get failed\n");
- else {
- ping(data);
- symbol_put(kping_client);
- }
- return 0;
- }
-
default: {
struct libcfs_ioctl_handler *hand;
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index f61ef669644c..a4a9a763ff02 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -1270,6 +1270,7 @@ static int
echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
{
struct lov_stripe_md *ulsm = _ulsm;
+ struct lov_oinfo **p;
int nob, i;
nob = offsetof(struct lov_stripe_md, lsm_oinfo[lsm->lsm_stripe_count]);
@@ -1279,9 +1280,10 @@ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
if (copy_to_user(ulsm, lsm, sizeof(*ulsm)))
return -EFAULT;
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- if (copy_to_user(ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i],
- sizeof(lsm->lsm_oinfo[0])))
+ for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
+ struct lov_oinfo __user *up;
+ if (get_user(up, ulsm->lsm_oinfo + i) ||
+ copy_to_user(up, *p, sizeof(struct lov_oinfo)))
return -EFAULT;
}
return 0;
@@ -1289,9 +1291,10 @@ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
static int
echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
- void *ulsm, int ulsm_nob)
+ struct lov_stripe_md __user *ulsm, int ulsm_nob)
{
struct echo_client_obd *ec = ed->ed_ec;
+ struct lov_oinfo **p;
int i;
if (ulsm_nob < sizeof(*lsm))
@@ -1306,11 +1309,10 @@ echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
((__u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count > ~0UL))
return -EINVAL;
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- if (copy_from_user(lsm->lsm_oinfo[i],
- ((struct lov_stripe_md *)ulsm)-> \
- lsm_oinfo[i],
- sizeof(lsm->lsm_oinfo[0])))
+ for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
+ struct lov_oinfo __user *up;
+ if (get_user(up, ulsm->lsm_oinfo + i) ||
+ copy_from_user(*p, up, sizeof(struct lov_oinfo)))
return -EFAULT;
}
return 0;
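
Both lustre loops now fetch each user-held pointer with get_user() before copying through it, rather than dereferencing ulsm->lsm_oinfo[i] directly from kernel context. The shape of that idiom, as a hypothetical helper:

    #include <linux/uaccess.h>

    /* Hypothetical: copy a kernel object out through a pointer table
     * that itself lives in user memory; the table slot must be read
     * with get_user() before it can be used as a copy target. */
    static int copy_via_user_table(void __user *const __user *table,
                                   int i, const void *src, size_t len)
    {
        void __user *dst;

        if (get_user(dst, table + i))
            return -EFAULT;
        if (copy_to_user(dst, src, len))
            return -EFAULT;
        return 0;
    }
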
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index b30e7423549b..26ca4f910cb0 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1838,6 +1838,11 @@ static const struct usb_device_id acm_ids[] = {
},
#endif
+ /* Exclude Infineon Flash Loader utility */
+ { USB_DEVICE(0x058b, 0x0041),
+ .driver_info = IGNORE_DEVICE,
+ },
+
/* control interfaces without any protocol set */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_PROTO_NONE) },
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 7caff020106e..5050760f5e17 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -115,7 +115,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
USB_SS_MULT(desc->bmAttributes) > 3) {
dev_warn(ddev, "Isoc endpoint has Mult of %d in "
"config %d interface %d altsetting %d ep %d: "
- "setting to 3\n", desc->bmAttributes + 1,
+ "setting to 3\n",
+ USB_SS_MULT(desc->bmAttributes),
cfgno, inum, asnum, ep->desc.bEndpointAddress);
ep->ss_ep_comp.bmAttributes = 2;
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index bdeadc112d29..a5cc032ef77a 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -124,6 +124,10 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
int usb_device_supports_lpm(struct usb_device *udev)
{
+ /* Some devices have trouble with LPM */
+ if (udev->quirks & USB_QUIRK_NO_LPM)
+ return 0;
+
/* USB 2.1 (and greater) devices indicate LPM support through
* their USB 2.0 Extended Capabilities BOS descriptor.
*/
@@ -4512,6 +4516,8 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
goto fail;
}
+ usb_detect_quirks(udev);
+
if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) {
retval = usb_get_bos_descriptor(udev);
if (!retval) {
@@ -4710,7 +4716,6 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
if (status < 0)
goto loop;
- usb_detect_quirks(udev);
if (udev->quirks & USB_QUIRK_DELAY_INIT)
msleep(1000);
@@ -5326,9 +5331,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
if (udev->usb2_hw_lpm_enabled == 1)
usb_set_usb2_hardware_lpm(udev, 0);
- bos = udev->bos;
- udev->bos = NULL;
-
/* Disable LPM and LTM while we reset the device and reinstall the alt
* settings. Device-initiated LPM settings, and system exit latency
* settings are cleared when the device is reset, so we have to set
@@ -5337,15 +5339,18 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
ret = usb_unlocked_disable_lpm(udev);
if (ret) {
dev_err(&udev->dev, "%s Failed to disable LPM\n.", __func__);
- goto re_enumerate;
+ goto re_enumerate_no_bos;
}
ret = usb_disable_ltm(udev);
if (ret) {
dev_err(&udev->dev, "%s Failed to disable LTM\n.",
__func__);
- goto re_enumerate;
+ goto re_enumerate_no_bos;
}
+ bos = udev->bos;
+ udev->bos = NULL;
+
for (i = 0; i < SET_CONFIG_TRIES; ++i) {
/* ep0 maxpacket size may change; let the HCD know about it.
@@ -5442,10 +5447,11 @@ done:
return 0;
re_enumerate:
- /* LPM state doesn't matter when we're about to destroy the device. */
- hub_port_logical_disconnect(parent_hub, port1);
usb_release_bos_descriptor(udev);
udev->bos = bos;
+re_enumerate_no_bos:
+ /* LPM state doesn't matter when we're about to destroy the device. */
+ hub_port_logical_disconnect(parent_hub, port1);
return -ENODEV;
}
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 210618319f10..5487fe308f01 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -206,7 +206,7 @@ static int link_peers(struct usb_port *left, struct usb_port *right)
else
method = "default";
- pr_warn("usb: failed to peer %s and %s by %s (%s:%s) (%s:%s)\n",
+ pr_debug("usb: failed to peer %s and %s by %s (%s:%s) (%s:%s)\n",
dev_name(&left->dev), dev_name(&right->dev), method,
dev_name(&left->dev),
lpeer ? dev_name(&lpeer->dev) : "none",
@@ -265,7 +265,7 @@ static void link_peers_report(struct usb_port *left, struct usb_port *right)
if (rc == 0) {
dev_dbg(&left->dev, "peered to %s\n", dev_name(&right->dev));
} else {
- dev_warn(&left->dev, "failed to peer to %s (%d)\n",
+ dev_dbg(&left->dev, "failed to peer to %s (%d)\n",
dev_name(&right->dev), rc);
pr_warn_once("usb: port power management may be unreliable\n");
usb_port_block_power_off = 1;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f5a381945db2..6dc810bce295 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -125,6 +125,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x04f3, 0x016f), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
+ { USB_DEVICE(0x04f3, 0x21b8), .driver_info =
+ USB_QUIRK_DEVICE_QUALIFIER },
+
/* Roland SC-8820 */
{ USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -199,6 +202,12 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Blackmagic Design Intensity Shuttle */
+ { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
+
+ /* Blackmagic Design UltraStudio SDI */
+ { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
+
{ } /* terminating entry must be last */
};
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index e61d773cf65e..39c1cbf0e75d 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -125,9 +125,11 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
if (ret)
return ret;
- ret = clk_prepare_enable(hsotg->clk);
- if (ret)
- return ret;
+ if (hsotg->clk) {
+ ret = clk_prepare_enable(hsotg->clk);
+ if (ret)
+ return ret;
+ }
if (hsotg->uphy)
ret = usb_phy_init(hsotg->uphy);
@@ -175,7 +177,8 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
if (ret)
return ret;
- clk_disable_unprepare(hsotg->clk);
+ if (hsotg->clk)
+ clk_disable_unprepare(hsotg->clk);
ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
hsotg->supplies);
@@ -212,14 +215,41 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
*/
hsotg->phy = devm_phy_get(hsotg->dev, "usb2-phy");
if (IS_ERR(hsotg->phy)) {
- hsotg->phy = NULL;
+ ret = PTR_ERR(hsotg->phy);
+ switch (ret) {
+ case -ENODEV:
+ case -ENOSYS:
+ hsotg->phy = NULL;
+ break;
+ case -EPROBE_DEFER:
+ return ret;
+ default:
+ dev_err(hsotg->dev, "error getting phy %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (!hsotg->phy) {
hsotg->uphy = devm_usb_get_phy(hsotg->dev, USB_PHY_TYPE_USB2);
- if (IS_ERR(hsotg->uphy))
- hsotg->uphy = NULL;
- else
- hsotg->plat = dev_get_platdata(hsotg->dev);
+ if (IS_ERR(hsotg->uphy)) {
+ ret = PTR_ERR(hsotg->uphy);
+ switch (ret) {
+ case -ENODEV:
+ case -ENXIO:
+ hsotg->uphy = NULL;
+ break;
+ case -EPROBE_DEFER:
+ return ret;
+ default:
+ dev_err(hsotg->dev, "error getting usb phy %d\n",
+ ret);
+ return ret;
+ }
+ }
}
+ hsotg->plat = dev_get_platdata(hsotg->dev);
+
if (hsotg->phy) {
/*
* If using the generic PHY framework, check if the PHY bus
@@ -229,11 +259,6 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
hsotg->phyif = GUSBCFG_PHYIF8;
}
- if (!hsotg->phy && !hsotg->uphy && !hsotg->plat) {
- dev_err(hsotg->dev, "no platform data or transceiver defined\n");
- return -EPROBE_DEFER;
- }
-
/* Clock */
hsotg->clk = devm_clk_get(hsotg->dev, "otg");
if (IS_ERR(hsotg->clk)) {
@@ -342,20 +367,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (retval)
return retval;
- irq = platform_get_irq(dev, 0);
- if (irq < 0) {
- dev_err(&dev->dev, "missing IRQ resource\n");
- return irq;
- }
-
- dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
- irq);
- retval = devm_request_irq(hsotg->dev, irq,
- dwc2_handle_common_intr, IRQF_SHARED,
- dev_name(hsotg->dev), hsotg);
- if (retval)
- return retval;
-
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
hsotg->regs = devm_ioremap_resource(&dev->dev, res);
if (IS_ERR(hsotg->regs))
@@ -390,6 +401,20 @@ static int dwc2_driver_probe(struct platform_device *dev)
dwc2_set_all_params(hsotg->core_params, -1);
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ dev_err(&dev->dev, "missing IRQ resource\n");
+ return irq;
+ }
+
+ dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
+ irq);
+ retval = devm_request_irq(hsotg->dev, irq,
+ dwc2_handle_common_intr, IRQF_SHARED,
+ dev_name(hsotg->dev), hsotg);
+ if (retval)
+ return retval;
+
retval = dwc2_lowlevel_hw_enable(hsotg);
if (retval)
return retval;
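
The dwc2 probe rework above follows the standard optional-resource pattern: "not present" error codes degrade to running without the resource, -EPROBE_DEFER is always propagated so the probe retries once the provider shows up, and anything else is fatal. Condensed into a hypothetical helper:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/phy/phy.h>

    /* Hypothetical: returns 0 with *phyp == NULL when no PHY is
     * described, defers when the provider isn't ready yet. */
    static int get_optional_phy(struct device *dev, struct phy **phyp)
    {
        struct phy *phy = devm_phy_get(dev, "usb2-phy");

        if (IS_ERR(phy)) {
            int ret = PTR_ERR(phy);

            switch (ret) {
            case -ENODEV:
            case -ENOSYS:
                phy = NULL; /* optional: carry on without it */
                break;
            case -EPROBE_DEFER:
                return ret; /* provider not ready; retry later */
            default:
                dev_err(dev, "error getting phy %d\n", ret);
                return ret;
            }
        }
        *phyp = phy;
        return 0;
    }
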
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index e24a01cc98df..a58376fd65fe 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1078,6 +1078,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
* little bit faster.
*/
if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+ !usb_endpoint_xfer_int(dep->endpoint.desc) &&
!(dep->flags & DWC3_EP_BUSY)) {
ret = __dwc3_gadget_kick_transfer(dep, 0, true);
goto out;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index adc6d52efa46..cf43e9e18368 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -423,7 +423,7 @@ static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
spin_unlock_irq(&ffs->ev.waitq.lock);
mutex_unlock(&ffs->mutex);
- return unlikely(__copy_to_user(buf, events, size)) ? -EFAULT : size;
+ return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size;
}
static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
@@ -513,7 +513,7 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
/* unlocks spinlock */
ret = __ffs_ep0_queue_wait(ffs, data, len);
- if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len)))
+ if (likely(ret > 0) && unlikely(copy_to_user(buf, data, len)))
ret = -EFAULT;
goto done_mutex;
@@ -3493,7 +3493,7 @@ static char *ffs_prepare_buffer(const char __user *buf, size_t len)
if (unlikely(!data))
return ERR_PTR(-ENOMEM);
- if (unlikely(__copy_from_user(data, buf, len))) {
+ if (unlikely(copy_from_user(data, buf, len))) {
kfree(data);
return ERR_PTR(-EFAULT);
}
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 42acb45e1ab4..898a570319f1 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -370,6 +370,7 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (err) {
ERROR(midi, "%s queue req: %d\n",
midi->out_ep->name, err);
+ free_ep_req(midi->out_ep, req);
}
}
@@ -545,7 +546,7 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req)
}
}
- if (req->length > 0) {
+ if (req->length > 0 && ep->enabled) {
int err;
err = usb_ep_queue(ep, req, GFP_ATOMIC);
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 289ebca316d3..ad8c9b05572d 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -20,7 +20,7 @@
#define UVC_ATTR(prefix, cname, aname) \
static struct configfs_attribute prefix##attr_##cname = { \
.ca_name = __stringify(aname), \
- .ca_mode = S_IRUGO, \
+ .ca_mode = S_IRUGO | S_IWUGO, \
.ca_owner = THIS_MODULE, \
.show = prefix##cname##_show, \
.store = prefix##cname##_store, \
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index 670ac0b12f00..001a3b74a993 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -2536,6 +2536,9 @@ static int pxa_udc_suspend(struct platform_device *_dev, pm_message_t state)
udc->pullup_resume = udc->pullup_on;
dplus_pullup(udc, 0);
+ if (udc->driver)
+ udc->driver->disconnect(&udc->gadget);
+
return 0;
}
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 342ffd140122..8c6e15bd6ff0 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -473,6 +473,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
+ pdev->dev.platform_data = pdata;
+
if (!of_property_read_u32(np, "num-ports", &ports))
pdata->ports = ports;
@@ -483,6 +485,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
*/
if (i >= pdata->ports) {
pdata->vbus_pin[i] = -EINVAL;
+ pdata->overcurrent_pin[i] = -EINVAL;
continue;
}
@@ -513,10 +516,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
}
at91_for_each_port(i) {
- if (i >= pdata->ports) {
- pdata->overcurrent_pin[i] = -EINVAL;
- continue;
- }
+ if (i >= pdata->ports)
+ break;
pdata->overcurrent_pin[i] =
of_get_named_gpio_flags(np, "atmel,oc-gpio", i, &flags);
@@ -552,8 +553,6 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
}
}
- pdev->dev.platform_data = pdata;
-
device_init_wakeup(&pdev->dev, 1);
return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev);
}
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index dc31c425ce01..9f1c0538b211 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -377,6 +377,10 @@ static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_f
if (std->pl_virt == NULL)
return -ENOMEM;
std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
+ kfree(std->pl_virt);
+ return -EFAULT;
+ }
for (p = 0; p < std->num_pointers; p++) {
std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0230965fb78c..f980c239eded 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -733,8 +733,30 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
if ((raw_port_status & PORT_RESET) ||
!(raw_port_status & PORT_PE))
return 0xffffffff;
- if (time_after_eq(jiffies,
- bus_state->resume_done[wIndex])) {
+ /* did port event handler already start resume timing? */
+ if (!bus_state->resume_done[wIndex]) {
+ /* If not, maybe we are in a host initiated resume? */
+ if (test_bit(wIndex, &bus_state->resuming_ports)) {
+ /* Host initiated resume doesn't time the resume
+ * signalling using resume_done[].
+ * It manually sets RESUME state, sleeps 20ms
+ * and sets U0 state. This should probably be
+ * changed, but not right now.
+ */
+ } else {
+ /* port resume was discovered now and here,
+ * start resume timing
+ */
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+
+ set_bit(wIndex, &bus_state->resuming_ports);
+ bus_state->resume_done[wIndex] = timeout;
+ mod_timer(&hcd->rh_timer, timeout);
+ }
+ /* Has resume been signalled for USB_RESUME_TIME yet? */
+ } else if (time_after_eq(jiffies,
+ bus_state->resume_done[wIndex])) {
int time_left;
xhci_dbg(xhci, "Resume USB2 port %d\n",
@@ -775,13 +797,26 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
} else {
/*
* The resume has been signaling for less than
- * 20ms. Report the port status as SUSPEND,
- * let the usbcore check port status again
- * and clear resume signaling later.
+ * USB_RESUME_TIME. Report the port status as SUSPEND,
+ * let the usbcore check port status again and clear
+ * resume signaling later.
*/
status |= USB_PORT_STAT_SUSPEND;
}
}
+ /*
+ * Clear stale usb2 resume signalling variables in case port changed
+ * state during resume signalling, for example on error.
+ */
+ if ((bus_state->resume_done[wIndex] ||
+ test_bit(wIndex, &bus_state->resuming_ports)) &&
+ (raw_port_status & PORT_PLS_MASK) != XDEV_U3 &&
+ (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) {
+ bus_state->resume_done[wIndex] = 0;
+ clear_bit(wIndex, &bus_state->resuming_ports);
+ }
+
+
if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 &&
(raw_port_status & PORT_POWER)) {
if (bus_state->suspended_ports & (1 << wIndex)) {
@@ -1115,6 +1150,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if ((temp & PORT_PE) == 0)
goto error;
+ set_bit(wIndex, &bus_state->resuming_ports);
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_RESUME);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1122,6 +1158,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
spin_lock_irqsave(&xhci->lock, flags);
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_U0);
+ clear_bit(wIndex, &bus_state->resuming_ports);
}
bus_state->port_c_suspend |= 1 << wIndex;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 17f6897acde2..c62109091d12 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -188,10 +188,14 @@ static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
};
- acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL);
+ union acpi_object *obj;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1,
+ NULL);
+ ACPI_FREE(obj);
}
#else
- static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
+static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
#endif /* CONFIG_ACPI */
/* called during probe() after chip reset completes */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6c5e8133cf87..eeaa6c6bd540 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1583,7 +1583,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
*/
bogus_port_status = true;
goto cleanup;
- } else {
+ } else if (!test_bit(faked_port_index,
+ &bus_state->resuming_ports)) {
xhci_dbg(xhci, "resume HS port %d\n", port_id);
bus_state->resume_done[faked_port_index] = jiffies +
msecs_to_jiffies(USB_RESUME_TIMEOUT);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index dfa44d3e8eee..3f912705dcef 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4778,8 +4778,16 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
+ /*
+ * refer to section 6.2.2: MTT should be 0 for full speed hub,
+ * but it may be already set to 1 when setup an xHCI virtual
+ * device, so clear it anyway.
+ */
if (tt->multi)
slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
+ else if (hdev->speed == USB_SPEED_FULL)
+ slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
+
if (xhci->hci_version > 0x95) {
xhci_dbg(xhci, "xHCI version %x needs hub "
"TT think time and number of ports\n",
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 1f2037bbeb0d..45c83baf675d 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -159,7 +159,7 @@ config USB_TI_CPPI_DMA
config USB_TI_CPPI41_DMA
bool 'TI CPPI 4.1 (AM335x)'
- depends on ARCH_OMAP
+ depends on ARCH_OMAP && DMADEVICES
select TI_CPPI41
config USB_TUSB_OMAP_DMA
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 18cfc0a361cb..ee9ff7028b92 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2017,7 +2017,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
/* We need musb_read/write functions initialized for PM */
pm_runtime_use_autosuspend(musb->controller);
pm_runtime_set_autosuspend_delay(musb->controller, 200);
- pm_runtime_irq_safe(musb->controller);
pm_runtime_enable(musb->controller);
/* The musb_platform_init() call:
@@ -2095,6 +2094,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
#ifndef CONFIG_MUSB_PIO_ONLY
if (!musb->ops->dma_init || !musb->ops->dma_exit) {
dev_err(dev, "DMA controller not set\n");
+ status = -ENODEV;
goto fail2;
}
musb_dma_controller_create = musb->ops->dma_init;
@@ -2218,6 +2218,12 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
pm_runtime_put(musb->controller);
+ /*
+ * For why this is currently needed, see commit 3e43a0725637
+ * ("usb: musb: core: add pm_runtime_irq_safe()")
+ */
+ pm_runtime_irq_safe(musb->controller);
+
return 0;
fail5:
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 80eb991c2506..0d19a6d61a71 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -1506,7 +1506,6 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
{
struct msm_otg_platform_data *pdata;
struct extcon_dev *ext_id, *ext_vbus;
- const struct of_device_id *id;
struct device_node *node = pdev->dev.of_node;
struct property *prop;
int len, ret, words;
@@ -1518,8 +1517,9 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
motg->pdata = pdata;
- id = of_match_device(msm_otg_dt_match, &pdev->dev);
- pdata->phy_type = (enum msm_usb_phy_type) id->data;
+ pdata->phy_type = (enum msm_usb_phy_type)of_device_get_match_data(&pdev->dev);
+ if (!pdata->phy_type)
+ return 1;
motg->link_rst = devm_reset_control_get(&pdev->dev, "link");
if (IS_ERR(motg->link_rst))
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index b7536af777ab..c2936dc48ca7 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -143,12 +143,17 @@ static const struct mxs_phy_data imx6sx_phy_data = {
.flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS,
};
+static const struct mxs_phy_data imx6ul_phy_data = {
+ .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS,
+};
+
static const struct of_device_id mxs_phy_dt_ids[] = {
{ .compatible = "fsl,imx6sx-usbphy", .data = &imx6sx_phy_data, },
{ .compatible = "fsl,imx6sl-usbphy", .data = &imx6sl_phy_data, },
{ .compatible = "fsl,imx6q-usbphy", .data = &imx6q_phy_data, },
{ .compatible = "fsl,imx23-usbphy", .data = &imx23_phy_data, },
{ .compatible = "fsl,vf610-usbphy", .data = &vf610_phy_data, },
+ { .compatible = "fsl,imx6ul-usbphy", .data = &imx6ul_phy_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_phy_dt_ids);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index de4f97d84a82..8f7a78e70975 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -131,7 +131,8 @@ static void __usbhsg_queue_pop(struct usbhsg_uep *uep,
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
- dev_dbg(dev, "pipe %d : queue pop\n", usbhs_pipe_number(pipe));
+ if (pipe)
+ dev_dbg(dev, "pipe %d : queue pop\n", usbhs_pipe_number(pipe));
ureq->req.status = status;
spin_unlock(usbhs_priv_to_lock(priv));
@@ -685,7 +686,13 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
- usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq));
+ if (pipe)
+ usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq));
+
+ /*
+ * To dequeue a request, this driver should call usbhsg_queue_pop()
+ * even if the pipe is NULL.
+ */
usbhsg_queue_pop(uep, ureq, -ECONNRESET);
return 0;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index eac7ccaa3c85..7d4f51a32e66 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -132,7 +132,6 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
- { USB_DEVICE(0x10C4, 0xEA80) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
{ USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
{ USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 3658662898fc..a204782ae530 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -53,6 +53,7 @@ DEVICE(funsoft, FUNSOFT_IDS);
/* Infineon Flashloader driver */
#define FLASHLOADER_IDS() \
+ { USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
{ USB_DEVICE(0x8087, 0x0716) }
DEVICE(flashloader, FLASHLOADER_IDS);
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index e69151664436..5c66d3f7a6d0 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -796,6 +796,10 @@ static int uas_slave_configure(struct scsi_device *sdev)
if (devinfo->flags & US_FL_NO_REPORT_OPCODES)
sdev->no_report_opcodes = 1;
+ /* A few buggy USB-ATA bridges don't understand FUA */
+ if (devinfo->flags & US_FL_BROKEN_FUA)
+ sdev->broken_fua = 1;
+
scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
return 0;
}
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 6b2479123de7..7ffe4209067b 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1987,7 +1987,7 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
US_FL_IGNORE_RESIDUE ),
/* Reported by Michael Büsch <m@bues.ch> */
-UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0114,
+UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
"JMicron",
"USB to ATA/ATAPI Bridge",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index c85ea530085f..ccc113e83d88 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -132,7 +132,7 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
"JMicron",
"JMS567",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_REPORT_OPCODES),
+ US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index da6e2ce77495..850d86ca685b 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -31,21 +31,6 @@ menuconfig VFIO
If you don't know what to do here, say N.
-menuconfig VFIO_NOIOMMU
- bool "VFIO No-IOMMU support"
- depends on VFIO
- help
- VFIO is built on the ability to isolate devices using the IOMMU.
- Only with an IOMMU can userspace access to DMA capable devices be
- considered secure. VFIO No-IOMMU mode enables IOMMU groups for
- devices without IOMMU backing for the purpose of re-using the VFIO
- infrastructure in a non-secure mode. Use of this mode will result
- in an unsupportable kernel and will therefore taint the kernel.
- Device assignment to virtual machines is also not possible with
- this mode since there is no IOMMU to provide DMA translation.
-
- If you don't know what to do here, say N.
-
source "drivers/vfio/pci/Kconfig"
source "drivers/vfio/platform/Kconfig"
source "virt/lib/Kconfig"
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 32b88bd2c82c..56bf6dbb93db 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -940,13 +940,13 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
return -EINVAL;
- group = vfio_iommu_group_get(&pdev->dev);
+ group = iommu_group_get(&pdev->dev);
if (!group)
return -EINVAL;
vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
if (!vdev) {
- vfio_iommu_group_put(group, &pdev->dev);
+ iommu_group_put(group);
return -ENOMEM;
}
@@ -957,7 +957,7 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
if (ret) {
- vfio_iommu_group_put(group, &pdev->dev);
+ iommu_group_put(group);
kfree(vdev);
return ret;
}
@@ -993,7 +993,7 @@ static void vfio_pci_remove(struct pci_dev *pdev)
if (!vdev)
return;
- vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
+ iommu_group_put(pdev->dev.iommu_group);
kfree(vdev);
if (vfio_pci_is_vga(pdev)) {
@@ -1035,7 +1035,7 @@ static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_CAN_RECOVER;
}
-static struct pci_error_handlers vfio_err_handlers = {
+static const struct pci_error_handlers vfio_err_handlers = {
.error_detected = vfio_pci_aer_err_detected,
};
diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c
index f1625dcfbb23..b1cc3a768784 100644
--- a/drivers/vfio/platform/vfio_platform.c
+++ b/drivers/vfio/platform/vfio_platform.c
@@ -92,7 +92,6 @@ static struct platform_driver vfio_platform_driver = {
.remove = vfio_platform_remove,
.driver = {
.name = "vfio-platform",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index a1c50d630792..418cdd9ba3f4 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -51,13 +51,10 @@ static vfio_platform_reset_fn_t vfio_platform_lookup_reset(const char *compat,
static void vfio_platform_get_reset(struct vfio_platform_device *vdev)
{
- char modname[256];
-
vdev->reset = vfio_platform_lookup_reset(vdev->compat,
&vdev->reset_module);
if (!vdev->reset) {
- snprintf(modname, 256, "vfio-reset:%s", vdev->compat);
- request_module(modname);
+ request_module("vfio-reset:%s", vdev->compat);
vdev->reset = vfio_platform_lookup_reset(vdev->compat,
&vdev->reset_module);
}
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index de632da2e22f..6070b793cbcb 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -62,7 +62,6 @@ struct vfio_container {
struct rw_semaphore group_lock;
struct vfio_iommu_driver *iommu_driver;
void *iommu_data;
- bool noiommu;
};
struct vfio_unbound_dev {
@@ -85,7 +84,6 @@ struct vfio_group {
struct list_head unbound_list;
struct mutex unbound_lock;
atomic_t opened;
- bool noiommu;
};
struct vfio_device {
@@ -97,147 +95,6 @@ struct vfio_device {
void *device_data;
};
-#ifdef CONFIG_VFIO_NOIOMMU
-static bool noiommu __read_mostly;
-module_param_named(enable_unsafe_noiommu_support,
- noiommu, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
-#endif
-
-/*
- * vfio_iommu_group_{get,put} are only intended for VFIO bus driver probe
- * and remove functions, any use cases other than acquiring the first
- * reference for the purpose of calling vfio_add_group_dev() or removing
- * that symmetric reference after vfio_del_group_dev() should use the raw
- * iommu_group_{get,put} functions. In particular, vfio_iommu_group_put()
- * removes the device from the dummy group and cannot be nested.
- */
-struct iommu_group *vfio_iommu_group_get(struct device *dev)
-{
- struct iommu_group *group;
- int __maybe_unused ret;
-
- group = iommu_group_get(dev);
-
-#ifdef CONFIG_VFIO_NOIOMMU
- /*
- * With noiommu enabled, an IOMMU group will be created for a device
- * that doesn't already have one and doesn't have iommu_ops on its
- * bus. We use iommu_present() again in the main code to detect these
- * fake groups.
- */
- if (group || !noiommu || iommu_present(dev->bus))
- return group;
-
- group = iommu_group_alloc();
- if (IS_ERR(group))
- return NULL;
-
- iommu_group_set_name(group, "vfio-noiommu");
- ret = iommu_group_add_device(group, dev);
- iommu_group_put(group);
- if (ret)
- return NULL;
-
- /*
- * Where to taint? At this point we've added an IOMMU group for a
- * device that is not backed by iommu_ops, therefore any iommu_
- * callback using iommu_ops can legitimately Oops. So, while we may
- * be about to give a DMA capable device to a user without IOMMU
- * protection, which is clearly taint-worthy, let's go ahead and do
- * it here.
- */
- add_taint(TAINT_USER, LOCKDEP_STILL_OK);
- dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
-#endif
-
- return group;
-}
-EXPORT_SYMBOL_GPL(vfio_iommu_group_get);
-
-void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
-{
-#ifdef CONFIG_VFIO_NOIOMMU
- if (!iommu_present(dev->bus))
- iommu_group_remove_device(dev);
-#endif
-
- iommu_group_put(group);
-}
-EXPORT_SYMBOL_GPL(vfio_iommu_group_put);
-
-#ifdef CONFIG_VFIO_NOIOMMU
-static void *vfio_noiommu_open(unsigned long arg)
-{
- if (arg != VFIO_NOIOMMU_IOMMU)
- return ERR_PTR(-EINVAL);
- if (!capable(CAP_SYS_RAWIO))
- return ERR_PTR(-EPERM);
-
- return NULL;
-}
-
-static void vfio_noiommu_release(void *iommu_data)
-{
-}
-
-static long vfio_noiommu_ioctl(void *iommu_data,
- unsigned int cmd, unsigned long arg)
-{
- if (cmd == VFIO_CHECK_EXTENSION)
- return arg == VFIO_NOIOMMU_IOMMU ? 1 : 0;
-
- return -ENOTTY;
-}
-
-static int vfio_iommu_present(struct device *dev, void *unused)
-{
- return iommu_present(dev->bus) ? 1 : 0;
-}
-
-static int vfio_noiommu_attach_group(void *iommu_data,
- struct iommu_group *iommu_group)
-{
- return iommu_group_for_each_dev(iommu_group, NULL,
- vfio_iommu_present) ? -EINVAL : 0;
-}
-
-static void vfio_noiommu_detach_group(void *iommu_data,
- struct iommu_group *iommu_group)
-{
-}
-
-static struct vfio_iommu_driver_ops vfio_noiommu_ops = {
- .name = "vfio-noiommu",
- .owner = THIS_MODULE,
- .open = vfio_noiommu_open,
- .release = vfio_noiommu_release,
- .ioctl = vfio_noiommu_ioctl,
- .attach_group = vfio_noiommu_attach_group,
- .detach_group = vfio_noiommu_detach_group,
-};
-
-static struct vfio_iommu_driver vfio_noiommu_driver = {
- .ops = &vfio_noiommu_ops,
-};
-
-/*
- * Wrap IOMMU drivers, the noiommu driver is the one and only driver for
- * noiommu groups (and thus containers) and not available for normal groups.
- */
-#define vfio_for_each_iommu_driver(con, pos) \
- for (pos = con->noiommu ? &vfio_noiommu_driver : \
- list_first_entry(&vfio.iommu_drivers_list, \
- struct vfio_iommu_driver, vfio_next); \
- (con->noiommu ? pos != NULL : \
- &pos->vfio_next != &vfio.iommu_drivers_list); \
- pos = con->noiommu ? NULL : list_next_entry(pos, vfio_next))
-#else
-#define vfio_for_each_iommu_driver(con, pos) \
- list_for_each_entry(pos, &vfio.iommu_drivers_list, vfio_next)
-#endif
-
-
/**
* IOMMU driver registration
*/
@@ -342,8 +199,7 @@ static void vfio_group_unlock_and_free(struct vfio_group *group)
/**
* Group objects - create, release, get, put, search
*/
-static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
- bool noiommu)
+static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
{
struct vfio_group *group, *tmp;
struct device *dev;
@@ -361,7 +217,6 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
atomic_set(&group->container_users, 0);
atomic_set(&group->opened, 0);
group->iommu_group = iommu_group;
- group->noiommu = noiommu;
group->nb.notifier_call = vfio_iommu_group_notifier;
@@ -397,8 +252,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
dev = device_create(vfio.class, NULL,
MKDEV(MAJOR(vfio.group_devt), minor),
- group, "%s%d", noiommu ? "noiommu-" : "",
- iommu_group_id(iommu_group));
+ group, "%d", iommu_group_id(iommu_group));
if (IS_ERR(dev)) {
vfio_free_group_minor(minor);
vfio_group_unlock_and_free(group);
@@ -682,7 +536,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
return 0;
/* TODO Prevent device auto probing */
- WARN("Device %s added to live group %d!\n", dev_name(dev),
+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
iommu_group_id(group->iommu_group));
return 0;
@@ -786,8 +640,7 @@ int vfio_add_group_dev(struct device *dev,
group = vfio_group_get_from_iommu(iommu_group);
if (!group) {
- group = vfio_create_group(iommu_group,
- !iommu_present(dev->bus));
+ group = vfio_create_group(iommu_group);
if (IS_ERR(group)) {
iommu_group_put(iommu_group);
return PTR_ERR(group);
@@ -999,7 +852,8 @@ static long vfio_ioctl_check_extension(struct vfio_container *container,
*/
if (!driver) {
mutex_lock(&vfio.iommu_drivers_lock);
- vfio_for_each_iommu_driver(container, driver) {
+ list_for_each_entry(driver, &vfio.iommu_drivers_list,
+ vfio_next) {
if (!try_module_get(driver->ops->owner))
continue;
@@ -1068,7 +922,7 @@ static long vfio_ioctl_set_iommu(struct vfio_container *container,
}
mutex_lock(&vfio.iommu_drivers_lock);
- vfio_for_each_iommu_driver(container, driver) {
+ list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
void *data;
if (!try_module_get(driver->ops->owner))
@@ -1333,9 +1187,6 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
if (atomic_read(&group->container_users))
return -EINVAL;
- if (group->noiommu && !capable(CAP_SYS_RAWIO))
- return -EPERM;
-
f = fdget(container_fd);
if (!f.file)
return -EBADF;
@@ -1351,13 +1202,6 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
down_write(&container->group_lock);
- /* Real groups and fake groups cannot mix */
- if (!list_empty(&container->group_list) &&
- container->noiommu != group->noiommu) {
- ret = -EPERM;
- goto unlock_out;
- }
-
driver = container->iommu_driver;
if (driver) {
ret = driver->ops->attach_group(container->iommu_data,
@@ -1367,7 +1211,6 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
}
group->container = container;
- container->noiommu = group->noiommu;
list_add(&group->container_next, &container->group_list);
/* Get a reference on the container and mark a user within the group */
@@ -1398,9 +1241,6 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
!group->container->iommu_driver || !vfio_group_viable(group))
return -EINVAL;
- if (group->noiommu && !capable(CAP_SYS_RAWIO))
- return -EPERM;
-
device = vfio_device_get_from_name(group, buf);
if (!device)
return -ENODEV;
@@ -1443,10 +1283,6 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
fd_install(ret, filep);
- if (group->noiommu)
- dev_warn(device->dev, "vfio-noiommu device opened by user "
- "(%s:%d)\n", current->comm, task_pid_nr(current));
-
return ret;
}
@@ -1535,11 +1371,6 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep)
if (!group)
return -ENODEV;
- if (group->noiommu && !capable(CAP_SYS_RAWIO)) {
- vfio_group_put(group);
- return -EPERM;
- }
-
/* Do we need multiple instances of the group open? Seems not. */
opened = atomic_cmpxchg(&group->opened, 0, 1);
if (opened) {
@@ -1702,11 +1533,6 @@ struct vfio_group *vfio_group_get_external_user(struct file *filep)
if (!atomic_inc_not_zero(&group->container_users))
return ERR_PTR(-EINVAL);
- if (group->noiommu) {
- atomic_dec(&group->container_users);
- return ERR_PTR(-EPERM);
- }
-
if (!group->container->iommu_driver ||
!vfio_group_viable(group)) {
atomic_dec(&group->container_users);
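
With the no-IOMMU layer removed, bus drivers go back to pairing plain iommu_group_get()/iommu_group_put() around vfio_add_group_dev(). A condensed sketch of the resulting probe flow, abstracted from the vfio_pci hunks above (error handling trimmed, not a verbatim copy of the driver):

	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		struct iommu_group *group;
		struct vfio_pci_device *vdev;
		int ret;

		group = iommu_group_get(&pdev->dev);	/* no no-IOMMU fallback */
		if (!group)
			return -EINVAL;

		vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
		if (!vdev) {
			iommu_group_put(group);		/* plain put, no dev argument */
			return -ENOMEM;
		}

		ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
		if (ret) {
			iommu_group_put(group);
			kfree(vdev);
		}
		return ret;
	}
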
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index eec2f11809ff..ad2146a9ab2d 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -819,7 +819,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
(a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
- (a.log_guest_addr & (sizeof(u64) - 1))) {
+ (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) {
r = -EINVAL;
break;
}
@@ -1369,7 +1369,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
/* Grab the next descriptor number they're advertising, and increment
* the index we've seen. */
if (unlikely(__get_user(ring_head,
- &vq->avail->ring[last_avail_idx % vq->num]))) {
+ &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
vq_err(vq, "Failed to read head: idx %d address %p\n",
last_avail_idx,
&vq->avail->ring[last_avail_idx % vq->num]);
@@ -1489,7 +1489,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
u16 old, new;
int start;
- start = vq->last_used_idx % vq->num;
+ start = vq->last_used_idx & (vq->num - 1);
used = vq->used->ring + start;
if (count == 1) {
if (__put_user(heads[0].id, &used->id)) {
@@ -1531,7 +1531,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
{
int start, n, r;
- start = vq->last_used_idx % vq->num;
+ start = vq->last_used_idx & (vq->num - 1);
n = vq->num - start;
if (n < count) {
r = __vhost_add_used_n(vq, heads, n);
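
The vhost hunks above replace `idx % vq->num` with `idx & (vq->num - 1)` (and align the log_guest_addr check with the used-ring alignment). The masking form is only equivalent because virtio requires ring sizes to be a power of two; a stand-alone check of the equivalence across 16-bit index wraparound, with an illustrative ring size:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t num = 256;	/* ring size; virtio mandates a power of two */

		/* free-running 16-bit indices wrap at 65536 */
		for (uint32_t idx = 0; idx < 70000; idx++)
			assert((uint16_t)(idx % num) == (uint16_t)(idx & (num - 1)));
		return 0;
	}
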
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index b335c1ae8625..fe00a07c122e 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -479,7 +479,10 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
port = FSL_DIU_PORT_DLVDS;
}
- return diu_ops.valid_monitor_port(port);
+ if (diu_ops.valid_monitor_port)
+ port = diu_ops.valid_monitor_port(port);
+
+ return port;
}
/*
@@ -1915,6 +1918,14 @@ static int __init fsl_diu_init(void)
#else
monitor_port = fsl_diu_name_to_port(monitor_string);
#endif
+
+ /*
+ * We must verify set_pixel_clock. If it is not implemented on the
+ * platform, there is no platform support for the DIU.
+ */
+ if (!diu_ops.set_pixel_clock)
+ return -ENODEV;
+
pr_info("Freescale Display Interface Unit (DIU) framebuffer driver\n");
#ifdef CONFIG_NOT_COHERENT_CACHE
diff --git a/drivers/video/fbdev/omap2/dss/venc.c b/drivers/video/fbdev/omap2/dss/venc.c
index 99ca268c1cdd..d05a54922ba6 100644
--- a/drivers/video/fbdev/omap2/dss/venc.c
+++ b/drivers/video/fbdev/omap2/dss/venc.c
@@ -275,6 +275,12 @@ const struct omap_video_timings omap_dss_pal_timings = {
.vbp = 41,
.interlace = true,
+
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
};
EXPORT_SYMBOL(omap_dss_pal_timings);
@@ -290,6 +296,12 @@ const struct omap_video_timings omap_dss_ntsc_timings = {
.vbp = 31,
.interlace = true,
+
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
};
EXPORT_SYMBOL(omap_dss_ntsc_timings);
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index b1877d73fa56..7062bb0975a5 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -412,6 +412,7 @@ static int virtio_init(void)
static void __exit virtio_exit(void)
{
bus_unregister(&virtio_bus);
+ ida_destroy(&virtio_index_ida);
}
core_initcall(virtio_init);
module_exit(virtio_exit);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 096b857e7b75..ee663c458b20 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -80,6 +80,12 @@ struct vring_virtqueue {
/* Last used index we've seen. */
u16 last_used_idx;
+ /* Last written value to avail->flags */
+ u16 avail_flags_shadow;
+
+ /* Last written value to avail->idx in guest byte order */
+ u16 avail_idx_shadow;
+
/* How to notify other side. FIXME: commonalize hcalls! */
bool (*notify)(struct virtqueue *vq);
@@ -109,7 +115,7 @@ static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
* otherwise virt_to_phys will give us bogus addresses in the
* virtqueue.
*/
- gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);
+ gfp &= ~__GFP_HIGHMEM;
desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
if (!desc)
@@ -235,13 +241,14 @@ static inline int virtqueue_add(struct virtqueue *_vq,
/* Put entry in available array (but don't update avail->idx until they
* do sync). */
- avail = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) & (vq->vring.num - 1);
+ avail = vq->avail_idx_shadow & (vq->vring.num - 1);
vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
/* Descriptors and available array need to be set before we expose the
* new available array entries. */
virtio_wmb(vq->weak_barriers);
- vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1);
+ vq->avail_idx_shadow++;
+ vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
vq->num_added++;
pr_debug("Added buffer head %i to %p\n", head, vq);
@@ -354,8 +361,8 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
* event. */
virtio_mb(vq->weak_barriers);
- old = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->num_added;
- new = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx);
+ old = vq->avail_idx_shadow - vq->num_added;
+ new = vq->avail_idx_shadow;
vq->num_added = 0;
#ifdef DEBUG
@@ -510,7 +517,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
/* If we expect an interrupt for the next entry, tell host
* by writing event index and flush out the write before
* the read in the next get_buf call. */
- if (!(vq->vring.avail->flags & cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT))) {
+ if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
virtio_mb(vq->weak_barriers);
}
@@ -537,7 +544,11 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- vq->vring.avail->flags |= cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT);
+ if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
+ vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+ vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+ }
+
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
@@ -565,7 +576,10 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
* either clear the flags bit or point the event index at the next
* entry. Always do both to keep code simple. */
- vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
+ if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
+ vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
+ vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+ }
vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
END_USE(vq);
return last_used_idx;
@@ -633,9 +647,12 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
* either clear the flags bit or point the event index at the next
* entry. Always do both to keep code simple. */
- vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
+ if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
+ vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
+ vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+ }
/* TODO: tune this threshold */
- bufs = (u16)(virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->last_used_idx) * 3 / 4;
+ bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
virtio_mb(vq->weak_barriers);
if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
@@ -670,7 +687,8 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
/* detach_buf clears data, so grab it now. */
buf = vq->data[i];
detach_buf(vq, i);
- vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - 1);
+ vq->avail_idx_shadow--;
+ vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
END_USE(vq);
return buf;
}
@@ -735,6 +753,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
vq->weak_barriers = weak_barriers;
vq->broken = false;
vq->last_used_idx = 0;
+ vq->avail_flags_shadow = 0;
+ vq->avail_idx_shadow = 0;
vq->num_added = 0;
list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
@@ -746,8 +766,10 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
/* No callback? Tell other side not to bother us. */
- if (!callback)
- vq->vring.avail->flags |= cpu_to_virtio16(vdev, VRING_AVAIL_F_NO_INTERRUPT);
+ if (!callback) {
+ vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+ vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
+ }
/* Put everything in free lists. */
vq->free_head = 0;
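
Both shadow fields implement the same idea: the driver keeps a private copy of a ring field in native byte order and writes the shared, device-visible copy only when the value actually changes, which saves a byte-swap per access and avoids redundant stores to a cache line the device is polling. A minimal userspace model of the pattern (the struct, names, and flag bit are illustrative):

	#include <endian.h>
	#include <stdint.h>

	struct ring_state {
		uint16_t flags_shadow;		/* last value written, native endian */
		uint16_t *shared_flags;		/* device-visible field, little endian */
	};

	static void ring_disable_notify(struct ring_state *r)
	{
		if (!(r->flags_shadow & 1)) {	/* "no interrupt" style bit */
			r->flags_shadow |= 1;
			/* single store to the shared field, only on change */
			*r->shared_flags = htole16(r->flags_shadow);
		}
	}
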
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 699941e90667..511078586fa1 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -451,9 +451,9 @@ void v9fs_evict_inode(struct inode *inode)
{
struct v9fs_inode *v9inode = V9FS_I(inode);
- truncate_inode_pages_final(inode->i_mapping);
+ truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
- filemap_fdatawrite(inode->i_mapping);
+ filemap_fdatawrite(&inode->i_data);
v9fs_cache_inode_put_cookie(inode);
/* clunk the fid stashed in writeback_fid */
diff --git a/fs/block_dev.c b/fs/block_dev.c
index c25639e907bd..44d4a1e9244e 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1523,11 +1523,14 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
WARN_ON_ONCE(bdev->bd_holders);
sync_blockdev(bdev);
kill_bdev(bdev);
+
+ bdev_write_inode(bdev);
/*
- * ->release can cause the queue to disappear, so flush all
- * dirty data before.
+ * Detaching bdev inode from its wb in __destroy_inode()
+ * is too late: the queue which embeds its bdi (along with
+ * root wb) can be gone as soon as we put_disk() below.
*/
- bdev_write_inode(bdev);
+ inode_detach_wb(bdev->bd_inode);
}
if (bdev->bd_contains == bdev) {
if (disk->fops->release)
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 6b66dd5d1540..a329f5ba35aa 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1831,11 +1831,11 @@ cifs_invalidate_mapping(struct inode *inode)
* @word: long word containing the bit lock
*/
static int
-cifs_wait_bit_killable(struct wait_bit_key *key)
+cifs_wait_bit_killable(struct wait_bit_key *key, int mode)
{
- if (fatal_signal_pending(current))
- return -ERESTARTSYS;
freezable_schedule_unsafe();
+ if (signal_pending_state(mode, current))
+ return -ERESTARTSYS;
return 0;
}
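
This hunk (and the identical nfs_wait_bit_killable change further down) schedules first and only then checks for signals, using signal_pending_state() so the abort condition matches the sleep mode: an interruptible sleep aborts on any pending signal, a killable one only on a fatal signal. A userspace model of that decision, with illustrative bit values rather than the kernel's:

	#include <stdbool.h>

	#define TASK_INTERRUPTIBLE 0x01	/* illustrative values */
	#define TASK_WAKEKILL      0x02

	static bool model_signal_pending_state(int mode, bool pending, bool fatal)
	{
		if (!(mode & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
			return false;	/* uninterruptible: never aborted */
		if (!pending)
			return false;
		return (mode & TASK_INTERRUPTIBLE) ? true : fatal;
	}
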
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 1c75a3a07f8f..602e8441bc0f 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1175,6 +1175,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
if (dio->flags & DIO_LOCKING)
mutex_unlock(&inode->i_mutex);
kmem_cache_free(dio_cache, dio);
+ retval = 0;
goto out;
}
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 73c64daa0f55..60f03b78914e 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -592,10 +592,7 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
}
unlock_page(page);
}
- if (PageDirty(page) || PageWriteback(page))
- *uptodate = true;
- else
- *uptodate = PageUptodate(page);
+ *uptodate = PageUptodate(page);
EXOFS_DBGMSG2("index=0x%lx uptodate=%d\n", index, *uptodate);
return page;
} else {
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index af06830bfc00..1a0835073663 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -389,7 +389,7 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
struct ext4_crypto_ctx *ctx;
struct page *ciphertext_page = NULL;
struct bio *bio;
- ext4_lblk_t lblk = ex->ee_block;
+ ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
ext4_fsblk_t pblk = ext4_ext_pblock(ex);
unsigned int len = ext4_ext_get_actual_len(ex);
int ret, err = 0;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 750063f7a50c..cc7ca4e87144 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -26,6 +26,7 @@
#include <linux/seqlock.h>
#include <linux/mutex.h>
#include <linux/timer.h>
+#include <linux/version.h>
#include <linux/wait.h>
#include <linux/blockgroup_lock.h>
#include <linux/percpu_counter.h>
@@ -727,19 +728,55 @@ struct move_extent {
<= (EXT4_GOOD_OLD_INODE_SIZE + \
(einode)->i_extra_isize)) \
+/*
+ * We use an encoding that preserves the times for extra epoch "00":
+ *
+ * extra msb of adjust for signed
+ * epoch 32-bit 32-bit tv_sec to
+ * bits time decoded 64-bit tv_sec 64-bit tv_sec valid time range
+ * 0 0 1 -0x80000000..-0x00000001 0x000000000 1901-12-13..1969-12-31
+ * 0 0 0 0x000000000..0x07fffffff 0x000000000 1970-01-01..2038-01-19
+ * 0 1 1 0x080000000..0x0ffffffff 0x100000000 2038-01-19..2106-02-07
+ * 0 1 0 0x100000000..0x17fffffff 0x100000000 2106-02-07..2174-02-25
+ * 1 0 1 0x180000000..0x1ffffffff 0x200000000 2174-02-25..2242-03-16
+ * 1 0 0 0x200000000..0x27fffffff 0x200000000 2242-03-16..2310-04-04
+ * 1 1 1 0x280000000..0x2ffffffff 0x300000000 2310-04-04..2378-04-22
+ * 1 1 0 0x300000000..0x37fffffff 0x300000000 2378-04-22..2446-05-10
+ *
+ * Note that previous versions of the kernel on 64-bit systems would
+ * incorrectly use extra epoch bits 1,1 for dates between 1901 and
+ * 1970. e2fsck will correct this, assuming that it is run on the
+ * affected filesystem before 2242.
+ */
+
static inline __le32 ext4_encode_extra_time(struct timespec *time)
{
- return cpu_to_le32((sizeof(time->tv_sec) > 4 ?
- (time->tv_sec >> 32) & EXT4_EPOCH_MASK : 0) |
- ((time->tv_nsec << EXT4_EPOCH_BITS) & EXT4_NSEC_MASK));
+ u32 extra = sizeof(time->tv_sec) > 4 ?
+ ((time->tv_sec - (s32)time->tv_sec) >> 32) & EXT4_EPOCH_MASK : 0;
+ return cpu_to_le32(extra | (time->tv_nsec << EXT4_EPOCH_BITS));
}
static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
{
- if (sizeof(time->tv_sec) > 4)
- time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK)
- << 32;
- time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
+ if (unlikely(sizeof(time->tv_sec) > 4 &&
+ (extra & cpu_to_le32(EXT4_EPOCH_MASK)))) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)
+ /* Handle legacy encoding of pre-1970 dates with epoch
+ * bits 1,1. We assume that by kernel version 4.20,
+ * everyone will have run fsck over the affected
+ * filesystems to correct the problem. (This
+ * backwards compatibility may be removed before this
+ * time, at the discretion of the ext4 developers.)
+ */
+ u64 extra_bits = le32_to_cpu(extra) & EXT4_EPOCH_MASK;
+ if (extra_bits == 3 && ((time->tv_sec) & 0x80000000) != 0)
+ extra_bits = 0;
+ time->tv_sec += extra_bits << 32;
+#else
+ time->tv_sec += (u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) << 32;
+#endif
+ }
+ time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
}
#define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \
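
The encode/decode pair above round-trips any tv_sec in the supported range: the extra bits hold the difference between the full 64-bit seconds value and its sign-extended low 32 bits, shifted down by 32. A userspace sketch of the post-legacy decode path (EXT4_EPOCH_MASK assumed to be 0x3, matching EXT4_EPOCH_BITS = 2):

	#include <assert.h>
	#include <stdint.h>

	#define EPOCH_MASK 0x3	/* assumed value of EXT4_EPOCH_MASK */

	static uint32_t encode_extra(int64_t sec)
	{
		/* epoch bits: the part of sec above the signed 32-bit field */
		return (uint32_t)(((sec - (int32_t)sec) >> 32) & EPOCH_MASK);
	}

	static int64_t decode_sec(int32_t raw, uint32_t extra)
	{
		/* modern path: add the epoch bits back unconditionally */
		return (int64_t)raw + ((int64_t)(extra & EPOCH_MASK) << 32);
	}

	int main(void)
	{
		int64_t samples[] = { -1, 0, 0x7fffffffLL, 0x80000000LL, 0x1ffffffffLL };
		for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			assert(decode_sec((int32_t)samples[i],
					  encode_extra(samples[i])) == samples[i]);
		return 0;
	}
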
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index abe2401ce405..e8e7af62ac95 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -52,7 +52,7 @@ static const char *ext4_encrypted_follow_link(struct dentry *dentry, void **cook
/* Symlink is encrypted */
sd = (struct ext4_encrypted_symlink_data *)caddr;
cstr.name = sd->encrypted_path;
- cstr.len = le32_to_cpu(sd->len);
+ cstr.len = le16_to_cpu(sd->len);
if ((cstr.len +
sizeof(struct ext4_encrypted_symlink_data) - 1) >
max_size) {
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 1b57c72f4a00..1420a3c614af 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -358,7 +358,7 @@ static int name##_open(struct inode *inode, struct file *file) \
return single_open(file, ext4_seq_##name##_show, PDE_DATA(inode)); \
} \
\
-const struct file_operations ext4_seq_##name##_fops = { \
+static const struct file_operations ext4_seq_##name##_fops = { \
.owner = THIS_MODULE, \
.open = name##_open, \
.read = seq_read, \
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index eae2c11268bc..8e3ee1936c7e 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -549,6 +549,8 @@ static int cuse_channel_release(struct inode *inode, struct file *file)
unregister_chrdev_region(cc->cdev->dev, 1);
cdev_del(cc->cdev);
}
+ /* Base reference is now owned by "fud" */
+ fuse_conn_put(&cc->fc);
rc = fuse_dev_release(inode, file); /* puts the base reference */
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index e0faf8f2c868..570ca4053c80 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1049,6 +1049,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
flush_dcache_page(page);
+ iov_iter_advance(ii, tmp);
if (!tmp) {
unlock_page(page);
page_cache_release(page);
@@ -1061,7 +1062,6 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
req->page_descs[req->num_pages].length = tmp;
req->num_pages++;
- iov_iter_advance(ii, tmp);
count += tmp;
pos += tmp;
offset += tmp;
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 89463eee6791..ca181e81c765 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1009,7 +1009,8 @@ out:
}
/* Fast check whether buffer is already attached to the required transaction */
-static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh)
+static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
+ bool undo)
{
struct journal_head *jh;
bool ret = false;
@@ -1036,6 +1037,9 @@ static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh)
jh = READ_ONCE(bh->b_private);
if (!jh)
goto out;
+ /* For undo access, the buffer must already have its data copied */
+ if (undo && !jh->b_committed_data)
+ goto out;
if (jh->b_transaction != handle->h_transaction &&
jh->b_next_transaction != handle->h_transaction)
goto out;
@@ -1073,7 +1077,7 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
struct journal_head *jh;
int rc;
- if (jbd2_write_access_granted(handle, bh))
+ if (jbd2_write_access_granted(handle, bh, false))
return 0;
jh = jbd2_journal_add_journal_head(bh);
@@ -1210,7 +1214,7 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
char *committed_data = NULL;
JBUFFER_TRACE(jh, "entry");
- if (jbd2_write_access_granted(handle, bh))
+ if (jbd2_write_access_granted(handle, bh, true))
return 0;
jh = jbd2_journal_add_journal_head(bh);
@@ -2152,6 +2156,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
if (!buffer_dirty(bh)) {
/* bdflush has written it. We can drop it now */
+ __jbd2_journal_remove_checkpoint(jh);
goto zap_buffer;
}
@@ -2181,6 +2186,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
/* The orphan record's transaction has
* committed. We can cleanse this buffer */
clear_buffer_jbddirty(bh);
+ __jbd2_journal_remove_checkpoint(jh);
goto zap_buffer;
}
}
diff --git a/fs/namei.c b/fs/namei.c
index d84d7c7515fc..0c3974cd3ecd 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1996,7 +1996,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
nd->last_type = LAST_ROOT; /* if there are only slashes... */
nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
nd->depth = 0;
- nd->total_link_count = 0;
if (flags & LOOKUP_ROOT) {
struct dentry *root = nd->root.dentry;
struct inode *inode = root->d_inode;
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index beac58b0e09c..646cdac73488 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -78,8 +78,7 @@ static __be32 *read_buf(struct xdr_stream *xdr, int nbytes)
p = xdr_inline_decode(xdr, nbytes);
if (unlikely(p == NULL))
- printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed "
- "or truncated request.\n");
+ printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n");
return p;
}
@@ -890,7 +889,6 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
struct cb_compound_hdr_arg hdr_arg = { 0 };
struct cb_compound_hdr_res hdr_res = { NULL };
struct xdr_stream xdr_in, xdr_out;
- struct xdr_buf *rq_arg = &rqstp->rq_arg;
__be32 *p, status;
struct cb_process_state cps = {
.drc_status = 0,
@@ -902,8 +900,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
dprintk("%s: start\n", __func__);
- rq_arg->len = rq_arg->head[0].iov_len + rq_arg->page_len;
- xdr_init_decode(&xdr_in, rq_arg, rq_arg->head[0].iov_base);
+ xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);
p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 31b0a52223a7..c7e8b87da5b2 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -75,11 +75,11 @@ nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
* nfs_wait_bit_killable - helper for functions that are sleeping on bit locks
* @word: long word containing the bit lock
*/
-int nfs_wait_bit_killable(struct wait_bit_key *key)
+int nfs_wait_bit_killable(struct wait_bit_key *key, int mode)
{
- if (fatal_signal_pending(current))
- return -ERESTARTSYS;
freezable_schedule_unsafe();
+ if (signal_pending_state(mode, current))
+ return -ERESTARTSYS;
return 0;
}
EXPORT_SYMBOL_GPL(nfs_wait_bit_killable);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 56cfde26fb9c..9dea85f7f918 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -379,7 +379,7 @@ extern int nfs_drop_inode(struct inode *);
extern void nfs_clear_inode(struct inode *);
extern void nfs_evict_inode(struct inode *);
void nfs_zap_acl_cache(struct inode *inode);
-extern int nfs_wait_bit_killable(struct wait_bit_key *key);
+extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode);
/* super.c */
extern const struct super_operations nfs_sops;
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 5c0c6b58157f..9aebffb40505 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -476,10 +476,7 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
}
unlock_page(page);
}
- if (PageDirty(page) || PageWriteback(page))
- *uptodate = true;
- else
- *uptodate = PageUptodate(page);
+ *uptodate = PageUptodate(page);
dprintk("%s: index=0x%lx uptodate=%d\n", __func__, index, *uptodate);
return page;
}
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index fe3ddd20ff89..452a011ba0d8 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -129,7 +129,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c)
set_bit(NFS_IO_INPROGRESS, &c->flags);
if (atomic_read(&c->io_count) == 0)
break;
- ret = nfs_wait_bit_killable(&q.key);
+ ret = nfs_wait_bit_killable(&q.key, TASK_KILLABLE);
} while (atomic_read(&c->io_count) != 0 && !ret);
finish_wait(wq, &q.wait);
return ret;
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 5a8ae2125b50..bec0384499f7 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1466,11 +1466,11 @@ static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
}
/* stop waiting if someone clears NFS_LAYOUT_RETRY_LAYOUTGET bit. */
-static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key)
+static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key, int mode)
{
if (!test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, key->flags))
return 1;
- return nfs_wait_bit_killable(key);
+ return nfs_wait_bit_killable(key, mode);
}
static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index a03f6f433075..3123408da935 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -367,13 +367,11 @@ static int ocfs2_mknod(struct inode *dir,
goto leave;
}
- status = posix_acl_create(dir, &mode, &default_acl, &acl);
+ status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
if (status) {
mlog_errno(status);
goto leave;
}
- /* update inode->i_mode after mask with "umask". */
- inode->i_mode = mode;
handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
S_ISDIR(mode),
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 871fcb67be97..0a8983492d91 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -195,8 +195,7 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
struct dentry *dentry, struct path *lowerpath,
- struct kstat *stat, struct iattr *attr,
- const char *link)
+ struct kstat *stat, const char *link)
{
struct inode *wdir = workdir->d_inode;
struct inode *udir = upperdir->d_inode;
@@ -240,8 +239,6 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
mutex_lock(&newdentry->d_inode->i_mutex);
err = ovl_set_attr(newdentry, stat);
- if (!err && attr)
- err = notify_change(newdentry, attr, NULL);
mutex_unlock(&newdentry->d_inode->i_mutex);
if (err)
goto out_cleanup;
@@ -286,8 +283,7 @@ out_cleanup:
* that point the file will have already been copied up anyway.
*/
int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
- struct path *lowerpath, struct kstat *stat,
- struct iattr *attr)
+ struct path *lowerpath, struct kstat *stat)
{
struct dentry *workdir = ovl_workdir(dentry);
int err;
@@ -345,26 +341,19 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
}
upperdentry = ovl_dentry_upper(dentry);
if (upperdentry) {
- unlock_rename(workdir, upperdir);
+ /* Raced with another copy-up? Nothing to do, then... */
err = 0;
- /* Raced with another copy-up? Do the setattr here */
- if (attr) {
- mutex_lock(&upperdentry->d_inode->i_mutex);
- err = notify_change(upperdentry, attr, NULL);
- mutex_unlock(&upperdentry->d_inode->i_mutex);
- }
- goto out_put_cred;
+ goto out_unlock;
}
err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath,
- stat, attr, link);
+ stat, link);
if (!err) {
/* Restore timestamps on parent (best effort) */
ovl_set_timestamps(upperdir, &pstat);
}
out_unlock:
unlock_rename(workdir, upperdir);
-out_put_cred:
revert_creds(old_cred);
put_cred(override_cred);
@@ -406,7 +395,7 @@ int ovl_copy_up(struct dentry *dentry)
ovl_path_lower(next, &lowerpath);
err = vfs_getattr(&lowerpath, &stat);
if (!err)
- err = ovl_copy_up_one(parent, next, &lowerpath, &stat, NULL);
+ err = ovl_copy_up_one(parent, next, &lowerpath, &stat);
dput(parent);
dput(next);
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index ec0c2a050043..4060ffde8722 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -12,8 +12,7 @@
#include <linux/xattr.h>
#include "overlayfs.h"
-static int ovl_copy_up_last(struct dentry *dentry, struct iattr *attr,
- bool no_data)
+static int ovl_copy_up_truncate(struct dentry *dentry)
{
int err;
struct dentry *parent;
@@ -30,10 +29,8 @@ static int ovl_copy_up_last(struct dentry *dentry, struct iattr *attr,
if (err)
goto out_dput_parent;
- if (no_data)
- stat.size = 0;
-
- err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat, attr);
+ stat.size = 0;
+ err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat);
out_dput_parent:
dput(parent);
@@ -49,13 +46,13 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
if (err)
goto out;
- upperdentry = ovl_dentry_upper(dentry);
- if (upperdentry) {
+ err = ovl_copy_up(dentry);
+ if (!err) {
+ upperdentry = ovl_dentry_upper(dentry);
+
mutex_lock(&upperdentry->d_inode->i_mutex);
err = notify_change(upperdentry, attr, NULL);
mutex_unlock(&upperdentry->d_inode->i_mutex);
- } else {
- err = ovl_copy_up_last(dentry, attr, false);
}
ovl_drop_write(dentry);
out:
@@ -353,7 +350,7 @@ struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
return ERR_PTR(err);
if (file_flags & O_TRUNC)
- err = ovl_copy_up_last(dentry, NULL, true);
+ err = ovl_copy_up_truncate(dentry);
else
err = ovl_copy_up(dentry);
ovl_drop_write(dentry);
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index ea5a40b06e3a..e17154aeaae4 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -194,7 +194,6 @@ void ovl_cleanup(struct inode *dir, struct dentry *dentry);
/* copy_up.c */
int ovl_copy_up(struct dentry *dentry);
int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
- struct path *lowerpath, struct kstat *stat,
- struct iattr *attr);
+ struct path *lowerpath, struct kstat *stat);
int ovl_copy_xattr(struct dentry *old, struct dentry *new);
int ovl_set_attr(struct dentry *upper, struct kstat *stat);
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index db284bff29dc..9dbb739cafa0 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -5,7 +5,7 @@
* Copyright 2001 Red Hat, Inc.
* Based on code from mm/memory.c Copyright Linus Torvalds and others.
*
- * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright 2011 Red Hat, Inc., Peter Zijlstra
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 0b921ae06cd8..0a271ca1f7c7 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -309,6 +309,11 @@ struct drm_file {
unsigned universal_planes:1;
/* true if client understands atomic properties */
unsigned atomic:1;
+ /*
+ * This client is allowed to gain master privileges for @master.
+ * Protected by struct drm_device::master_mutex.
+ */
+ unsigned allowed_master:1;
struct pid *pid;
kuid_t uid;
@@ -910,6 +915,7 @@ extern int drm_open(struct inode *inode, struct file *filp);
extern ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
extern int drm_release(struct inode *inode, struct file *filp);
+extern int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
/* Mapping support (drm_vm.h) */
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
@@ -947,6 +953,10 @@ extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
struct drm_pending_vblank_event *e);
extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
+extern void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
+ struct drm_pending_vblank_event *e);
+extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e);
extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 054833939995..1991aea2ec4c 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -870,8 +870,8 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
}
static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
- const char *name, const char *cells_name,
- size_t index, struct acpi_reference_args *args)
+ const char *name, size_t index,
+ struct acpi_reference_args *args)
{
return -ENXIO;
}
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 2b8ed123ad36..defeaac0745f 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -107,7 +107,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
*/
static inline __u32 rol32(__u32 word, unsigned int shift)
{
- return (word << shift) | (word >> (32 - shift));
+ return (word << shift) | (word >> ((-shift) & 31));
}
/**
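
The new rol32() sidesteps undefined behaviour: with the old form, a shift of 0 evaluated `word >> 32`, which C leaves undefined for 32-bit types. `(-shift) & 31` maps 0 to 0 and 1..31 to 31..1, so every shift count stays in range. A quick check:

	#include <assert.h>
	#include <stdint.h>

	static uint32_t rol32(uint32_t word, unsigned int shift)
	{
		return (word << shift) | (word >> ((-shift) & 31));
	}

	int main(void)
	{
		assert(rol32(0x80000001u, 0) == 0x80000001u);	/* now well-defined */
		assert(rol32(0x80000001u, 1) == 0x00000003u);
		return 0;
	}
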
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c06f8eaa42ff..0169ba2e2e64 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -254,6 +254,7 @@ struct queue_limits {
unsigned long virt_boundary_mask;
unsigned int max_hw_sectors;
+ unsigned int max_dev_sectors;
unsigned int chunk_sectors;
unsigned int max_sectors;
unsigned int max_segment_size;
@@ -959,7 +960,6 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
-extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 097901a68671..e5f4164cbd99 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -90,7 +90,6 @@ enum {
*/
struct cgroup_file {
/* do not access any fields from outside cgroup core */
- struct list_head node; /* anchored at css->files */
struct kernfs_node *kn;
};
@@ -134,9 +133,6 @@ struct cgroup_subsys_state {
*/
u64 serial_nr;
- /* all cgroup_files associated with this css */
- struct list_head files;
-
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
@@ -440,12 +436,9 @@ struct cgroup_subsys {
void (*css_reset)(struct cgroup_subsys_state *css);
void (*css_e_css_changed)(struct cgroup_subsys_state *css);
- int (*can_attach)(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset);
- void (*cancel_attach)(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset);
- void (*attach)(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset);
+ int (*can_attach)(struct cgroup_taskset *tset);
+ void (*cancel_attach)(struct cgroup_taskset *tset);
+ void (*attach)(struct cgroup_taskset *tset);
int (*can_fork)(struct task_struct *task, void **priv_p);
void (*cancel_fork)(struct task_struct *task, void *priv);
void (*fork)(struct task_struct *task, void *priv);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index a8ba1ea0ea5a..322a28482745 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -89,6 +89,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
+void cgroup_file_notify(struct cgroup_file *cfile);
char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
@@ -120,8 +121,10 @@ struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
struct cgroup_subsys_state *css);
-struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
-struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
+struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
+ struct cgroup_subsys_state **dst_cssp);
+struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
+ struct cgroup_subsys_state **dst_cssp);
void css_task_iter_start(struct cgroup_subsys_state *css,
struct css_task_iter *it);
@@ -236,30 +239,39 @@ void css_task_iter_end(struct css_task_iter *it);
/**
* cgroup_taskset_for_each - iterate cgroup_taskset
* @task: the loop cursor
+ * @dst_css: the destination css
* @tset: taskset to iterate
*
* @tset may contain multiple tasks and they may belong to multiple
- * processes. When there are multiple tasks in @tset, if a task of a
- * process is in @tset, all tasks of the process are in @tset. Also, all
- * are guaranteed to share the same source and destination csses.
+ * processes.
+ *
+ * On the v2 hierarchy, there may be tasks from multiple processes and they
+ * may not share the source or destination csses.
+ *
+ * On traditional hierarchies, when there are multiple tasks in @tset, if a
+ * task of a process is in @tset, all tasks of the process are in @tset.
+ * Also, all are guaranteed to share the same source and destination csses.
*
* Iteration is not in any specific order.
*/
-#define cgroup_taskset_for_each(task, tset) \
- for ((task) = cgroup_taskset_first((tset)); (task); \
- (task) = cgroup_taskset_next((tset)))
+#define cgroup_taskset_for_each(task, dst_css, tset) \
+ for ((task) = cgroup_taskset_first((tset), &(dst_css)); \
+ (task); \
+ (task) = cgroup_taskset_next((tset), &(dst_css)))
/**
* cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
* @leader: the loop cursor
+ * @dst_css: the destination css
* @tset: taskset to iterate
*
* Iterate threadgroup leaders of @tset. For single-task migrations, @tset
* may not contain any.
*/
-#define cgroup_taskset_for_each_leader(leader, tset) \
- for ((leader) = cgroup_taskset_first((tset)); (leader); \
- (leader) = cgroup_taskset_next((tset))) \
+#define cgroup_taskset_for_each_leader(leader, dst_css, tset) \
+ for ((leader) = cgroup_taskset_first((tset), &(dst_css)); \
+ (leader); \
+ (leader) = cgroup_taskset_next((tset), &(dst_css))) \
if ((leader) != (leader)->group_leader) \
; \
else
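
Under the reworked iterator, each task carries its own destination css, so a v2-aware controller callback reads it straight from the loop. A minimal usage sketch against the macro above (the callback body is hypothetical):

	static int example_can_attach(struct cgroup_taskset *tset)
	{
		struct task_struct *task;
		struct cgroup_subsys_state *dst_css;

		cgroup_taskset_for_each(task, dst_css, tset) {
			/* dst_css is the destination css for this task;
			 * on the v2 hierarchy it may differ per task */
		}
		return 0;
	}
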
@@ -539,19 +551,6 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
pr_cont_kernfs_path(cgrp->kn);
}
-/**
- * cgroup_file_notify - generate a file modified event for a cgroup_file
- * @cfile: target cgroup_file
- *
- * @cfile must have been obtained by setting cftype->file_offset.
- */
-static inline void cgroup_file_notify(struct cgroup_file *cfile)
-{
- /* might not have been created due to one of the CFTYPE selector flags */
- if (cfile->kn)
- kernfs_notify(cfile->kn);
-}
-
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index ef4c5b1a860f..177c7680c1a8 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -77,6 +77,7 @@ struct cpufreq_policy {
unsigned int suspend_freq; /* freq to set during suspend */
unsigned int policy; /* see above */
+ unsigned int last_policy; /* policy before unplug */
struct cpufreq_governor *governor; /* see below */
void *governor_data;
bool governor_enabled; /* governor start/stop flag */
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index c9ae0c6ec050..d5d798b35c1f 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -330,6 +330,7 @@ struct rdists {
};
struct irq_domain;
+struct device_node;
int its_cpu_init(void);
int its_init(struct device_node *node, struct rdists *rdists,
struct irq_domain *domain);
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 8dde55974f18..0536524bb9eb 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -5,7 +5,7 @@
* Jump label support
*
* Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
- * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
*
* DEPRECATED API:
*
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index d0a1f99e24e3..4894c6888bc6 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -25,7 +25,7 @@
#ifdef CONFIG_DEBUG_KMEMLEAK
-extern void kmemleak_init(void) __ref;
+extern void kmemleak_init(void) __init;
extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
gfp_t gfp) __ref;
extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 83577f8fd15b..600c1e0626a5 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -210,6 +210,7 @@ enum {
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
/* (doesn't imply presence) */
ATA_FLAG_SATA = (1 << 1),
+ ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */
ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index c6916aec43b6..034117b3be5f 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -50,9 +50,16 @@ enum {
NVM_IO_DUAL_ACCESS = 0x1,
NVM_IO_QUAD_ACCESS = 0x2,
+ /* NAND Access Modes */
NVM_IO_SUSPEND = 0x80,
NVM_IO_SLC_MODE = 0x100,
NVM_IO_SCRAMBLE_DISABLE = 0x200,
+
+ /* Block Types */
+ NVM_BLK_T_FREE = 0x0,
+ NVM_BLK_T_BAD = 0x1,
+ NVM_BLK_T_DEV = 0x2,
+ NVM_BLK_T_HOST = 0x4,
};
struct nvm_id_group {
@@ -176,17 +183,17 @@ struct nvm_block;
typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
-typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *);
-typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32,
+typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
+typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
nvm_l2p_update_fn *, void *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
nvm_bb_update_fn *, void *);
-typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int);
-typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *);
-typedef int (nvm_erase_blk_fn)(struct request_queue *, struct nvm_rq *);
-typedef void *(nvm_create_dma_pool_fn)(struct request_queue *, char *);
+typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
+typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
+typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
+typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
-typedef void *(nvm_dev_dma_alloc_fn)(struct request_queue *, void *, gfp_t,
+typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 70400dc7660f..c57e424d914b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -2,7 +2,7 @@
* Runtime locking correctness validator
*
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* see Documentation/locking/lockdep-design.txt for more details.
*/
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 7501626ab529..d3133be12d92 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -427,6 +427,17 @@ enum {
};
enum {
+ /*
+ * Max wqe size for rdma read is 512 bytes, so this
+ * limits our max_sge_rd as the wqe needs to fit:
+ * - ctrl segment (16 bytes)
+ * - rdma segment (16 bytes)
+ * - scatter elements (16 bytes each)
+ */
+ MLX4_MAX_SGE_RD = (512 - 16 - 16) / 16
+};
+
+enum {
MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14,
MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15,
MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE = 0x16,
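
The arithmetic in the new comment can be pinned down at compile time: a 512-byte work queue entry minus the 16-byte control and RDMA segments leaves exactly 30 16-byte scatter entries.

	#include <assert.h>

	/* matches MLX4_MAX_SGE_RD = (512 - 16 - 16) / 16 above */
	static_assert((512 - 16 - 16) / 16 == 30, "SGE budget per 512-byte WQE");
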
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 81b26a543a3c..c20b814e46a0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2127,7 +2127,7 @@ struct pcpu_sw_netstats {
})
#define netdev_alloc_pcpu_stats(type) \
- __netdev_alloc_pcpu_stats(type, GFP_KERNEL);
+ __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
enum netdev_lag_tx_type {
NETDEV_LAG_TX_TYPE_UNKNOWN,
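
Dropping the semicolon in netdev_alloc_pcpu_stats() matters because the macro is used in expression position: with it, every expansion ended in an empty extra statement, which breaks if/else pairing at call sites. A reduced illustration (the helper is hypothetical):

	#include <stddef.h>

	static void *alloc_stats(size_t size) { (void)size; return NULL; }

	#define ALLOC_BAD(type)  alloc_stats(sizeof(type));	/* old: trailing ';' */
	#define ALLOC_GOOD(type) alloc_stats(sizeof(type))	/* fixed */

	void *example(int want)
	{
		void *p = NULL;

		if (want)
			p = ALLOC_GOOD(int);	/* ALLOC_BAD here: "else without if" */
		else
			p = NULL;
		return p;
	}
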
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 249d1bb01e03..5646b24bfc64 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -14,7 +14,7 @@ struct nfnl_callback {
int (*call_rcu)(struct sock *nl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[]);
- int (*call_batch)(struct sock *nl, struct sk_buff *skb,
+ int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[]);
const struct nla_policy *policy; /* netlink attribute policy */
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 039f2eec49ce..1e0deb8e8494 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -46,12 +46,14 @@ extern int of_irq_get(struct device_node *dev, int index);
extern int of_irq_get_byname(struct device_node *dev, const char *name);
extern int of_irq_to_resource_table(struct device_node *dev,
struct resource *res, int nr_irqs);
+extern struct device_node *of_irq_find_parent(struct device_node *child);
extern struct irq_domain *of_msi_get_domain(struct device *dev,
struct device_node *np,
enum irq_domain_bus_token token);
extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
u32 rid);
extern void of_msi_configure(struct device *dev, struct device_node *np);
+u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
#else
static inline int of_irq_count(struct device_node *dev)
{
@@ -70,6 +72,11 @@ static inline int of_irq_to_resource_table(struct device_node *dev,
{
return 0;
}
+static inline void *of_irq_find_parent(struct device_node *child)
+{
+ return NULL;
+}
+
static inline struct irq_domain *of_msi_get_domain(struct device *dev,
struct device_node *np,
enum irq_domain_bus_token token)
@@ -84,6 +91,11 @@ static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev
static inline void of_msi_configure(struct device *dev, struct device_node *np)
{
}
+static inline u32 of_msi_map_rid(struct device *dev,
+ struct device_node *msi_np, u32 rid_in)
+{
+ return rid_in;
+}
#endif
#if defined(CONFIG_OF_IRQ) || defined(CONFIG_SPARC)
@@ -93,7 +105,6 @@ static inline void of_msi_configure(struct device *dev, struct device_node *np)
* so declare it here regardless of the CONFIG_OF_IRQ setting.
*/
extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
-u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
#else /* !CONFIG_OF && !CONFIG_SPARC */
static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
@@ -101,12 +112,6 @@ static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
{
return 0;
}
-
-static inline u32 of_msi_map_rid(struct device *dev,
- struct device_node *msi_np, u32 rid_in)
-{
- return rid_in;
-}
#endif /* !CONFIG_OF */
#endif /* __OF_IRQ_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index d841d33bcdc9..f9828a48f16a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -697,9 +697,11 @@ struct perf_cgroup {
* if there is no cgroup event for the current CPU context.
*/
static inline struct perf_cgroup *
-perf_cgroup_from_task(struct task_struct *task)
+perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
- return container_of(task_css(task, perf_event_cgrp_id),
+ return container_of(task_css_check(task, perf_event_cgrp_id,
+ ctx ? lockdep_is_held(&ctx->lock)
+ : true),
struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index e2878baeb90e..4299f4ba03bd 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -72,7 +72,7 @@ struct edma_soc_info {
struct edma_rsv_info *rsv;
/* List of channels allocated for memcpy, terminated with -1 */
- s16 *memcpy_channels;
+ s32 *memcpy_channels;
s8 (*queue_priority_mapping)[2];
const s16 (*xbar_chans)[2];
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 5440f64d2942..21221338ad18 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -1,7 +1,7 @@
/*
* Floating proportions
*
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* This file contains the public data structure and API definitions.
*/
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 6a4347639c03..1d1ba2c5ee7a 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -9,6 +9,8 @@
#ifndef __COMMON_HSI__
#define __COMMON_HSI__
+#define CORE_SPQE_PAGE_SIZE_BYTES 4096
+
#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 4
#define FW_REVISION_VERSION 2
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index b920c3605c46..41b9049b57e2 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -111,7 +111,8 @@ static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) -
(u32)p_chain->cons_idx;
if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
- used -= (used / p_chain->elem_per_page);
+ used -= p_chain->prod_idx / p_chain->elem_per_page -
+ p_chain->cons_idx / p_chain->elem_per_page;
return p_chain->capacity - used;
}
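To see why the old formula undercounted, take elem_per_page = 8 (in QED_CHAIN_MODE_NEXT_PTR the last element of each page is a link), cons_idx = 2 and prod_idx = 17, so used = 15 before correction; the values are illustrative. The old code subtracted used / 8 = 1, yet the producer sits two page boundaries ahead of the consumer and has therefore burned two link elements. The new code subtracts 17/8 - 2/8 = 2 - 0 = 2, reporting 13 live elements instead of an optimistic 14.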
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 77deece15fb3..63bd7601b6de 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -19,6 +19,7 @@
#include <linux/atomic.h>
#include <linux/compiler.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
@@ -339,10 +340,11 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
int rhashtable_init(struct rhashtable *ht,
const struct rhashtable_params *params);
-int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
- struct rhash_head *obj,
- struct bucket_table *old_tbl);
-int rhashtable_insert_rehash(struct rhashtable *ht);
+struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
+ const void *key,
+ struct rhash_head *obj,
+ struct bucket_table *old_tbl);
+int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
@@ -598,9 +600,11 @@ restart:
new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
if (unlikely(new_tbl)) {
- err = rhashtable_insert_slow(ht, key, obj, new_tbl);
- if (err == -EAGAIN)
+ tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
+ if (!IS_ERR_OR_NULL(tbl))
goto slow_path;
+
+ err = PTR_ERR(tbl);
goto out;
}
@@ -611,7 +615,7 @@ restart:
if (unlikely(rht_grow_above_100(ht, tbl))) {
slow_path:
spin_unlock_bh(lock);
- err = rhashtable_insert_rehash(ht);
+ err = rhashtable_insert_rehash(ht, tbl);
rcu_read_unlock();
if (err)
return err;
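The new return convention is three-valued: NULL means the object was inserted, a live bucket_table pointer means the caller should rehash that table and retry, and an ERR_PTR()-encoded errno is a hard failure. A hedged decoder, mirroring the inline fast path above (example_insert is illustrative):

#include <linux/err.h>
#include <linux/rhashtable.h>

static int example_insert(struct rhashtable *ht, const void *key,
			  struct rhash_head *obj,
			  struct bucket_table *new_tbl)
{
	struct bucket_table *tbl;

	tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
	if (!IS_ERR_OR_NULL(tbl))
		/* a real table: rehash it, then have the caller retry */
		return rhashtable_insert_rehash(ht, tbl) ?: -EAGAIN;

	return PTR_ERR(tbl);	/* NULL decodes to 0, ERR_PTR to -errno */
}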
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 0adedca24c5b..0e1b1540597a 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -99,7 +99,7 @@ static inline int try_stop_cpus(const struct cpumask *cpumask,
* grabbing every spinlock (and more). So the "read" side to such a
* lock is anything which disables preemption.
*/
-#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
+#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/**
* stop_machine: freeze the machine on all CPUs and run this function
@@ -118,7 +118,7 @@ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus);
-#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */
+#else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
static inline int stop_machine(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus)
@@ -137,5 +137,5 @@ static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
return stop_machine(fn, data, cpus);
}
-#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
+#endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
#endif /* _LINUX_STOP_MACHINE */
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 0bdc72f36905..4a29c75b146e 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -21,7 +21,7 @@
* Authors:
* Srikar Dronamraju
* Jim Keniston
- * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
*/
#include <linux/errno.h>
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 9948c874e3f1..1d0043dc34e4 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -47,4 +47,7 @@
/* device generates spurious wakeup, ignore remote wakeup capability */
#define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9)
+/* device can't handle Link Power Management */
+#define USB_QUIRK_NO_LPM BIT(10)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 610a86a892b8..ddb440975382 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -44,9 +44,6 @@ struct vfio_device_ops {
void (*request)(void *device_data, unsigned int count);
};
-extern struct iommu_group *vfio_iommu_group_get(struct device *dev);
-extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev);
-
extern int vfio_add_group_dev(struct device *dev,
const struct vfio_device_ops *ops,
void *device_data);
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 6aa09a875fbd..0496c31aaf06 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -166,7 +166,7 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
list_del(&old->task_list);
}
-typedef int wait_bit_action_f(struct wait_bit_key *);
+typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
@@ -981,10 +981,10 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
} while (0)
-extern int bit_wait(struct wait_bit_key *);
-extern int bit_wait_io(struct wait_bit_key *);
-extern int bit_wait_timeout(struct wait_bit_key *);
-extern int bit_wait_io_timeout(struct wait_bit_key *);
+extern int bit_wait(struct wait_bit_key *, int);
+extern int bit_wait_io(struct wait_bit_key *, int);
+extern int bit_wait_timeout(struct wait_bit_key *, int);
+extern int bit_wait_io_timeout(struct wait_bit_key *, int);
/**
* wait_on_bit - wait for a bit to be cleared
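The new int argument carries the sleep mode (TASK_UNINTERRUPTIBLE, TASK_KILLABLE, ...) down into the action. That matters because by the time the action returns from schedule(), current->state has been reset and can no longer tell the action whether the caller asked for an interruptible wait. A minimal custom action under the new signature, with an illustrative name:

#include <linux/sched.h>
#include <linux/wait.h>

/* Illustrative wait_bit_action_f: sleep, then honour signals only if
 * the mode the caller slept in permits interruption.
 */
static __sched int example_bit_wait(struct wait_bit_key *word, int mode)
{
	schedule();
	if (signal_pending_state(mode, current))
		return -EINTR;
	return 0;
}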
diff --git a/include/net/dst.h b/include/net/dst.h
index 1279f9b09791..c7329dcd90cc 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -322,6 +322,39 @@ static inline void skb_dst_force(struct sk_buff *skb)
}
}
+/**
+ * dst_hold_safe - Take a reference on a dst if possible
+ * @dst: pointer to dst entry
+ *
+ * This helper returns false if it could not safely
+ * take a reference on a dst.
+ */
+static inline bool dst_hold_safe(struct dst_entry *dst)
+{
+ if (dst->flags & DST_NOCACHE)
+ return atomic_inc_not_zero(&dst->__refcnt);
+ dst_hold(dst);
+ return true;
+}
+
+/**
+ * skb_dst_force_safe - makes sure skb dst is refcounted
+ * @skb: buffer
+ *
+ * If dst is not yet refcounted and not destroyed, grab a ref on it.
+ */
+static inline void skb_dst_force_safe(struct sk_buff *skb)
+{
+ if (skb_dst_is_noref(skb)) {
+ struct dst_entry *dst = skb_dst(skb);
+
+ if (!dst_hold_safe(dst))
+ dst = NULL;
+
+ skb->_skb_refdst = (unsigned long)dst;
+ }
+}
+
/**
* __skb_tunnel_rx - prepare skb for rx reinsert
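The pair exists for code that parks an skb beyond the current RCU section, as the sk_backlog path in net/sock.h does later in this merge: a noref dst must either gain a real reference or be dropped before the RCU lock goes away. A hedged sketch of that pattern (park_skb is illustrative):

#include <linux/skbuff.h>
#include <net/dst.h>

/* Illustrative: queue an skb for processing after rcu_read_unlock(). */
static void park_skb(struct sk_buff_head *q, struct sk_buff *skb)
{
	skb_dst_force_safe(skb);	/* noref dst -> real ref, or NULL */
	__skb_queue_tail(q, skb);
}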
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 2134e6d815bc..625bdf95d673 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -210,18 +210,37 @@ struct inet_sock {
#define IP_CMSG_ORIGDSTADDR BIT(6)
#define IP_CMSG_CHECKSUM BIT(7)
-/* SYNACK messages might be attached to request sockets.
+/**
+ * sk_to_full_sk - Access to a full socket
+ * @sk: pointer to a socket
+ *
+ * SYNACK messages might be attached to request sockets.
* Some places want to reach the listener in this case.
*/
-static inline struct sock *skb_to_full_sk(const struct sk_buff *skb)
+static inline struct sock *sk_to_full_sk(struct sock *sk)
{
- struct sock *sk = skb->sk;
-
+#ifdef CONFIG_INET
if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
sk = inet_reqsk(sk)->rsk_listener;
+#endif
+ return sk;
+}
+
+/* sk_to_full_sk() variant with a const argument */
+static inline const struct sock *sk_const_to_full_sk(const struct sock *sk)
+{
+#ifdef CONFIG_INET
+ if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
+ sk = ((const struct request_sock *)sk)->rsk_listener;
+#endif
return sk;
}
+static inline struct sock *skb_to_full_sk(const struct sk_buff *skb)
+{
+ return sk_to_full_sk(skb->sk);
+}
+
static inline struct inet_sock *inet_sk(const struct sock *sk)
{
return (struct inet_sock *)sk;
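Splitting skb_to_full_sk() into sk_to_full_sk()/sk_const_to_full_sk() lets callers that hold only a struct sock, not an skb, look through a TCP_NEW_SYN_RECV request socket to the listener behind it. A hedged example of the kind of consumer this enables (example_skb_mark is illustrative):

#include <net/inet_sock.h>

/* Illustrative: read the socket mark even for SYNACK skbs, whose
 * skb->sk is a request socket rather than the full listener.
 */
static u32 example_skb_mark(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	return sk ? sk->sk_mark : 0;
}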
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 4a6009d4486b..235c7811a86a 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -78,6 +78,7 @@ void inet_initpeers(void) __init;
static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip)
{
iaddr->a4.addr = ip;
+ iaddr->a4.vif = 0;
iaddr->family = AF_INET;
}
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 7bbb71081aeb..eea9bdeecba2 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1493,7 +1493,8 @@ struct sctp_association {
* : SACK's are not delayed (see Section 6).
*/
__u8 sack_needed:1, /* Do we need to sack the peer? */
- sack_generation:1;
+ sack_generation:1,
+ zero_window_announced:1;
__u32 sack_cnt;
__u32 adaptation_ind; /* Adaptation Code point. */
diff --git a/include/net/sock.h b/include/net/sock.h
index 6e6e8a25d997..3794cdde837a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -389,7 +389,7 @@ struct sock {
struct socket_wq *sk_wq_raw;
};
#ifdef CONFIG_XFRM
- struct xfrm_policy *sk_policy[2];
+ struct xfrm_policy __rcu *sk_policy[2];
#endif
struct dst_entry *sk_rx_dst;
struct dst_entry __rcu *sk_dst_cache;
@@ -405,6 +405,7 @@ struct sock {
sk_userlocks : 4,
sk_protocol : 8,
sk_type : 16;
+#define SK_PROTOCOL_MAX U8_MAX
kmemcheck_bitfield_end(flags);
int sk_wmem_queued;
gfp_t sk_allocation;
@@ -736,6 +737,8 @@ enum sock_flags {
SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
};
+#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
+
static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
nsk->sk_flags = osk->sk_flags;
@@ -810,7 +813,7 @@ void sk_stream_write_space(struct sock *sk);
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
/* don't let the skb dst go unrefcounted; we are going to leave the rcu lock */
- skb_dst_force(skb);
+ skb_dst_force_safe(skb);
if (!sk->sk_backlog.tail)
sk->sk_backlog.head = skb;
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index b5a1aec1a167..0fb86442544b 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -79,7 +79,7 @@ struct vxlanhdr {
};
/* VXLAN header flags. */
-#define VXLAN_HF_RCO BIT(24)
+#define VXLAN_HF_RCO BIT(21)
#define VXLAN_HF_VNI BIT(27)
#define VXLAN_HF_GBP BIT(31)
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 4a9c21f9b4ea..d6f6e5006ee9 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -548,6 +548,7 @@ struct xfrm_policy {
u16 family;
struct xfrm_sec_ctx *security;
struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
+ struct rcu_head rcu;
};
static inline struct net *xp_net(const struct xfrm_policy *xp)
@@ -1141,12 +1142,14 @@ static inline int xfrm6_route_forward(struct sk_buff *skb)
return xfrm_route_forward(skb, AF_INET6);
}
-int __xfrm_sk_clone_policy(struct sock *sk);
+int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);
-static inline int xfrm_sk_clone_policy(struct sock *sk)
+static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
{
- if (unlikely(sk->sk_policy[0] || sk->sk_policy[1]))
- return __xfrm_sk_clone_policy(sk);
+ sk->sk_policy[0] = NULL;
+ sk->sk_policy[1] = NULL;
+ if (unlikely(osk->sk_policy[0] || osk->sk_policy[1]))
+ return __xfrm_sk_clone_policy(sk, osk);
return 0;
}
@@ -1154,12 +1157,16 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
static inline void xfrm_sk_free_policy(struct sock *sk)
{
- if (unlikely(sk->sk_policy[0] != NULL)) {
- xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX);
+ struct xfrm_policy *pol;
+
+ pol = rcu_dereference_protected(sk->sk_policy[0], 1);
+ if (unlikely(pol != NULL)) {
+ xfrm_policy_delete(pol, XFRM_POLICY_MAX);
sk->sk_policy[0] = NULL;
}
- if (unlikely(sk->sk_policy[1] != NULL)) {
- xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1);
+ pol = rcu_dereference_protected(sk->sk_policy[1], 1);
+ if (unlikely(pol != NULL)) {
+ xfrm_policy_delete(pol, XFRM_POLICY_MAX+1);
sk->sk_policy[1] = NULL;
}
}
@@ -1169,7 +1176,7 @@ void xfrm_garbage_collect(struct net *net);
#else
static inline void xfrm_sk_free_policy(struct sock *sk) {}
-static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; }
+static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; }
static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
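With sk_policy[] now __rcu-annotated, plain loads become sparse warnings: readers must use rcu_dereference() under rcu_read_lock(), while single-owner teardown paths use rcu_dereference_protected() with an explicit justification, as xfrm_sk_free_policy() does above. A minimal reader sketch under those assumptions (sk_has_out_policy is illustrative and presumes CONFIG_XFRM):

#include <net/xfrm.h>

static bool sk_has_out_policy(struct sock *sk)
{
	bool ret;

	rcu_read_lock();
	ret = rcu_dereference(sk->sk_policy[XFRM_POLICY_OUT]) != NULL;
	rcu_read_unlock();
	return ret;
}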
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 188df91d5851..ec9b44dd3d80 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -237,6 +237,8 @@ struct ib_vendor_mad {
u8 data[IB_MGMT_VENDOR_DATA];
};
+#define IB_MGMT_CLASSPORTINFO_ATTR_ID cpu_to_be16(0x0001)
+
struct ib_class_port_info {
u8 base_version;
u8 class_version;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 9a68a19532ba..120da1d7f57e 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1271,6 +1271,7 @@ struct ib_uobject {
int id; /* index into kernel idr */
struct kref ref;
struct rw_semaphore mutex; /* protects .live */
+ struct rcu_head rcu; /* kfree_rcu() overhead */
int live;
};
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index ed527121031d..fcfa3d7f5e7e 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -668,6 +668,9 @@ struct Scsi_Host {
unsigned use_blk_mq:1;
unsigned use_cmd_list:1;
+ /* Host responded with short (<36 bytes) INQUIRY result */
+ unsigned short_inquiry:1;
+
/*
* Optional work queue to be utilized by the transport
*/
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
index 2ae8812d7b1a..94dc6a9772e0 100644
--- a/include/sound/hda_register.h
+++ b/include/sound/hda_register.h
@@ -93,6 +93,9 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_REG_HSW_EM4 0x100c
#define AZX_REG_HSW_EM5 0x1010
+/* Skylake/Broxton display HD-A controller Extended Mode registers */
+#define AZX_REG_SKL_EM4L 0x1040
+
/* PCI space */
#define AZX_PCIREG_TCSEL 0x44
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 7855cfe46b69..95a937eafb79 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -398,6 +398,7 @@ int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm,
int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_route *route, int num);
void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
+void snd_soc_dapm_reset_cache(struct snd_soc_dapm_context *dapm);
/* dapm events */
void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 628e6e64c2fb..c2e5d6cb34e3 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -186,6 +186,7 @@ header-y += if_tunnel.h
header-y += if_vlan.h
header-y += if_x25.h
header-y += igmp.h
+header-y += ila.h
header-y += in6.h
header-y += inet_diag.h
header-y += in.h
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 28ccedd000f5..a27222d5b413 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -628,7 +628,7 @@ struct ovs_action_hash {
* @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the
* mask, the corresponding bit in the value is copied to the connection
* tracking mark field in the connection.
- * @OVS_CT_ATTR_LABEL: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN
+ * @OVS_CT_ATTR_LABELS: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN
* mask. For each bit set in the mask, the corresponding bit in the value is
* copied to the connection tracking label field in the connection.
* @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG.
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 751b69f858c8..9fd7b5d8df2f 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -39,13 +39,6 @@
#define VFIO_SPAPR_TCE_v2_IOMMU 7
/*
- * The No-IOMMU IOMMU offers no translation or isolation for devices and
- * supports no ioctls outside of VFIO_CHECK_EXTENSION. Use of VFIO's No-IOMMU
- * code will taint the host kernel and should be used with extreme caution.
- */
-#define VFIO_NOIOMMU_IOMMU 8
-
-/*
* The IOCTL interface is designed for extensibility by embedding the
* structure length (argsz) and flags into structures passed between
* kernel and userspace. We therefore use the _IO() macro for these
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index 85dedca3dcfb..eeba75395f7d 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -343,7 +343,6 @@ struct ipu_client_platformdata {
int di;
int dc;
int dp;
- int dmfc;
int dma[2];
};
diff --git a/init/Kconfig b/init/Kconfig
index c24b6f767bf0..235c7a2c0d20 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -2030,13 +2030,6 @@ config INIT_ALL_POSSIBLE
it was better to provide this option than to break all the archs
and have several arch maintainers pursuing me down dark alleys.
-config STOP_MACHINE
- bool
- default y
- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
- help
- Need stop_machine() primitive.
-
source "block/Kconfig"
config PREEMPT_NOTIFIERS
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 4466273f59e1..fe95970b1f79 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -98,6 +98,12 @@ static DEFINE_SPINLOCK(css_set_lock);
static DEFINE_SPINLOCK(cgroup_idr_lock);
/*
+ * Protects cgroup_file->kn for !self csses. It synchronizes notifications
+ * against file removal/re-creation across css hiding.
+ */
+static DEFINE_SPINLOCK(cgroup_file_kn_lock);
+
+/*
* Protects cgroup_subsys->release_agent_path. Modifying it also requires
* cgroup_mutex. Reading requires either cgroup_mutex or this spinlock.
*/
@@ -730,9 +736,11 @@ static void put_css_set_locked(struct css_set *cset)
if (!atomic_dec_and_test(&cset->refcount))
return;
- /* This css_set is dead. unlink it and release cgroup refcounts */
- for_each_subsys(ss, ssid)
+ /* This css_set is dead. unlink it and release cgroup and css refs */
+ for_each_subsys(ss, ssid) {
list_del(&cset->e_cset_node[ssid]);
+ css_put(cset->subsys[ssid]);
+ }
hash_del(&cset->hlist);
css_set_count--;
@@ -1032,9 +1040,13 @@ static struct css_set *find_css_set(struct css_set *old_cset,
key = css_set_hash(cset->subsys);
hash_add(css_set_table, &cset->hlist, key);
- for_each_subsys(ss, ssid)
+ for_each_subsys(ss, ssid) {
+ struct cgroup_subsys_state *css = cset->subsys[ssid];
+
list_add_tail(&cset->e_cset_node[ssid],
- &cset->subsys[ssid]->cgroup->e_csets[ssid]);
+ &css->cgroup->e_csets[ssid]);
+ css_get(css);
+ }
spin_unlock_bh(&css_set_lock);
@@ -1369,6 +1381,16 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
char name[CGROUP_FILE_NAME_MAX];
lockdep_assert_held(&cgroup_mutex);
+
+ if (cft->file_offset) {
+ struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
+ struct cgroup_file *cfile = (void *)css + cft->file_offset;
+
+ spin_lock_irq(&cgroup_file_kn_lock);
+ cfile->kn = NULL;
+ spin_unlock_irq(&cgroup_file_kn_lock);
+ }
+
kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
}
@@ -1832,7 +1854,6 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
INIT_LIST_HEAD(&cgrp->self.sibling);
INIT_LIST_HEAD(&cgrp->self.children);
- INIT_LIST_HEAD(&cgrp->self.files);
INIT_LIST_HEAD(&cgrp->cset_links);
INIT_LIST_HEAD(&cgrp->pidlists);
mutex_init(&cgrp->pidlist_mutex);
@@ -2193,6 +2214,9 @@ struct cgroup_taskset {
struct list_head src_csets;
struct list_head dst_csets;
+ /* the subsys currently being processed */
+ int ssid;
+
/*
* Fields for cgroup_taskset_*() iteration.
*
@@ -2255,25 +2279,29 @@ static void cgroup_taskset_add(struct task_struct *task,
/**
* cgroup_taskset_first - reset taskset and return the first task
* @tset: taskset of interest
+ * @dst_cssp: output variable for the destination css
*
* @tset iteration is initialized and the first task is returned.
*/
-struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
+struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
+ struct cgroup_subsys_state **dst_cssp)
{
tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
tset->cur_task = NULL;
- return cgroup_taskset_next(tset);
+ return cgroup_taskset_next(tset, dst_cssp);
}
/**
* cgroup_taskset_next - iterate to the next task in taskset
* @tset: taskset of interest
+ * @dst_cssp: output variable for the destination css
*
* Return the next task in @tset. Iteration must have been initialized
* with cgroup_taskset_first().
*/
-struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
+struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
+ struct cgroup_subsys_state **dst_cssp)
{
struct css_set *cset = tset->cur_cset;
struct task_struct *task = tset->cur_task;
@@ -2288,6 +2316,18 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
if (&task->cg_list != &cset->mg_tasks) {
tset->cur_cset = cset;
tset->cur_task = task;
+
+ /*
+ * This function may be called both before and
+ * after cgroup_taskset_migrate(). The two cases
+ * can be distinguished by looking at whether @cset
+ * has its ->mg_dst_cset set.
+ */
+ if (cset->mg_dst_cset)
+ *dst_cssp = cset->mg_dst_cset->subsys[tset->ssid];
+ else
+ *dst_cssp = cset->subsys[tset->ssid];
+
return task;
}
@@ -2323,7 +2363,8 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
/* check that we can legitimately attach to the cgroup */
for_each_e_css(css, i, dst_cgrp) {
if (css->ss->can_attach) {
- ret = css->ss->can_attach(css, tset);
+ tset->ssid = i;
+ ret = css->ss->can_attach(tset);
if (ret) {
failed_css = css;
goto out_cancel_attach;
@@ -2356,9 +2397,12 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
*/
tset->csets = &tset->dst_csets;
- for_each_e_css(css, i, dst_cgrp)
- if (css->ss->attach)
- css->ss->attach(css, tset);
+ for_each_e_css(css, i, dst_cgrp) {
+ if (css->ss->attach) {
+ tset->ssid = i;
+ css->ss->attach(tset);
+ }
+ }
ret = 0;
goto out_release_tset;
@@ -2367,8 +2411,10 @@ out_cancel_attach:
for_each_e_css(css, i, dst_cgrp) {
if (css == failed_css)
break;
- if (css->ss->cancel_attach)
- css->ss->cancel_attach(css, tset);
+ if (css->ss->cancel_attach) {
+ tset->ssid = i;
+ css->ss->cancel_attach(tset);
+ }
}
out_release_tset:
spin_lock_bh(&css_set_lock);
@@ -3290,9 +3336,9 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
if (cft->file_offset) {
struct cgroup_file *cfile = (void *)css + cft->file_offset;
- kernfs_get(kn);
+ spin_lock_irq(&cgroup_file_kn_lock);
cfile->kn = kn;
- list_add(&cfile->node, &css->files);
+ spin_unlock_irq(&cgroup_file_kn_lock);
}
return 0;
@@ -3530,6 +3576,22 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
}
/**
+ * cgroup_file_notify - generate a file modified event for a cgroup_file
+ * @cfile: target cgroup_file
+ *
+ * @cfile must have been obtained by setting cftype->file_offset.
+ */
+void cgroup_file_notify(struct cgroup_file *cfile)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cgroup_file_kn_lock, flags);
+ if (cfile->kn)
+ kernfs_notify(cfile->kn);
+ spin_unlock_irqrestore(&cgroup_file_kn_lock, flags);
+}
+
+/**
* cgroup_task_count - count the number of tasks in a cgroup.
* @cgrp: the cgroup in question
*
@@ -4590,13 +4652,9 @@ static void css_free_work_fn(struct work_struct *work)
container_of(work, struct cgroup_subsys_state, destroy_work);
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
- struct cgroup_file *cfile;
percpu_ref_exit(&css->refcnt);
- list_for_each_entry(cfile, &css->files, node)
- kernfs_put(cfile->kn);
-
if (ss) {
/* css free path */
int id = css->id;
@@ -4701,7 +4759,6 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
css->ss = ss;
INIT_LIST_HEAD(&css->sibling);
INIT_LIST_HEAD(&css->children);
- INIT_LIST_HEAD(&css->files);
css->serial_nr = css_serial_nr_next++;
if (cgroup_parent(cgrp)) {
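For controller authors the visible change is the callback signature: ->can_attach(), ->attach() and friends lose their css argument and recover each task's destination css from the iterator instead, which is what makes per-task destinations possible on the legacy hierarchies. A hedged skeleton of an attach method under the new convention (example_attach and example_move are illustrative; the freezer, pids and cpuset conversions below are the real instances):

static void example_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;

	cgroup_taskset_for_each(task, css, tset) {
		/* css is the destination css for this particular task */
		example_move(css, task);
	}
}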
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index f1b30ad5dc6d..2d3df82c54f2 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -155,12 +155,10 @@ static void freezer_css_free(struct cgroup_subsys_state *css)
* @freezer->lock. freezer_attach() makes the new tasks conform to the
* current state and all following state changes can see the new tasks.
*/
-static void freezer_attach(struct cgroup_subsys_state *new_css,
- struct cgroup_taskset *tset)
+static void freezer_attach(struct cgroup_taskset *tset)
{
- struct freezer *freezer = css_freezer(new_css);
struct task_struct *task;
- bool clear_frozen = false;
+ struct cgroup_subsys_state *new_css;
mutex_lock(&freezer_mutex);
@@ -174,22 +172,21 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
* current state before executing the following - !frozen tasks may
* be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
*/
- cgroup_taskset_for_each(task, tset) {
+ cgroup_taskset_for_each(task, new_css, tset) {
+ struct freezer *freezer = css_freezer(new_css);
+
if (!(freezer->state & CGROUP_FREEZING)) {
__thaw_task(task);
} else {
freeze_task(task);
- freezer->state &= ~CGROUP_FROZEN;
- clear_frozen = true;
+ /* clear FROZEN and propagate upwards */
+ while (freezer && (freezer->state & CGROUP_FROZEN)) {
+ freezer->state &= ~CGROUP_FROZEN;
+ freezer = parent_freezer(freezer);
+ }
}
}
- /* propagate FROZEN clearing upwards */
- while (clear_frozen && (freezer = parent_freezer(freezer))) {
- freezer->state &= ~CGROUP_FROZEN;
- clear_frozen = freezer->state & CGROUP_FREEZING;
- }
-
mutex_unlock(&freezer_mutex);
}
diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c
index cdd8df4e991c..b50d5a167fda 100644
--- a/kernel/cgroup_pids.c
+++ b/kernel/cgroup_pids.c
@@ -106,7 +106,7 @@ static void pids_uncharge(struct pids_cgroup *pids, int num)
{
struct pids_cgroup *p;
- for (p = pids; p; p = parent_pids(p))
+ for (p = pids; parent_pids(p); p = parent_pids(p))
pids_cancel(p, num);
}
@@ -123,7 +123,7 @@ static void pids_charge(struct pids_cgroup *pids, int num)
{
struct pids_cgroup *p;
- for (p = pids; p; p = parent_pids(p))
+ for (p = pids; parent_pids(p); p = parent_pids(p))
atomic64_add(num, &p->counter);
}
@@ -140,7 +140,7 @@ static int pids_try_charge(struct pids_cgroup *pids, int num)
{
struct pids_cgroup *p, *q;
- for (p = pids; p; p = parent_pids(p)) {
+ for (p = pids; parent_pids(p); p = parent_pids(p)) {
int64_t new = atomic64_add_return(num, &p->counter);
/*
@@ -162,13 +162,13 @@ revert:
return -EAGAIN;
}
-static int pids_can_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
+static int pids_can_attach(struct cgroup_taskset *tset)
{
- struct pids_cgroup *pids = css_pids(css);
struct task_struct *task;
+ struct cgroup_subsys_state *dst_css;
- cgroup_taskset_for_each(task, tset) {
+ cgroup_taskset_for_each(task, dst_css, tset) {
+ struct pids_cgroup *pids = css_pids(dst_css);
struct cgroup_subsys_state *old_css;
struct pids_cgroup *old_pids;
@@ -187,13 +187,13 @@ static int pids_can_attach(struct cgroup_subsys_state *css,
return 0;
}
-static void pids_cancel_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
+static void pids_cancel_attach(struct cgroup_taskset *tset)
{
- struct pids_cgroup *pids = css_pids(css);
struct task_struct *task;
+ struct cgroup_subsys_state *dst_css;
- cgroup_taskset_for_each(task, tset) {
+ cgroup_taskset_for_each(task, dst_css, tset) {
+ struct pids_cgroup *pids = css_pids(dst_css);
struct cgroup_subsys_state *old_css;
struct pids_cgroup *old_pids;
@@ -205,65 +205,28 @@ static void pids_cancel_attach(struct cgroup_subsys_state *css,
}
}
+/*
+ * task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies
+ * on threadgroup_change_begin() held by the copy_process().
+ */
static int pids_can_fork(struct task_struct *task, void **priv_p)
{
struct cgroup_subsys_state *css;
struct pids_cgroup *pids;
- int err;
- /*
- * Use the "current" task_css for the pids subsystem as the tentative
- * css. It is possible we will charge the wrong hierarchy, in which
- * case we will forcefully revert/reapply the charge on the right
- * hierarchy after it is committed to the task proper.
- */
- css = task_get_css(current, pids_cgrp_id);
+ css = task_css_check(current, pids_cgrp_id, true);
pids = css_pids(css);
-
- err = pids_try_charge(pids, 1);
- if (err)
- goto err_css_put;
-
- *priv_p = css;
- return 0;
-
-err_css_put:
- css_put(css);
- return err;
+ return pids_try_charge(pids, 1);
}
static void pids_cancel_fork(struct task_struct *task, void *priv)
{
- struct cgroup_subsys_state *css = priv;
- struct pids_cgroup *pids = css_pids(css);
-
- pids_uncharge(pids, 1);
- css_put(css);
-}
-
-static void pids_fork(struct task_struct *task, void *priv)
-{
struct cgroup_subsys_state *css;
- struct cgroup_subsys_state *old_css = priv;
struct pids_cgroup *pids;
- struct pids_cgroup *old_pids = css_pids(old_css);
- css = task_get_css(task, pids_cgrp_id);
+ css = task_css_check(current, pids_cgrp_id, true);
pids = css_pids(css);
-
- /*
- * If the association has changed, we have to revert and reapply the
- * charge/uncharge on the wrong hierarchy to the current one. Since
- * the association can only change due to an organisation event, its
- * okay for us to ignore the limit in this case.
- */
- if (pids != old_pids) {
- pids_uncharge(old_pids, 1);
- pids_charge(pids, 1);
- }
-
- css_put(css);
- css_put(old_css);
+ pids_uncharge(pids, 1);
}
static void pids_free(struct task_struct *task)
@@ -335,6 +298,7 @@ static struct cftype pids_files[] = {
{
.name = "current",
.read_s64 = pids_current_read,
+ .flags = CFTYPE_NOT_ON_ROOT,
},
{ } /* terminate */
};
@@ -346,7 +310,6 @@ struct cgroup_subsys pids_cgrp_subsys = {
.cancel_attach = pids_cancel_attach,
.can_fork = pids_can_fork,
.cancel_fork = pids_cancel_fork,
- .fork = pids_fork,
.free = pids_free,
.legacy_cftypes = pids_files,
.dfl_cftypes = pids_files,
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 10ae73611d80..02a8ea5c9963 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1429,15 +1429,16 @@ static int fmeter_getrate(struct fmeter *fmp)
static struct cpuset *cpuset_attach_old_cs;
/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
-static int cpuset_can_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
+static int cpuset_can_attach(struct cgroup_taskset *tset)
{
- struct cpuset *cs = css_cs(css);
+ struct cgroup_subsys_state *css;
+ struct cpuset *cs;
struct task_struct *task;
int ret;
/* used later by cpuset_attach() */
- cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset));
+ cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
+ cs = css_cs(css);
mutex_lock(&cpuset_mutex);
@@ -1447,7 +1448,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
(cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
goto out_unlock;
- cgroup_taskset_for_each(task, tset) {
+ cgroup_taskset_for_each(task, css, tset) {
ret = task_can_attach(task, cs->cpus_allowed);
if (ret)
goto out_unlock;
@@ -1467,9 +1468,14 @@ out_unlock:
return ret;
}
-static void cpuset_cancel_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
+static void cpuset_cancel_attach(struct cgroup_taskset *tset)
{
+ struct cgroup_subsys_state *css;
+ struct cpuset *cs;
+
+ cgroup_taskset_first(tset, &css);
+ cs = css_cs(css);
+
mutex_lock(&cpuset_mutex);
css_cs(css)->attach_in_progress--;
mutex_unlock(&cpuset_mutex);
@@ -1482,16 +1488,19 @@ static void cpuset_cancel_attach(struct cgroup_subsys_state *css,
*/
static cpumask_var_t cpus_attach;
-static void cpuset_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
+static void cpuset_attach(struct cgroup_taskset *tset)
{
/* static buf protected by cpuset_mutex */
static nodemask_t cpuset_attach_nodemask_to;
struct task_struct *task;
struct task_struct *leader;
- struct cpuset *cs = css_cs(css);
+ struct cgroup_subsys_state *css;
+ struct cpuset *cs;
struct cpuset *oldcs = cpuset_attach_old_cs;
+ cgroup_taskset_first(tset, &css);
+ cs = css_cs(css);
+
mutex_lock(&cpuset_mutex);
/* prepare for attach */
@@ -1502,7 +1511,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
- cgroup_taskset_for_each(task, tset) {
+ cgroup_taskset_for_each(task, css, tset) {
/*
* can_attach beforehand should guarantee that this doesn't
* fail. TODO: have a better way to handle failure here
@@ -1518,7 +1527,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
* sleep and should be moved outside migration path proper.
*/
cpuset_attach_nodemask_to = cs->effective_mems;
- cgroup_taskset_for_each_leader(leader, tset) {
+ cgroup_taskset_for_each_leader(leader, css, tset) {
struct mm_struct *mm = get_task_mm(leader);
if (mm) {
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index d659487254d5..9c418002b8c1 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
* For licensing details see kernel-base/COPYING
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 36babfd20648..ef2d6ea10736 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
* For licensing details see kernel-base/COPYING
@@ -435,7 +435,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
if (!is_cgroup_event(event))
return;
- cgrp = perf_cgroup_from_task(current);
+ cgrp = perf_cgroup_from_task(current, event->ctx);
/*
* Do not update time when cgroup is not active
*/
@@ -458,7 +458,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
if (!task || !ctx->nr_cgroups)
return;
- cgrp = perf_cgroup_from_task(task);
+ cgrp = perf_cgroup_from_task(task, ctx);
info = this_cpu_ptr(cgrp->info);
info->timestamp = ctx->timestamp;
}
@@ -489,7 +489,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
* we reschedule only in the presence of cgroup
* constrained events.
*/
- rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
@@ -522,8 +521,10 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
* set cgrp before ctxsw in to allow
* event_filter_match() to not have to pass
* task around
+ * we pass the cpuctx->ctx to perf_cgroup_from_task()
+ * because cgroup events are only per-cpu
*/
- cpuctx->cgrp = perf_cgroup_from_task(task);
+ cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
}
perf_pmu_enable(cpuctx->ctx.pmu);
@@ -531,8 +532,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
}
}
- rcu_read_unlock();
-
local_irq_restore(flags);
}
@@ -542,17 +541,20 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
struct perf_cgroup *cgrp1;
struct perf_cgroup *cgrp2 = NULL;
+ rcu_read_lock();
/*
* we come here when we know perf_cgroup_events > 0
+ * we do not need to pass the ctx here because we know
+ * we are holding the rcu lock
*/
- cgrp1 = perf_cgroup_from_task(task);
+ cgrp1 = perf_cgroup_from_task(task, NULL);
/*
* next is NULL when called from perf_event_enable_on_exec()
* that will systematically cause a cgroup_switch()
*/
if (next)
- cgrp2 = perf_cgroup_from_task(next);
+ cgrp2 = perf_cgroup_from_task(next, NULL);
/*
* only schedule out current cgroup events if we know
@@ -561,6 +563,8 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
*/
if (cgrp1 != cgrp2)
perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+
+ rcu_read_unlock();
}
static inline void perf_cgroup_sched_in(struct task_struct *prev,
@@ -569,13 +573,16 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
struct perf_cgroup *cgrp1;
struct perf_cgroup *cgrp2 = NULL;
+ rcu_read_lock();
/*
* we come here when we know perf_cgroup_events > 0
+ * we do not need to pass the ctx here because we know
+ * we are holding the rcu lock
*/
- cgrp1 = perf_cgroup_from_task(task);
+ cgrp1 = perf_cgroup_from_task(task, NULL);
/* prev can never be NULL */
- cgrp2 = perf_cgroup_from_task(prev);
+ cgrp2 = perf_cgroup_from_task(prev, NULL);
/*
* only need to schedule in cgroup events if we are changing
@@ -584,6 +591,8 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
*/
if (cgrp1 != cgrp2)
perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+
+ rcu_read_unlock();
}
static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -4216,7 +4225,14 @@ retry:
goto retry;
}
- __perf_event_period(&pe);
+ if (event->attr.freq) {
+ event->attr.sample_freq = value;
+ } else {
+ event->attr.sample_period = value;
+ event->hw.sample_period = value;
+ }
+
+ local64_set(&event->hw.period_left, 0);
raw_spin_unlock_irq(&ctx->lock);
return 0;
@@ -5667,6 +5683,17 @@ perf_event_aux_ctx(struct perf_event_context *ctx,
}
static void
+perf_event_aux_task_ctx(perf_event_aux_output_cb output, void *data,
+ struct perf_event_context *task_ctx)
+{
+ rcu_read_lock();
+ preempt_disable();
+ perf_event_aux_ctx(task_ctx, output, data);
+ preempt_enable();
+ rcu_read_unlock();
+}
+
+static void
perf_event_aux(perf_event_aux_output_cb output, void *data,
struct perf_event_context *task_ctx)
{
@@ -5675,14 +5702,23 @@ perf_event_aux(perf_event_aux_output_cb output, void *data,
struct pmu *pmu;
int ctxn;
+ /*
+ * If we have task_ctx != NULL we only notify
+ * the task context itself. The task_ctx is set
+ * only for EXIT events before releasing task
+ * context.
+ */
+ if (task_ctx) {
+ perf_event_aux_task_ctx(output, data, task_ctx);
+ return;
+ }
+
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
if (cpuctx->unique_pmu != pmu)
goto next;
perf_event_aux_ctx(&cpuctx->ctx, output, data);
- if (task_ctx)
- goto next;
ctxn = pmu->task_ctx_nr;
if (ctxn < 0)
goto next;
@@ -5692,12 +5728,6 @@ perf_event_aux(perf_event_aux_output_cb output, void *data,
next:
put_cpu_ptr(pmu->pmu_cpu_context);
}
-
- if (task_ctx) {
- preempt_disable();
- perf_event_aux_ctx(task_ctx, output, data);
- preempt_enable();
- }
rcu_read_unlock();
}
@@ -8787,10 +8817,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
struct perf_event_context *child_ctx, *clone_ctx = NULL;
unsigned long flags;
- if (likely(!child->perf_event_ctxp[ctxn])) {
- perf_event_task(child, NULL, 0);
+ if (likely(!child->perf_event_ctxp[ctxn]))
return;
- }
local_irq_save(flags);
/*
@@ -8874,6 +8902,14 @@ void perf_event_exit_task(struct task_struct *child)
for_each_task_context_nr(ctxn)
perf_event_exit_task_context(child, ctxn);
+
+ /*
+ * perf_event_exit_task_context() calls perf_event_task() with the
+ * child's task_ctx, which generates EXIT events for the child
+ * contexts and sets child->perf_event_ctxp[] to NULL. At this
+ * point we still need to send EXIT events to the cpu contexts.
+ */
+ perf_event_task(child, NULL, 0);
}
static void perf_free_event(struct perf_event *event,
@@ -9452,16 +9488,18 @@ static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
static int __perf_cgroup_move(void *info)
{
struct task_struct *task = info;
+ rcu_read_lock();
perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
+ rcu_read_unlock();
return 0;
}
-static void perf_cgroup_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
+static void perf_cgroup_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
+ struct cgroup_subsys_state *css;
- cgroup_taskset_for_each(task, tset)
+ cgroup_taskset_for_each(task, css, tset)
task_function_call(task, __perf_cgroup_move, task);
}
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index b5d1ea79c595..adfdc0536117 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
* For licensing details see kernel-base/COPYING
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 4e5e9798aa0c..7dad84913abf 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -19,7 +19,7 @@
* Authors:
* Srikar Dronamraju
* Jim Keniston
- * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
*/
#include <linux/kernel.h>
diff --git a/kernel/fork.c b/kernel/fork.c
index f97f2c449f5c..fce002ee3ddf 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1368,8 +1368,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->real_start_time = ktime_get_boot_ns();
p->io_context = NULL;
p->audit_context = NULL;
- if (clone_flags & CLONE_THREAD)
- threadgroup_change_begin(current);
+ threadgroup_change_begin(current);
cgroup_fork(p);
#ifdef CONFIG_NUMA
p->mempolicy = mpol_dup(p->mempolicy);
@@ -1610,8 +1609,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
proc_fork_connector(p);
cgroup_post_fork(p, cgrp_ss_priv);
- if (clone_flags & CLONE_THREAD)
- threadgroup_change_end(current);
+ threadgroup_change_end(current);
perf_event_fork(p);
trace_task_newtask(p, clone_flags);
@@ -1652,8 +1650,7 @@ bad_fork_cleanup_policy:
mpol_put(p->mempolicy);
bad_fork_cleanup_threadgroup_lock:
#endif
- if (clone_flags & CLONE_THREAD)
- threadgroup_change_end(current);
+ threadgroup_change_end(current);
delayacct_tsk_free(p);
bad_fork_cleanup_count:
atomic_dec(&p->cred->user->processes);
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index cbf9fb899d92..bcf107ce0854 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
*
* Provides a framework for enqueueing and running callbacks from hardirq
* context. The enqueueing is NMI-safe.
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index f7dd15d537f9..05254eeb4b4e 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -2,7 +2,7 @@
* jump label support
*
* Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
- * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2011 Peter Zijlstra
*
*/
#include <linux/memory.h>
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index deae3907ac1e..60ace56618f6 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -6,7 +6,7 @@
* Started by Ingo Molnar:
*
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* this code maps all the lock dependencies as they occur in a live kernel
* and will warn about the following classes of locking bugs:
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index d83d798bef95..dbb61a302548 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -6,7 +6,7 @@
* Started by Ingo Molnar:
*
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* Code for /proc/lockdep and /proc/lockdep_stats:
*
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index d092a0c9c2d4..05a37857ab55 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -93,10 +93,12 @@ bool osq_lock(struct optimistic_spin_queue *lock)
node->cpu = curr;
/*
- * ACQUIRE semantics, pairs with corresponding RELEASE
- * in unlock() uncontended, or fastpath.
+ * We need both ACQUIRE (pairs with corresponding RELEASE in
+ * unlock() uncontended, or fastpath) and RELEASE (to publish
+ * the node fields we just initialised) semantics when updating
+ * the lock tail.
*/
- old = atomic_xchg_acquire(&lock->tail, curr);
+ old = atomic_xchg(&lock->tail, curr);
if (old == OSQ_UNLOCKED_VAL)
return true;
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index c0a205101c23..caf4041f5b0a 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -1,7 +1,7 @@
/*
* sched_clock for unstable cpu clocks
*
- * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
*
* Updates and enhancements:
* Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4d568ac9319e..732e993b564b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1947,13 +1947,38 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
#ifdef CONFIG_SMP
/*
+ * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
+ * possible to, falsely, observe p->on_cpu == 0.
+ *
+ * One must be running (->on_cpu == 1) in order to remove oneself
+ * from the runqueue.
+ *
+ * [S] ->on_cpu = 1; [L] ->on_rq
+ * UNLOCK rq->lock
+ * RMB
+ * LOCK rq->lock
+ * [S] ->on_rq = 0; [L] ->on_cpu
+ *
+ * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
+ * from the consecutive calls to schedule(); the first switching to our
+ * task, the second putting it to sleep.
+ */
+ smp_rmb();
+
+ /*
* If the owning (remote) cpu is still in the middle of schedule() with
* this task as prev, wait until its done referencing the task.
*/
while (p->on_cpu)
cpu_relax();
/*
- * Pairs with the smp_wmb() in finish_lock_switch().
+ * Combined with the control dependency above, we have an effective
+ * smp_load_acquire() without the need for full barriers.
+ *
+ * Pairs with the smp_store_release() in finish_lock_switch().
+ *
+ * This ensures that tasks getting woken will be fully ordered against
+ * their previous state and preserve Program Order.
*/
smp_rmb();
@@ -2039,7 +2064,6 @@ out:
*/
int wake_up_process(struct task_struct *p)
{
- WARN_ON(task_is_stopped_or_traced(p));
return try_to_wake_up(p, TASK_NORMAL, 0);
}
EXPORT_SYMBOL(wake_up_process);
@@ -5847,13 +5871,13 @@ static int init_rootdomain(struct root_domain *rd)
{
memset(rd, 0, sizeof(*rd));
- if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
goto out;
- if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
goto free_span;
- if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
goto free_online;
- if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
goto free_dlo_mask;
init_dl_bw(&rd->dl_bw);
@@ -8217,12 +8241,12 @@ static void cpu_cgroup_fork(struct task_struct *task, void *private)
sched_move_task(task);
}
-static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
+static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
+ struct cgroup_subsys_state *css;
- cgroup_taskset_for_each(task, tset) {
+ cgroup_taskset_for_each(task, css, tset) {
#ifdef CONFIG_RT_GROUP_SCHED
if (!sched_rt_can_attach(css_tg(css), task))
return -EINVAL;
@@ -8235,12 +8259,12 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
return 0;
}
-static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
+static void cpu_cgroup_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
+ struct cgroup_subsys_state *css;
- cgroup_taskset_for_each(task, tset)
+ cgroup_taskset_for_each(task, css, tset)
sched_move_task(task);
}
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 26a54461bf59..05de80b48586 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -788,6 +788,9 @@ cputime_t task_gtime(struct task_struct *t)
unsigned int seq;
cputime_t gtime;
+ if (!context_tracking_is_enabled())
+ return t->gtime;
+
do {
seq = read_seqbegin(&t->vtime_seqlock);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f04fda8f669c..90e26b11deaa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -17,7 +17,7 @@
* Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
*
* Adaptive scheduling granularity, math enhancements by Peter Zijlstra
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*/
#include <linux/latencytop.h>
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e3cc16312046..8ec86abe0ea1 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -64,7 +64,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
raw_spin_unlock(&rt_b->rt_runtime_lock);
}
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
static void push_irq_work_func(struct irq_work *work);
#endif
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index efd3bfc7e347..b242775bf670 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1073,6 +1073,9 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
* We must ensure this doesn't happen until the switch is completely
* finished.
*
+ * In particular, the load of prev->state in finish_task_switch() must
+ * happen before this.
+ *
* Pairs with the control dependency and rmb in try_to_wake_up().
*/
smp_store_release(&prev->on_cpu, 0);
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 052e02672d12..f15d6b6a538a 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -392,7 +392,7 @@ __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
do {
prepare_to_wait(wq, &q->wait, mode);
if (test_bit(q->key.bit_nr, q->key.flags))
- ret = (*action)(&q->key);
+ ret = (*action)(&q->key, mode);
} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
finish_wait(wq, &q->wait);
return ret;
@@ -431,7 +431,7 @@ __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
prepare_to_wait_exclusive(wq, &q->wait, mode);
if (!test_bit(q->key.bit_nr, q->key.flags))
continue;
- ret = action(&q->key);
+ ret = action(&q->key, mode);
if (!ret)
continue;
abort_exclusive_wait(wq, &q->wait, mode, &q->key);
@@ -581,44 +581,44 @@ void wake_up_atomic_t(atomic_t *p)
}
EXPORT_SYMBOL(wake_up_atomic_t);
-__sched int bit_wait(struct wait_bit_key *word)
+__sched int bit_wait(struct wait_bit_key *word, int mode)
{
- if (signal_pending_state(current->state, current))
- return 1;
schedule();
+ if (signal_pending_state(mode, current))
+ return -EINTR;
return 0;
}
EXPORT_SYMBOL(bit_wait);
-__sched int bit_wait_io(struct wait_bit_key *word)
+__sched int bit_wait_io(struct wait_bit_key *word, int mode)
{
- if (signal_pending_state(current->state, current))
- return 1;
io_schedule();
+ if (signal_pending_state(mode, current))
+ return -EINTR;
return 0;
}
EXPORT_SYMBOL(bit_wait_io);
-__sched int bit_wait_timeout(struct wait_bit_key *word)
+__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
{
unsigned long now = READ_ONCE(jiffies);
- if (signal_pending_state(current->state, current))
- return 1;
if (time_after_eq(now, word->timeout))
return -EAGAIN;
schedule_timeout(word->timeout - now);
+ if (signal_pending_state(mode, current))
+ return -EINTR;
return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_timeout);
-__sched int bit_wait_io_timeout(struct wait_bit_key *word)
+__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
{
unsigned long now = READ_ONCE(jiffies);
- if (signal_pending_state(current->state, current))
- return 1;
if (time_after_eq(now, word->timeout))
return -EAGAIN;
io_schedule_timeout(word->timeout - now);
+ if (signal_pending_state(mode, current))
+ return -EINTR;
return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
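From the caller's side the contract tightens: a non-zero return from wait_on_bit() and friends now reliably means the chosen sleep mode allowed interruption and a signal arrived, rather than the old behaviour of returning 1 based on a stale current->state. A hedged usage sketch (wait_for_flag and bit 0 are illustrative):

#include <linux/sched.h>
#include <linux/wait.h>

static int wait_for_flag(unsigned long *flags)
{
	int err;

	err = wait_on_bit(flags, 0, TASK_KILLABLE);
	if (err)		/* -EINTR: fatal signal during the wait */
		return err;
	return 0;		/* bit observed clear */
}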
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 867bc20e1ef1..a3bbaee77c58 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -531,7 +531,7 @@ static int __init cpu_stop_init(void)
}
early_initcall(cpu_stop_init);
-#ifdef CONFIG_STOP_MACHINE
+#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
@@ -631,4 +631,4 @@ int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
return ret ?: done.ret;
}
-#endif /* CONFIG_STOP_MACHINE */
+#endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index abfc903e741e..cc9f7a9319be 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -1,7 +1,7 @@
/*
* trace event based perf event profiling/tracing
*
- * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
* Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
*/
diff --git a/lib/btree.c b/lib/btree.c
index 4264871ea1a0..f93a945274af 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -5,7 +5,7 @@
*
* Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
* Bits and pieces stolen from Peter Zijlstra's code, which is
- * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright 2007, Red Hat Inc. Peter Zijlstra
* GPLv2
*
* see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 8855f019ebe8..d34bd24c2c84 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1464,7 +1464,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
entry->type = dma_debug_coherent;
entry->dev = dev;
entry->pfn = page_to_pfn(virt_to_page(virt));
- entry->offset = (size_t) virt & PAGE_MASK;
+ entry->offset = (size_t) virt & ~PAGE_MASK;
entry->size = size;
entry->dev_addr = dma_addr;
entry->direction = DMA_BIDIRECTIONAL;
@@ -1480,7 +1480,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
.type = dma_debug_coherent,
.dev = dev,
.pfn = page_to_pfn(virt_to_page(virt)),
- .offset = (size_t) virt & PAGE_MASK,
+ .offset = (size_t) virt & ~PAGE_MASK,
.dev_addr = addr,
.size = size,
.direction = DMA_BIDIRECTIONAL,
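The one-character fix is easy to gloss over: PAGE_MASK masks the page-number bits (~0xfffUL with 4 KiB pages), so virt & PAGE_MASK yields the page-aligned base address, while the in-page offset the entry wants is virt & ~PAGE_MASK. A worked check, assuming 4 KiB pages and an illustrative address:

/* virt = 0x12345678, PAGE_SIZE = 4096, PAGE_MASK = ~0xfffUL
 *   virt &  PAGE_MASK = 0x12345000   (page base - the old, wrong value)
 *   virt & ~PAGE_MASK = 0x00000678   (offset in page - what is wanted)
 */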
diff --git a/lib/proportions.c b/lib/proportions.c
index 6f724298f67a..efa54f259ea9 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -1,7 +1,7 @@
/*
* Floating proportions
*
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* Description:
*
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 0f3be3fad2b2..1c149e9a5cd5 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -386,33 +386,31 @@ static bool rhashtable_check_elasticity(struct rhashtable *ht,
return false;
}
-int rhashtable_insert_rehash(struct rhashtable *ht)
+int rhashtable_insert_rehash(struct rhashtable *ht,
+ struct bucket_table *tbl)
{
struct bucket_table *old_tbl;
struct bucket_table *new_tbl;
- struct bucket_table *tbl;
unsigned int size;
int err;
old_tbl = rht_dereference_rcu(ht->tbl, ht);
- tbl = rhashtable_last_table(ht, old_tbl);
size = tbl->size;
+ err = -EBUSY;
+
if (rht_grow_above_75(ht, tbl))
size *= 2;
/* Do not schedule more than one rehash */
else if (old_tbl != tbl)
- return -EBUSY;
+ goto fail;
+
+ err = -ENOMEM;
new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
- if (new_tbl == NULL) {
- /* Schedule async resize/rehash to try allocation
- * non-atomic context.
- */
- schedule_work(&ht->run_work);
- return -ENOMEM;
- }
+ if (new_tbl == NULL)
+ goto fail;
err = rhashtable_rehash_attach(ht, tbl, new_tbl);
if (err) {
@@ -423,12 +421,24 @@ int rhashtable_insert_rehash(struct rhashtable *ht)
schedule_work(&ht->run_work);
return err;
+
+fail:
+ /* Do not fail the insert if someone else did a rehash. */
+ if (likely(rcu_dereference_raw(tbl->future_tbl)))
+ return 0;
+
+ /* Schedule async rehash to retry allocation in process context. */
+ if (err == -ENOMEM)
+ schedule_work(&ht->run_work);
+
+ return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
-int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
- struct rhash_head *obj,
- struct bucket_table *tbl)
+struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
+ const void *key,
+ struct rhash_head *obj,
+ struct bucket_table *tbl)
{
struct rhash_head *head;
unsigned int hash;
@@ -464,7 +474,12 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
exit:
spin_unlock(rht_bucket_lock(tbl, hash));
- return err;
+ if (err == 0)
+ return NULL;
+ else if (err == -EAGAIN)
+ return tbl;
+ else
+ return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
@@ -500,10 +515,10 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
if (!iter->walker)
return -ENOMEM;
- mutex_lock(&ht->mutex);
+ spin_lock(&ht->lock);
iter->walker->tbl = rht_dereference(ht->tbl, ht);
list_add(&iter->walker->list, &iter->walker->tbl->walkers);
- mutex_unlock(&ht->mutex);
+ spin_unlock(&ht->lock);
return 0;
}
@@ -517,10 +532,10 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init);
*/
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
- mutex_lock(&iter->ht->mutex);
+ spin_lock(&iter->ht->lock);
if (iter->walker->tbl)
list_del(&iter->walker->list);
- mutex_unlock(&iter->ht->mutex);
+ spin_unlock(&iter->ht->lock);
kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
@@ -544,14 +559,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
{
struct rhashtable *ht = iter->ht;
- mutex_lock(&ht->mutex);
+ rcu_read_lock();
+ spin_lock(&ht->lock);
if (iter->walker->tbl)
list_del(&iter->walker->list);
-
- rcu_read_lock();
-
- mutex_unlock(&ht->mutex);
+ spin_unlock(&ht->lock);
if (!iter->walker->tbl) {
iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
@@ -720,9 +733,6 @@ int rhashtable_init(struct rhashtable *ht,
if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
return -EINVAL;
- if (params->nelem_hint)
- size = rounded_hashtable_size(params);
-
memset(ht, 0, sizeof(*ht));
mutex_init(&ht->mutex);
spin_lock_init(&ht->lock);
@@ -742,6 +752,9 @@ int rhashtable_init(struct rhashtable *ht,
ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
+ if (params->nelem_hint)
+ size = rounded_hashtable_size(&ht->p);
+
/* The maximum (not average) chain length grows with the
* size of the hash table, at a rate of (log N)/(log log N).
* The value of 16 is selected so that even if the hash
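Note on the rhashtable changes above: rhashtable_insert_slow() now encodes its outcome in the returned pointer rather than an int — NULL on success, the bucket table to retry against for the -EAGAIN case, and an ERR_PTR() for hard failures. A hypothetical caller-side sketch of the new convention (the retry loop is illustrative, not taken from this patch):

	do {
		tbl = rhashtable_insert_slow(ht, key, obj, tbl);
	} while (tbl && !IS_ERR(tbl));	/* non-NULL, non-error: retry on the returned table */

	if (!tbl)
		return 0;		/* inserted */
	return PTR_ERR(tbl);		/* hard failure, e.g. -ENOMEM or -EBUSY */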
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8ed2ffd963c5..7340353f8aea 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -957,8 +957,9 @@ EXPORT_SYMBOL(congestion_wait);
* jiffies for either a BDI to exit congestion of the given @sync queue
* or a write to complete.
*
- * In the absence of zone congestion, cond_resched() is called to yield
- * the processor if necessary but otherwise does not sleep.
+ * In the absence of zone congestion, a short sleep or a cond_resched()
+ * call is performed to yield the processor and to allow other subsystems
+ * to make forward progress.
*
* The return value is 0 if the sleep is for the full timeout. Otherwise,
* it is the number of jiffies that were still remaining when the function
@@ -978,7 +979,19 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout)
*/
if (atomic_read(&nr_wb_congested[sync]) == 0 ||
!test_bit(ZONE_CONGESTED, &zone->flags)) {
- cond_resched();
+
+ /*
+ * Memory allocation/reclaim might be called from a WQ
+ * context and the current implementation of the WQ
+ * concurrency control doesn't recognize that a particular
+ * WQ is congested if the worker thread is looping without
+ * ever sleeping. Therefore we have to do a short sleep
+ * here rather than calling cond_resched().
+ */
+ if (current->flags & PF_WQ_WORKER)
+ schedule_timeout(1);
+ else
+ cond_resched();
/* In case we scheduled, work out time remaining */
ret = timeout - (jiffies - start);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 827bb02a43a4..ef6963b577fd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -372,8 +372,10 @@ retry_locked:
spin_unlock(&resv->lock);
trg = kmalloc(sizeof(*trg), GFP_KERNEL);
- if (!trg)
+ if (!trg) {
+ kfree(nrg);
return -ENOMEM;
+ }
spin_lock(&resv->lock);
list_add(&trg->link, &resv->region_cache);
@@ -483,8 +485,16 @@ static long region_del(struct resv_map *resv, long f, long t)
retry:
spin_lock(&resv->lock);
list_for_each_entry_safe(rg, trg, head, link) {
- if (rg->to <= f)
+ /*
+ * Skip regions before the range to be deleted. file_region
+ * ranges are normally of the form [from, to). However, there
+ * may be a "placeholder" entry in the map which is of the form
+ * (from, to) with from == to. Check for placeholder entries
+ * at the beginning of the range to be deleted.
+ */
+ if (rg->to <= f && (rg->to != rg->from || rg->to != f))
continue;
+
if (rg->from >= t)
break;
@@ -1886,7 +1896,10 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
if (!page)
goto out_uncharge_cgroup;
-
+ if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
+ SetPagePrivate(page);
+ h->resv_huge_pages--;
+ }
spin_lock(&hugetlb_lock);
list_move(&page->lru, &h->hugepage_activelist);
/* Fall through */
@@ -3693,12 +3706,12 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
return VM_FAULT_HWPOISON_LARGE |
VM_FAULT_SET_HINDEX(hstate_index(h));
+ } else {
+ ptep = huge_pte_alloc(mm, address, huge_page_size(h));
+ if (!ptep)
+ return VM_FAULT_OOM;
}
- ptep = huge_pte_alloc(mm, address, huge_page_size(h));
- if (!ptep)
- return VM_FAULT_OOM;
-
mapping = vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, vma, address);
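Worked example for the region_del() hunk above: a placeholder entry has rg->from == rg->to, and the old bare `rg->to <= f` test wrongly skipped one sitting exactly at the start of the deletion range. With f == 5 and a placeholder (5, 5), the new predicate evaluates as:

	/*
	 * rg->from == rg->to == 5, f == 5:
	 *
	 *   rg->to <= f            -> true
	 *   rg->to != rg->from     -> false  (it is a placeholder)
	 *   rg->to != f            -> false  (it sits exactly at f)
	 *
	 * true && (false || false) == false, so the entry is not skipped.
	 */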
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9acfb165eb52..e234c21a5e6c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2128,7 +2128,7 @@ done_restock:
*/
do {
if (page_counter_read(&memcg->memory) > memcg->high) {
- current->memcg_nr_pages_over_high += nr_pages;
+ current->memcg_nr_pages_over_high += batch;
set_notify_resume(current);
break;
}
@@ -4779,23 +4779,18 @@ static void mem_cgroup_clear_mc(void)
spin_unlock(&mc.lock);
}
-static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
+static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
{
- struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ struct cgroup_subsys_state *css;
+ struct mem_cgroup *memcg;
struct mem_cgroup *from;
struct task_struct *leader, *p;
struct mm_struct *mm;
unsigned long move_flags;
int ret = 0;
- /*
- * We are now commited to this value whatever it is. Changes in this
- * tunable will only affect upcoming migrations, not the current one.
- * So we need to save it, and keep it going.
- */
- move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
- if (!move_flags)
+ /* charge immigration isn't supported on the default hierarchy */
+ if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
return 0;
/*
@@ -4805,13 +4800,23 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
* multiple.
*/
p = NULL;
- cgroup_taskset_for_each_leader(leader, tset) {
+ cgroup_taskset_for_each_leader(leader, css, tset) {
WARN_ON_ONCE(p);
p = leader;
+ memcg = mem_cgroup_from_css(css);
}
if (!p)
return 0;
+ /*
+ * We are now committed to this value whatever it is. Changes in this
+ * tunable will only affect upcoming migrations, not the current one.
+ * So we need to save it, and keep it going.
+ */
+ move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
+ if (!move_flags)
+ return 0;
+
from = mem_cgroup_from_task(p);
VM_BUG_ON(from == memcg);
@@ -4842,8 +4847,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
return ret;
}
-static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
+static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
{
if (mc.to)
mem_cgroup_clear_mc();
@@ -4985,10 +4989,10 @@ retry:
atomic_dec(&mc.from->moving_account);
}
-static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(struct cgroup_taskset *tset)
{
- struct task_struct *p = cgroup_taskset_first(tset);
+ struct cgroup_subsys_state *css;
+ struct task_struct *p = cgroup_taskset_first(tset, &css);
struct mm_struct *mm = get_task_mm(p);
if (mm) {
@@ -5000,17 +5004,14 @@ static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
mem_cgroup_clear_mc();
}
#else /* !CONFIG_MMU */
-static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
+static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
{
return 0;
}
-static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
+static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
{
}
-static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(struct cgroup_taskset *tset)
{
}
#endif
@@ -5511,11 +5512,11 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
* mem_cgroup_replace_page - migrate a charge to another page
* @oldpage: currently charged page
* @newpage: page to transfer the charge to
- * @lrucare: either or both pages might be on the LRU already
*
* Migrate the charge from @oldpage to @newpage.
*
* Both pages must be locked, @newpage->mapping must be set up.
+ * Either or both pages might be on the LRU already.
*/
void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage)
{
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index d13a33918fa2..c12680993ff3 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -608,6 +608,8 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p,
continue;
if (unlikely(p->flags & PF_KTHREAD))
continue;
+ if (is_global_init(p))
+ continue;
if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
continue;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3e4d65445fa7..d15d88c8efa1 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2,7 +2,7 @@
* mm/page-writeback.c
*
* Copyright (C) 2002, Linus Torvalds.
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* Contains functions related to writing back dirty pages at the
* address_space level.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 17a3c66639a9..9d666df5ef95 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3647,8 +3647,9 @@ static void show_migration_types(unsigned char type)
{
static const char types[MIGRATE_TYPES] = {
[MIGRATE_UNMOVABLE] = 'U',
- [MIGRATE_RECLAIMABLE] = 'E',
[MIGRATE_MOVABLE] = 'M',
+ [MIGRATE_RECLAIMABLE] = 'E',
+ [MIGRATE_HIGHATOMIC] = 'H',
#ifdef CONFIG_CMA
[MIGRATE_CMA] = 'C',
#endif
diff --git a/mm/shmem.c b/mm/shmem.c
index 9187eee4128b..2afcdbbdb685 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -843,14 +843,14 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
list_add_tail(&info->swaplist, &shmem_swaplist);
if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
- swap_shmem_alloc(swap);
- shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
-
spin_lock(&info->lock);
- info->swapped++;
shmem_recalc_inode(inode);
+ info->swapped++;
spin_unlock(&info->lock);
+ swap_shmem_alloc(swap);
+ shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
+
mutex_unlock(&shmem_swaplist_mutex);
BUG_ON(page_mapped(page));
swap_writepage(page, wbc);
@@ -1078,7 +1078,7 @@ repeat:
if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
error = -EINVAL;
- goto failed;
+ goto unlock;
}
if (page && sgp == SGP_WRITE)
@@ -1246,11 +1246,15 @@ clear:
/* Perhaps the file has been truncated since we checked */
if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
+ if (alloced) {
+ ClearPageDirty(page);
+ delete_from_page_cache(page);
+ spin_lock(&info->lock);
+ shmem_recalc_inode(inode);
+ spin_unlock(&info->lock);
+ }
error = -EINVAL;
- if (alloced)
- goto trunc;
- else
- goto failed;
+ goto unlock;
}
*pagep = page;
return 0;
@@ -1258,23 +1262,13 @@ clear:
/*
* Error recovery.
*/
-trunc:
- info = SHMEM_I(inode);
- ClearPageDirty(page);
- delete_from_page_cache(page);
- spin_lock(&info->lock);
- info->alloced--;
- inode->i_blocks -= BLOCKS_PER_PAGE;
- spin_unlock(&info->lock);
decused:
- sbinfo = SHMEM_SB(inode->i_sb);
if (sbinfo->max_blocks)
percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
shmem_unacct_blocks(info->flags, 1);
failed:
- if (swap.val && error != -EINVAL &&
- !shmem_confirm_swap(mapping, index, swap))
+ if (swap.val && !shmem_confirm_swap(mapping, index, swap))
error = -EEXIST;
unlock:
if (page) {
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 879a2be23325..0d5712b0206c 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -921,8 +921,8 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
#ifdef CONFIG_PROC_FS
static char * const migratetype_names[MIGRATE_TYPES] = {
"Unmovable",
- "Reclaimable",
"Movable",
+ "Reclaimable",
"HighAtomic",
#ifdef CONFIG_CMA
"CMA",
@@ -1379,6 +1379,7 @@ static const struct file_operations proc_vmstat_file_operations = {
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_SMP
+static struct workqueue_struct *vmstat_wq;
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;
static cpumask_var_t cpu_stat_off;
@@ -1391,7 +1392,7 @@ static void vmstat_update(struct work_struct *w)
* to occur in the future. Keep on running the
* update worker thread.
*/
- schedule_delayed_work_on(smp_processor_id(),
+ queue_delayed_work_on(smp_processor_id(), vmstat_wq,
this_cpu_ptr(&vmstat_work),
round_jiffies_relative(sysctl_stat_interval));
} else {
@@ -1460,7 +1461,7 @@ static void vmstat_shepherd(struct work_struct *w)
if (need_update(cpu) &&
cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
- schedule_delayed_work_on(cpu,
+ queue_delayed_work_on(cpu, vmstat_wq,
&per_cpu(vmstat_work, cpu), 0);
put_online_cpus();
@@ -1549,6 +1550,7 @@ static int __init setup_vmstat(void)
start_shepherd_timer();
cpu_notifier_register_done();
+ vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
#endif
#ifdef CONFIG_PROC_FS
proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index ae3a47f9d1d5..fbd0acf80b13 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -805,6 +805,9 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
struct sock *sk;
ax25_cb *ax25;
+ if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
+ return -EINVAL;
+
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
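Note on the protocol-range check above (the same guard is added to decnet, inet, inet6 and irda further down): `protocol` comes straight from userspace via socket(2), and sk->sk_protocol is only an 8-bit field, so an out-of-range value was silently truncated before this check. A hypothetical call that is now rejected up front:

	/* Illustrative userspace call; now fails early with -EINVAL. */
	int fd = socket(AF_AX25, SOCK_SEQPACKET, -1);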
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 83bc1aaf5800..a49c705fb86b 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -566,6 +566,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
int select;
batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
struct batadv_dat_candidate *res;
+ struct batadv_dat_entry dat;
if (!bat_priv->orig_hash)
return NULL;
@@ -575,7 +576,9 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
if (!res)
return NULL;
- ip_key = (batadv_dat_addr_t)batadv_hash_dat(&ip_dst,
+ dat.ip = ip_dst;
+ dat.vid = 0;
+ ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
BATADV_DAT_ADDR_MAX);
batadv_dbg(BATADV_DBG_DAT, bat_priv,
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index a43f02e2d423..e4f2646d9246 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -836,6 +836,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
u8 *orig_addr;
struct batadv_orig_node *orig_node = NULL;
int check, hdr_size = sizeof(*unicast_packet);
+ enum batadv_subtype subtype;
bool is4addr;
unicast_packet = (struct batadv_unicast_packet *)skb->data;
@@ -863,10 +864,20 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
/* packet for me */
if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
if (is4addr) {
- batadv_dat_inc_counter(bat_priv,
- unicast_4addr_packet->subtype);
- orig_addr = unicast_4addr_packet->src;
- orig_node = batadv_orig_hash_find(bat_priv, orig_addr);
+ subtype = unicast_4addr_packet->subtype;
+ batadv_dat_inc_counter(bat_priv, subtype);
+
+ /* Only payload data should be considered for speedy
+ * join. For example, DAT also uses unicast 4addr
+ * types, but those packets should not be considered
+ * for speedy join, since the clients do not actually
+ * reside at the sending originator.
+ */
+ if (subtype == BATADV_P_DATA) {
+ orig_addr = unicast_4addr_packet->src;
+ orig_node = batadv_orig_hash_find(bat_priv,
+ orig_addr);
+ }
}
if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb,
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 5cf431177f34..ec67deff1621 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -68,13 +68,15 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
unsigned short vid, const char *message,
bool roaming);
-/* returns 1 if they are the same mac addr */
+/* returns 1 if they are the same mac addr and vid */
static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
{
const void *data1 = container_of(node, struct batadv_tt_common_entry,
hash_entry);
+ const struct batadv_tt_common_entry *tt1 = data1;
+ const struct batadv_tt_common_entry *tt2 = data2;
- return batadv_compare_eth(data1, data2);
+ return (tt1->vid == tt2->vid) && batadv_compare_eth(data1, data2);
}
/**
@@ -1427,9 +1429,15 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
}
/* if the client was temporarily added before receiving the first
- * OGM announcing it, we have to clear the TEMP flag
+ * OGM announcing it, we have to clear the TEMP flag. Also,
+ * remove the previous temporary orig node and re-add it
+ * if required. If the orig entry changed, the new one which
+ * is a non-temporary entry is preferred.
*/
- common->flags &= ~BATADV_TT_CLIENT_TEMP;
+ if (common->flags & BATADV_TT_CLIENT_TEMP) {
+ batadv_tt_global_del_orig_list(tt_global_entry);
+ common->flags &= ~BATADV_TT_CLIENT_TEMP;
+ }
/* the change can carry possible "attribute" flags like the
* TT_CLIENT_WIFI, therefore they have to be copied in the
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index fe129663bd3f..f52bcbf2e58c 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -526,6 +526,9 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr,
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
+ if (addr_len < sizeof(struct sockaddr_sco))
+ return -EINVAL;
+
lock_sock(sk);
if (sk->sk_state != BT_OPEN) {
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 04257a0e3534..0260c84ed83c 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -84,9 +84,11 @@ static void update_classid(struct cgroup_subsys_state *css, void *v)
css_task_iter_end(&it);
}
-static void cgrp_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
+static void cgrp_attach(struct cgroup_taskset *tset)
{
+ struct cgroup_subsys_state *css;
+
+ cgroup_taskset_first(tset, &css);
update_classid(css,
(void *)(unsigned long)css_cls_state(css)->classid);
}
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 053d60c33395..f1efbc39ef6b 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -233,13 +233,14 @@ static int update_netprio(const void *v, struct file *file, unsigned n)
return 0;
}
-static void net_prio_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
+static void net_prio_attach(struct cgroup_taskset *tset)
{
struct task_struct *p;
- void *v = (void *)(unsigned long)css->cgroup->id;
+ struct cgroup_subsys_state *css;
+
+ cgroup_taskset_for_each(p, css, tset) {
+ void *v = (void *)(unsigned long)css->cgroup->id;
- cgroup_taskset_for_each(p, tset) {
task_lock(p);
iterate_fd(p->files, 0, update_netprio, v);
task_unlock(p);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 152b9c70e252..b2df375ec9c2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3643,7 +3643,8 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
serr->ee.ee_info = tstype;
if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
serr->ee.ee_data = skb_shinfo(skb)->tskey;
- if (sk->sk_protocol == IPPROTO_TCP)
+ if (sk->sk_protocol == IPPROTO_TCP &&
+ sk->sk_type == SOCK_STREAM)
serr->ee.ee_data -= sk->sk_tskey;
}
@@ -4268,7 +4269,7 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
return NULL;
}
- memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len,
+ memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
2 * ETH_ALEN);
skb->mac_header += VLAN_HLEN;
return skb;
diff --git a/net/core/sock.c b/net/core/sock.c
index 1278d7b7bd9a..565bab7baca9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -433,8 +433,6 @@ static bool sock_needs_netstamp(const struct sock *sk)
}
}
-#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
-
static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
if (sk->sk_flags & flags) {
@@ -874,7 +872,8 @@ set_rcvbuf:
if (val & SOF_TIMESTAMPING_OPT_ID &&
!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
- if (sk->sk_protocol == IPPROTO_TCP) {
+ if (sk->sk_protocol == IPPROTO_TCP &&
+ sk->sk_type == SOCK_STREAM) {
if (sk->sk_state != TCP_ESTABLISHED) {
ret = -EINVAL;
break;
@@ -1543,7 +1542,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
*/
is_charged = sk_filter_charge(newsk, filter);
- if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) {
+ if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
/* It is still raw copy of parent, so invalidate
* destructor and make plain sk_free() */
newsk->sk_destruct = NULL;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index eebf5ac8ce18..13d6b1a6e0fc 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -678,6 +678,9 @@ static int dn_create(struct net *net, struct socket *sock, int protocol,
{
struct sock *sk;
+ if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
+ return -EINVAL;
+
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 11c4ca13ec3b..5c5db6636704 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -257,6 +257,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
int try_loading_module = 0;
int err;
+ if (protocol < 0 || protocol >= IPPROTO_MAX)
+ return -EINVAL;
+
sock->state = SS_UNCONNECTED;
/* Look for the requested type/protocol pair. */
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index cc8f3e506cde..473447593060 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1155,6 +1155,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
struct in_device *in_dev;
struct net *net = dev_net(dev);
unsigned int flags;
@@ -1193,6 +1194,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
case NETDEV_CHANGEMTU:
rt_cache_flush(net);
break;
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+ /* flush all routes if dev is linked to or unlinked from
+ * an L3 master device (e.g., VRF)
+ */
+ if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+ fib_disable_ip(dev, NETDEV_DOWN, true);
+ break;
}
return NOTIFY_DONE;
}
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index e0fcbbbcfe54..bd903fe0f750 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -24,6 +24,7 @@ struct fou {
u16 type;
struct udp_offload udp_offloads;
struct list_head list;
+ struct rcu_head rcu;
};
#define FOU_F_REMCSUM_NOPARTIAL BIT(0)
@@ -417,7 +418,7 @@ static void fou_release(struct fou *fou)
list_del(&fou->list);
udp_tunnel_sock_release(sock);
- kfree(fou);
+ kfree_rcu(fou, rcu);
}
static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
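Note on the fou hunks above: fou_release() unlinks the object from a list that lookups may still be traversing, so freeing it immediately risks a use-after-free; switching to kfree_rcu() defers the free until an RCU grace period has elapsed (which assumes, as the change implies, that readers run under rcu_read_lock()). The added rcu_head provides the storage the deferred callback needs:

	/* Sketch of the deferred-free pattern used here. */
	list_del(&fou->list);	/* unlink under the writer-side lock */
	kfree_rcu(fou, rcu);	/* freed only after current RCU readers finish */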
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index a35584176535..c187c60e3e0c 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -60,6 +60,7 @@ config NFT_REJECT_IPV4
config NFT_DUP_IPV4
tristate "IPv4 nf_tables packet duplication support"
+ depends on !NF_CONNTRACK || NF_CONNTRACK
select NF_DUP_IPV4
help
This module enables IPv4 packet duplication support for nf_tables.
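Note on the added dependency above (mirrored for NFT_DUP_IPV6 below): `depends on !NF_CONNTRACK || NF_CONNTRACK` is the standard Kconfig idiom for tying tristates together — it forbids NFT_DUP_IPV4=y while NF_CONNTRACK=m, which would otherwise leave a built-in object referencing symbols in a module.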
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 7aa13bd3de29..205e6745393f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1493,7 +1493,7 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
if (likely(sk->sk_rx_dst))
skb_dst_drop(skb);
else
- skb_dst_force(skb);
+ skb_dst_force_safe(skb);
__skb_queue_tail(&tp->ucopy.prequeue, skb);
tp->ucopy.memory += skb->truesize;
@@ -1721,8 +1721,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
- if (dst) {
- dst_hold(dst);
+ if (dst && dst_hold_safe(dst)) {
sk->sk_rx_dst = dst;
inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a800cee88035..412a920fe0ec 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3150,7 +3150,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_fastopen_request *fo = tp->fastopen_req;
- int syn_loss = 0, space, err = 0, copied;
+ int syn_loss = 0, space, err = 0;
unsigned long last_syn_loss = 0;
struct sk_buff *syn_data;
@@ -3188,17 +3188,18 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
goto fallback;
syn_data->ip_summed = CHECKSUM_PARTIAL;
memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
- copied = copy_from_iter(skb_put(syn_data, space), space,
- &fo->data->msg_iter);
- if (unlikely(!copied)) {
- kfree_skb(syn_data);
- goto fallback;
- }
- if (copied != space) {
- skb_trim(syn_data, copied);
- space = copied;
+ if (space) {
+ int copied = copy_from_iter(skb_put(syn_data, space), space,
+ &fo->data->msg_iter);
+ if (unlikely(!copied)) {
+ kfree_skb(syn_data);
+ goto fallback;
+ }
+ if (copied != space) {
+ skb_trim(syn_data, copied);
+ space = copied;
+ }
}
-
/* No more data pending in inet_wait_for_connect() */
if (space == fo->size)
fo->data = NULL;
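Note on the tcp_send_syn_data() hunk above: with Fast Open and no payload queued yet, `space` can be 0, and copy_from_iter() correctly returns 0 for a zero-length copy — which the old code misread as a failed copy and used as a reason to fall back to a plain SYN. Guarding the copy with `if (space)` keeps the empty case on the fast-open path:

	/* Hypothetical empty-payload case: space == 0. */
	int copied = copy_from_iter(skb_put(syn_data, 0), 0, &fo->data->msg_iter);
	/* copied == 0 is success for a zero-length request, not an error. */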
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 7082fb79d876..233efa67dc3d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -350,6 +350,12 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
setup_timer(&ndev->rs_timer, addrconf_rs_timer,
(unsigned long)ndev);
memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
+
+ if (ndev->cnf.stable_secret.initialized)
+ ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+ else
+ ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64;
+
ndev->cnf.mtu6 = dev->mtu;
ndev->cnf.sysctl = NULL;
ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
@@ -2454,7 +2460,7 @@ ok:
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
if (in6_dev->cnf.optimistic_dad &&
!net->ipv6.devconf_all->forwarding && sllao)
- addr_flags = IFA_F_OPTIMISTIC;
+ addr_flags |= IFA_F_OPTIMISTIC;
#endif
/* Do not allow to create too much of autoconfigured
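Worked example for the one-character addrconf fix above: plain assignment clobbers whatever was put into addr_flags earlier in the function, while `|=` accumulates. With a hypothetical previously-set flag:

	u32 addr_flags = IFA_F_NOPREFIXROUTE;	/* hypothetical earlier flag */

	addr_flags  = IFA_F_OPTIMISTIC;		/* old: the earlier flag is lost */
	addr_flags |= IFA_F_OPTIMISTIC;		/* new: both flags survive */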
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 8ec0df75f1c4..9f5137cd604e 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -109,6 +109,9 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
int try_loading_module = 0;
int err;
+ if (protocol < 0 || protocol >= IPPROTO_MAX)
+ return -EINVAL;
+
/* Look for the requested type/protocol pair. */
lookup_protocol:
err = -ESOCKTNOSUPPORT;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 938d03ce5e4b..f37f18b6b40c 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1570,13 +1570,11 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
return -EEXIST;
} else {
t = nt;
-
- ip6gre_tunnel_unlink(ign, t);
- ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
- ip6gre_tunnel_link(ign, t);
- netdev_state_change(dev);
}
+ ip6gre_tunnel_unlink(ign, t);
+ ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
+ ip6gre_tunnel_link(ign, t);
return 0;
}
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index f6a024e141e5..e10a04c9cdc7 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -49,6 +49,7 @@ config NFT_REJECT_IPV6
config NFT_DUP_IPV6
tristate "IPv6 nf_tables packet duplication support"
+ depends on !NF_CONNTRACK || NF_CONNTRACK
select NF_DUP_IPV6
help
This module enables IPv6 packet duplication support for nf_tables.
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5382c2662fa2..f03d2b0445fd 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -93,10 +93,9 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
- if (dst) {
+ if (dst && dst_hold_safe(dst)) {
const struct rt6_info *rt = (const struct rt6_info *)dst;
- dst_hold(dst);
sk->sk_rx_dst = dst;
inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index e6aa48b5395c..923abd6b3064 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1086,6 +1086,9 @@ static int irda_create(struct net *net, struct socket *sock, int protocol,
struct sock *sk;
struct irda_sock *self;
+ if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
+ return -EINVAL;
+
if (net != &init_net)
return -EAFNOSUPPORT;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 2d1c4c35186d..166a29fe6c35 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1169,8 +1169,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
* rc isn't initialized here yet, so ignore it
*/
__ieee80211_vht_handle_opmode(sdata, sta,
- params->opmode_notif,
- band, false);
+ params->opmode_notif, band);
}
if (ieee80211_vif_is_mesh(&sdata->vif))
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index c30b6842ed9f..b84f6aa32c08 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1716,10 +1716,10 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
void ieee80211_sta_set_rx_nss(struct sta_info *sta);
u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, u8 opmode,
- enum ieee80211_band band, bool nss_only);
+ enum ieee80211_band band);
void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, u8 opmode,
- enum ieee80211_band band, bool nss_only);
+ enum ieee80211_band band);
void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_vht_cap *vht_cap);
void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 123b26d177e8..1c342e2592c4 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1379,21 +1379,26 @@ static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
*/
if (has_80211h_pwr &&
(!has_cisco_pwr || pwr_level_80211h <= pwr_level_cisco)) {
+ new_ap_level = pwr_level_80211h;
+
+ if (sdata->ap_power_level == new_ap_level)
+ return 0;
+
sdata_dbg(sdata,
"Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n",
pwr_level_80211h, chan_pwr, pwr_reduction_80211h,
sdata->u.mgd.bssid);
- new_ap_level = pwr_level_80211h;
} else { /* has_cisco_pwr is always true here. */
+ new_ap_level = pwr_level_cisco;
+
+ if (sdata->ap_power_level == new_ap_level)
+ return 0;
+
sdata_dbg(sdata,
"Limiting TX power to %d dBm as advertised by %pM\n",
pwr_level_cisco, sdata->u.mgd.bssid);
- new_ap_level = pwr_level_cisco;
}
- if (sdata->ap_power_level == new_ap_level)
- return 0;
-
sdata->ap_power_level = new_ap_level;
if (__ieee80211_recalc_txpower(sdata))
return BSS_CHANGED_TXPOWER;
@@ -3577,7 +3582,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (sta && elems.opmode_notif)
ieee80211_vht_handle_opmode(sdata, sta, *elems.opmode_notif,
- rx_status->band, true);
+ rx_status->band);
mutex_unlock(&local->sta_mtx);
changed |= ieee80211_handle_pwr_constr(sdata, chan, mgmt,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 1f827539d828..bc081850ac0e 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2735,8 +2735,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
ieee80211_vht_handle_opmode(rx->sdata, rx->sta,
- opmode, status->band,
- false);
+ opmode, status->band);
goto handled;
}
default:
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 08af2b307945..3943d4bf289c 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1644,6 +1644,29 @@ void ieee80211_stop_device(struct ieee80211_local *local)
drv_stop(local);
}
+static void ieee80211_flush_completed_scan(struct ieee80211_local *local,
+ bool aborted)
+{
+ /* It's possible that we don't handle the scan completion in
+ * time during suspend, so if it's still marked as completed
+ * here, queue the work and flush it to clean things up.
+ * Instead of calling the worker function directly here, we
+ * really queue it to avoid potential races with other flows
+ * scheduling the same work.
+ */
+ if (test_bit(SCAN_COMPLETED, &local->scanning)) {
+ /* If coming from reconfiguration failure, abort the scan so
+ * we don't attempt to continue a partial HW scan - which is
+ * possible otherwise if (e.g.) the 2.4 GHz portion was the
+ * completed scan, and a 5 GHz portion is still pending.
+ */
+ if (aborted)
+ set_bit(SCAN_ABORTED, &local->scanning);
+ ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
+ flush_delayed_work(&local->scan_work);
+ }
+}
+
static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
@@ -1663,6 +1686,8 @@ static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
local->suspended = false;
local->in_reconfig = false;
+ ieee80211_flush_completed_scan(local, true);
+
/* scheduled scan clearly can't be running any more, but tell
* cfg80211 and clear local state
*/
@@ -1701,6 +1726,27 @@ static void ieee80211_assign_chanctx(struct ieee80211_local *local,
mutex_unlock(&local->chanctx_mtx);
}
+static void ieee80211_reconfig_stations(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+
+ /* add STAs back */
+ mutex_lock(&local->sta_mtx);
+ list_for_each_entry(sta, &local->sta_list, list) {
+ enum ieee80211_sta_state state;
+
+ if (!sta->uploaded || sta->sdata != sdata)
+ continue;
+
+ for (state = IEEE80211_STA_NOTEXIST;
+ state < sta->sta_state; state++)
+ WARN_ON(drv_sta_state(local, sta->sdata, sta, state,
+ state + 1));
+ }
+ mutex_unlock(&local->sta_mtx);
+}
+
int ieee80211_reconfig(struct ieee80211_local *local)
{
struct ieee80211_hw *hw = &local->hw;
@@ -1836,50 +1882,11 @@ int ieee80211_reconfig(struct ieee80211_local *local)
WARN_ON(drv_add_chanctx(local, ctx));
mutex_unlock(&local->chanctx_mtx);
- list_for_each_entry(sdata, &local->interfaces, list) {
- if (!ieee80211_sdata_running(sdata))
- continue;
- ieee80211_assign_chanctx(local, sdata);
- }
-
sdata = rtnl_dereference(local->monitor_sdata);
if (sdata && ieee80211_sdata_running(sdata))
ieee80211_assign_chanctx(local, sdata);
}
- /* add STAs back */
- mutex_lock(&local->sta_mtx);
- list_for_each_entry(sta, &local->sta_list, list) {
- enum ieee80211_sta_state state;
-
- if (!sta->uploaded)
- continue;
-
- /* AP-mode stations will be added later */
- if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
- continue;
-
- for (state = IEEE80211_STA_NOTEXIST;
- state < sta->sta_state; state++)
- WARN_ON(drv_sta_state(local, sta->sdata, sta, state,
- state + 1));
- }
- mutex_unlock(&local->sta_mtx);
-
- /* reconfigure tx conf */
- if (hw->queues >= IEEE80211_NUM_ACS) {
- list_for_each_entry(sdata, &local->interfaces, list) {
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
- sdata->vif.type == NL80211_IFTYPE_MONITOR ||
- !ieee80211_sdata_running(sdata))
- continue;
-
- for (i = 0; i < IEEE80211_NUM_ACS; i++)
- drv_conf_tx(local, sdata, i,
- &sdata->tx_conf[i]);
- }
- }
-
/* reconfigure hardware */
ieee80211_hw_config(local, ~0);
@@ -1892,6 +1899,22 @@ int ieee80211_reconfig(struct ieee80211_local *local)
if (!ieee80211_sdata_running(sdata))
continue;
+ ieee80211_assign_chanctx(local, sdata);
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_MONITOR:
+ break;
+ default:
+ ieee80211_reconfig_stations(sdata);
+ /* fall through */
+ case NL80211_IFTYPE_AP: /* AP stations are handled later */
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ drv_conf_tx(local, sdata, i,
+ &sdata->tx_conf[i]);
+ break;
+ }
+
/* common change flags for all interface types */
changed = BSS_CHANGED_ERP_CTS_PROT |
BSS_CHANGED_ERP_PREAMBLE |
@@ -2077,17 +2100,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
mb();
local->resuming = false;
- /* It's possible that we don't handle the scan completion in
- * time during suspend, so if it's still marked as completed
- * here, queue the work and flush it to clean things up.
- * Instead of calling the worker function directly here, we
- * really queue it to avoid potential races with other flows
- * scheduling the same work.
- */
- if (test_bit(SCAN_COMPLETED, &local->scanning)) {
- ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
- flush_delayed_work(&local->scan_work);
- }
+ ieee80211_flush_completed_scan(local, false);
if (local->open_count && !reconfig_due_to_wowlan)
drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND);
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index ff1c798921a6..c38b2f07a919 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -378,7 +378,7 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta)
u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, u8 opmode,
- enum ieee80211_band band, bool nss_only)
+ enum ieee80211_band band)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
@@ -401,9 +401,6 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
changed |= IEEE80211_RC_NSS_CHANGED;
}
- if (nss_only)
- return changed;
-
switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) {
case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ:
sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_20;
@@ -430,13 +427,12 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, u8 opmode,
- enum ieee80211_band band, bool nss_only)
+ enum ieee80211_band band)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
- u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode,
- band, nss_only);
+ u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band);
if (changed > 0)
rate_control_rate_update(local, sband, sta, changed);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 4b3b9b310c3a..b18c5ed42d95 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -27,6 +27,8 @@
*/
#define MAX_MP_SELECT_LABELS 4
+#define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1)
+
static int zero = 0;
static int label_limit = (1 << 20) - 1;
@@ -341,7 +343,13 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
}
}
- err = neigh_xmit(nh->nh_via_table, out_dev, mpls_nh_via(rt, nh), skb);
+ /* If via wasn't specified then send out using device address */
+ if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC)
+ err = neigh_xmit(NEIGH_LINK_TABLE, out_dev,
+ out_dev->dev_addr, skb);
+ else
+ err = neigh_xmit(nh->nh_via_table, out_dev,
+ mpls_nh_via(rt, nh), skb);
if (err)
net_dbg_ratelimited("%s: packet transmission failed: %d\n",
__func__, err);
@@ -559,6 +567,10 @@ static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
if (!mpls_dev_get(dev))
goto errout;
+ if ((nh->nh_via_table == NEIGH_LINK_TABLE) &&
+ (dev->addr_len != nh->nh_via_alen))
+ goto errout;
+
RCU_INIT_POINTER(nh->nh_dev, dev);
if (!(dev->flags & IFF_UP)) {
@@ -630,10 +642,14 @@ static int mpls_nh_build(struct net *net, struct mpls_route *rt,
goto errout;
}
- err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table,
- __mpls_nh_via(rt, nh));
- if (err)
- goto errout;
+ if (via) {
+ err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table,
+ __mpls_nh_via(rt, nh));
+ if (err)
+ goto errout;
+ } else {
+ nh->nh_via_table = MPLS_NEIGH_TABLE_UNSPEC;
+ }
err = mpls_nh_assign_dev(net, rt, nh, oif);
if (err)
@@ -715,9 +731,6 @@ static int mpls_nh_build_multi(struct mpls_route_config *cfg,
nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST);
}
- if (!nla_via)
- goto errout;
-
err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh,
rtnh->rtnh_ifindex, nla_via, nla_newdst);
if (err)
@@ -1227,6 +1240,7 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
cfg->rc_label = LABEL_NOT_SPECIFIED;
cfg->rc_protocol = rtm->rtm_protocol;
+ cfg->rc_via_table = MPLS_NEIGH_TABLE_UNSPEC;
cfg->rc_nlflags = nlh->nlmsg_flags;
cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid;
cfg->rc_nlinfo.nlh = nlh;
@@ -1340,7 +1354,8 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
nh->nh_label))
goto nla_put_failure;
- if (nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
+ if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
+ nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
nh->nh_via_alen))
goto nla_put_failure;
dev = rtnl_dereference(nh->nh_dev);
@@ -1381,7 +1396,8 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
nh->nh_labels,
nh->nh_label))
goto nla_put_failure;
- if (nla_put_via(skb, nh->nh_via_table,
+ if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
+ nla_put_via(skb, nh->nh_via_table,
mpls_nh_via(rt, nh),
nh->nh_via_alen))
goto nla_put_failure;
@@ -1448,7 +1464,8 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
if (nh->nh_dev)
payload += nla_total_size(4); /* RTA_OIF */
- payload += nla_total_size(2 + nh->nh_via_alen); /* RTA_VIA */
+ if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) /* RTA_VIA */
+ payload += nla_total_size(2 + nh->nh_via_alen);
if (nh->nh_labels) /* RTA_NEWDST */
payload += nla_total_size(nh->nh_labels * 4);
} else {
@@ -1457,7 +1474,9 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
for_nexthops(rt) {
nhsize += nla_total_size(sizeof(struct rtnexthop));
- nhsize += nla_total_size(2 + nh->nh_via_alen);
+ /* RTA_VIA */
+ if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC)
+ nhsize += nla_total_size(2 + nh->nh_via_alen);
if (nh->nh_labels)
nhsize += nla_total_size(nh->nh_labels * 4);
} endfor_nexthops(rt);
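Note on the af_mpls changes above: MPLS_NEIGH_TABLE_UNSPEC is defined one past NEIGH_LINK_TABLE, i.e. beyond any table index neigh_xmit() accepts, so it works as an in-band "via not specified" sentinel: transmit falls back to the output device's address, route dumps omit RTA_VIA, and the netlink size estimate skips it accordingly.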
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index cdd01e6416db..fb31aa87de81 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -54,10 +54,10 @@ static int mpls_output(struct net *net, struct sock *sk, struct sk_buff *skb)
unsigned int ttl;
/* Obtain the ttl */
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (dst->ops->family == AF_INET) {
ttl = ip_hdr(skb)->ttl;
rt = (struct rtable *)dst;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (dst->ops->family == AF_INET6) {
ttl = ipv6_hdr(skb)->hop_limit;
rt6 = (struct rt6_info *)dst;
} else {
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 93cc4737018f..2cb429d34c03 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -89,6 +89,7 @@ nf_tables_afinfo_lookup(struct net *net, int family, bool autoload)
}
static void nft_ctx_init(struct nft_ctx *ctx,
+ struct net *net,
const struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct nft_af_info *afi,
@@ -96,7 +97,7 @@ static void nft_ctx_init(struct nft_ctx *ctx,
struct nft_chain *chain,
const struct nlattr * const *nla)
{
- ctx->net = sock_net(skb->sk);
+ ctx->net = net;
ctx->afi = afi;
ctx->table = table;
ctx->chain = chain;
@@ -672,15 +673,14 @@ err:
return ret;
}
-static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
+static int nf_tables_newtable(struct net *net, struct sock *nlsk,
+ struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
const struct nlattr *name;
struct nft_af_info *afi;
struct nft_table *table;
- struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
u32 flags = 0;
struct nft_ctx ctx;
@@ -706,7 +706,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
if (nlh->nlmsg_flags & NLM_F_REPLACE)
return -EOPNOTSUPP;
- nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
+ nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
return nf_tables_updtable(&ctx);
}
@@ -730,7 +730,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
INIT_LIST_HEAD(&table->sets);
table->flags = flags;
- nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
+ nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE);
if (err < 0)
goto err3;
@@ -810,18 +810,17 @@ out:
return err;
}
-static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
+static int nf_tables_deltable(struct net *net, struct sock *nlsk,
+ struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
struct nft_af_info *afi;
struct nft_table *table;
- struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
struct nft_ctx ctx;
- nft_ctx_init(&ctx, skb, nlh, NULL, NULL, NULL, nla);
+ nft_ctx_init(&ctx, net, skb, nlh, NULL, NULL, NULL, nla);
if (family == AF_UNSPEC || nla[NFTA_TABLE_NAME] == NULL)
return nft_flush(&ctx, family);
@@ -1221,8 +1220,8 @@ static void nf_tables_chain_destroy(struct nft_chain *chain)
}
}
-static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
+static int nf_tables_newchain(struct net *net, struct sock *nlsk,
+ struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
@@ -1232,7 +1231,6 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
struct nft_chain *chain;
struct nft_base_chain *basechain = NULL;
struct nlattr *ha[NFTA_HOOK_MAX + 1];
- struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
struct net_device *dev = NULL;
u8 policy = NF_ACCEPT;
@@ -1313,7 +1311,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
return PTR_ERR(stats);
}
- nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+ nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN,
sizeof(struct nft_trans_chain));
if (trans == NULL) {
@@ -1461,7 +1459,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
if (err < 0)
goto err1;
- nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+ nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
err = nft_trans_chain_add(&ctx, NFT_MSG_NEWCHAIN);
if (err < 0)
goto err2;
@@ -1476,15 +1474,14 @@ err1:
return err;
}
-static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
+static int nf_tables_delchain(struct net *net, struct sock *nlsk,
+ struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
struct nft_af_info *afi;
struct nft_table *table;
struct nft_chain *chain;
- struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
struct nft_ctx ctx;
@@ -1506,7 +1503,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
if (chain->use > 0)
return -EBUSY;
- nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+ nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
return nft_delchain(&ctx);
}
@@ -2010,13 +2007,12 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
static struct nft_expr_info *info;
-static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
+static int nf_tables_newrule(struct net *net, struct sock *nlsk,
+ struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
struct nft_af_info *afi;
- struct net *net = sock_net(skb->sk);
struct nft_table *table;
struct nft_chain *chain;
struct nft_rule *rule, *old_rule = NULL;
@@ -2075,7 +2071,7 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
return PTR_ERR(old_rule);
}
- nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+ nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
n = 0;
size = 0;
@@ -2176,13 +2172,12 @@ err1:
return err;
}
-static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
+static int nf_tables_delrule(struct net *net, struct sock *nlsk,
+ struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
struct nft_af_info *afi;
- struct net *net = sock_net(skb->sk);
struct nft_table *table;
struct nft_chain *chain = NULL;
struct nft_rule *rule;
@@ -2205,7 +2200,7 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
return PTR_ERR(chain);
}
- nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+ nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
if (chain) {
if (nla[NFTA_RULE_HANDLE]) {
@@ -2344,12 +2339,11 @@ static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
[NFTA_SET_DESC_SIZE] = { .type = NLA_U32 },
};
-static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
+static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net,
const struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
{
- struct net *net = sock_net(skb->sk);
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
struct nft_af_info *afi = NULL;
struct nft_table *table = NULL;
@@ -2371,7 +2365,7 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
return -ENOENT;
}
- nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
+ nft_ctx_init(ctx, net, skb, nlh, afi, table, NULL, nla);
return 0;
}
@@ -2623,6 +2617,7 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
{
+ struct net *net = sock_net(skb->sk);
const struct nft_set *set;
struct nft_ctx ctx;
struct sk_buff *skb2;
@@ -2630,7 +2625,7 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
int err;
/* Verify existence before starting dump */
- err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla);
+ err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla);
if (err < 0)
return err;
@@ -2693,14 +2688,13 @@ static int nf_tables_set_desc_parse(const struct nft_ctx *ctx,
return 0;
}
-static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
+static int nf_tables_newset(struct net *net, struct sock *nlsk,
+ struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
const struct nft_set_ops *ops;
struct nft_af_info *afi;
- struct net *net = sock_net(skb->sk);
struct nft_table *table;
struct nft_set *set;
struct nft_ctx ctx;
@@ -2798,7 +2792,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
if (IS_ERR(table))
return PTR_ERR(table);
- nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
+ nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]);
if (IS_ERR(set)) {
@@ -2882,8 +2876,8 @@ static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set
nft_set_destroy(set);
}
-static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
+static int nf_tables_delset(struct net *net, struct sock *nlsk,
+ struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
@@ -2896,7 +2890,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
if (nla[NFTA_SET_TABLE] == NULL)
return -EINVAL;
- err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla);
+ err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla);
if (err < 0)
return err;
@@ -3024,7 +3018,7 @@ static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
[NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 },
};
-static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
+static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, struct net *net,
const struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nla[],
@@ -3033,7 +3027,6 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
struct nft_af_info *afi;
struct nft_table *table;
- struct net *net = sock_net(skb->sk);
afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
if (IS_ERR(afi))
@@ -3045,7 +3038,7 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
if (!trans && (table->flags & NFT_TABLE_INACTIVE))
return -ENOENT;
- nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
+ nft_ctx_init(ctx, net, skb, nlh, afi, table, NULL, nla);
return 0;
}
@@ -3135,6 +3128,7 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct net *net = sock_net(skb->sk);
const struct nft_set *set;
struct nft_set_dump_args args;
struct nft_ctx ctx;
@@ -3150,8 +3144,8 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
if (err < 0)
return err;
- err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla,
- false);
+ err = nft_ctx_init_from_elemattr(&ctx, net, cb->skb, cb->nlh,
+ (void *)nla, false);
if (err < 0)
return err;
@@ -3212,11 +3206,12 @@ static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
{
+ struct net *net = sock_net(skb->sk);
const struct nft_set *set;
struct nft_ctx ctx;
int err;
- err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false);
+ err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, false);
if (err < 0)
return err;
@@ -3528,11 +3523,10 @@ err1:
return err;
}
-static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
+static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
+ struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
{
- struct net *net = sock_net(skb->sk);
const struct nlattr *attr;
struct nft_set *set;
struct nft_ctx ctx;
@@ -3541,7 +3535,7 @@ static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb,
if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
return -EINVAL;
- err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, true);
+ err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, true);
if (err < 0)
return err;
@@ -3623,8 +3617,8 @@ err1:
return err;
}
-static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
+static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
+ struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
{
const struct nlattr *attr;
@@ -3635,7 +3629,7 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
return -EINVAL;
- err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false);
+ err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, false);
if (err < 0)
return err;
@@ -4030,7 +4024,8 @@ static int nf_tables_abort(struct sk_buff *skb)
struct nft_trans *trans, *next;
struct nft_trans_elem *te;
- list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
+ list_for_each_entry_safe_reverse(trans, next, &net->nft.commit_list,
+ list) {
switch (trans->msg_type) {
case NFT_MSG_NEWTABLE:
if (nft_trans_table_update(trans)) {
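
Aborting the transaction log in reverse matters because later entries can depend on objects created by earlier ones; undoing newest-first keeps every intermediate state of the rollback consistent. A hedged sketch of the traversal, with a hypothetical undo() helper:

#include <linux/list.h>
#include <linux/slab.h>

struct my_trans {
	struct list_head list;
	int msg_type;
};

static void undo(struct my_trans *trans);	/* hypothetical inverse op */

static void abort_all(struct list_head *commit_list)
{
	struct my_trans *trans, *next;

	/* Walk newest-first; the _safe variant allows deleting as we go. */
	list_for_each_entry_safe_reverse(trans, next, commit_list, list) {
		undo(trans);
		list_del(&trans->list);
		kfree(trans);
	}
}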
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 46453ab318db..77afe913d03d 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -295,8 +295,6 @@ replay:
if (!skb)
return netlink_ack(oskb, nlh, -ENOMEM);
- skb->sk = oskb->sk;
-
nfnl_lock(subsys_id);
ss = rcu_dereference_protected(table[subsys_id].subsys,
lockdep_is_held(&table[subsys_id].mutex));
@@ -381,7 +379,7 @@ replay:
goto ack;
if (nc->call_batch) {
- err = nc->call_batch(net->nfnl, skb, nlh,
+ err = nc->call_batch(net, net->nfnl, skb, nlh,
(const struct nlattr **)cda);
}
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 7d81d280cb4f..861c6615253b 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -365,8 +365,9 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
break;
}
+ nfnl_ct = rcu_dereference(nfnl_ct_hook);
+
if (queue->flags & NFQA_CFG_F_CONNTRACK) {
- nfnl_ct = rcu_dereference(nfnl_ct_hook);
if (nfnl_ct != NULL) {
ct = nfnl_ct->get_ct(entskb, &ctinfo);
if (ct != NULL)
@@ -1064,9 +1065,10 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
if (entry == NULL)
return -ENOENT;
+ /* rcu lock already held from nfnl->call_rcu. */
+ nfnl_ct = rcu_dereference(nfnl_ct_hook);
+
if (nfqa[NFQA_CT]) {
- /* rcu lock already held from nfnl->call_rcu. */
- nfnl_ct = rcu_dereference(nfnl_ct_hook);
if (nfnl_ct != NULL)
ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo);
}
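
Both nfnetlink_queue hunks hoist the rcu_dereference() of the conntrack hook above the attribute and flag checks, so the pointer is fetched exactly once inside the read-side critical section and every later use sees the same snapshot. A minimal sketch of the idiom (hook names hypothetical; the caller is assumed to hold rcu_read_lock()):

#include <linux/rcupdate.h>

struct my_hook_ops {
	void (*get_ct)(void);
};

static const struct my_hook_ops __rcu *my_hook;

static void use_hook(bool want_ct)
{
	const struct my_hook_ops *ops;

	ops = rcu_dereference(my_hook);	/* fetch once, up front */
	if (want_ct && ops)
		ops->get_ct();
}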
@@ -1417,6 +1419,7 @@ static int __init nfnetlink_queue_init(void)
cleanup_netlink_notifier:
netlink_unregister_notifier(&nfqnl_rtnl_notifier);
+ unregister_pernet_subsys(&nfnl_queue_net_ops);
out:
return status;
}
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index c2cc11168fd5..3e8892216f94 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -53,6 +53,8 @@ struct ovs_conntrack_info {
struct md_labels labels;
};
+static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info);
+
static u16 key_to_nfproto(const struct sw_flow_key *key)
{
switch (ntohs(key->eth.type)) {
@@ -141,6 +143,7 @@ static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state,
* previously sent the packet to conntrack via the ct action.
*/
static void ovs_ct_update_key(const struct sk_buff *skb,
+ const struct ovs_conntrack_info *info,
struct sw_flow_key *key, bool post_ct)
{
const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
@@ -158,13 +161,15 @@ static void ovs_ct_update_key(const struct sk_buff *skb,
zone = nf_ct_zone(ct);
} else if (post_ct) {
state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID;
+ if (info)
+ zone = &info->zone;
}
__ovs_ct_update_key(key, state, zone, ct);
}
void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
{
- ovs_ct_update_key(skb, key, false);
+ ovs_ct_update_key(skb, NULL, key, false);
}
int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb)
@@ -418,7 +423,7 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
}
}
- ovs_ct_update_key(skb, key, true);
+ ovs_ct_update_key(skb, info, key, true);
return 0;
}
@@ -708,7 +713,7 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
nf_conntrack_get(&ct_info.ct->ct_general);
return 0;
err_free_ct:
- nf_conntrack_free(ct_info.ct);
+ __ovs_ct_free_action(&ct_info);
return err;
}
@@ -750,6 +755,11 @@ void ovs_ct_free_action(const struct nlattr *a)
{
struct ovs_conntrack_info *ct_info = nla_data(a);
+ __ovs_ct_free_action(ct_info);
+}
+
+static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
+{
if (ct_info->helper)
module_put(ct_info->helper->me);
if (ct_info->ct)
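
Factoring the teardown into __ovs_ct_free_action() lets the error path in ovs_ct_copy_action() release everything the partially initialised info may already own (the helper's module reference as well as the conntrack entry), where the old path freed only the ct. The general shape, sketched with hypothetical names:

#include <linux/module.h>

struct my_res;
struct my_obj {
	struct module *helper_mod;
	struct my_res *res;
};

static void release_res(struct my_res *res);	/* hypothetical */
static int acquire_all(struct my_obj *obj);	/* hypothetical */

/* One teardown shared by the error path and the public free. */
static void obj_teardown(struct my_obj *obj)
{
	if (obj->helper_mod)
		module_put(obj->helper_mod);
	if (obj->res)
		release_res(obj->res);
}

static int obj_setup(struct my_obj *obj)
{
	int err = acquire_all(obj);

	if (err)
		obj_teardown(obj);	/* frees whatever was acquired */
	return err;
}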
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index b41e9ea2ffff..f53bf3b6558b 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -49,7 +49,6 @@
struct rfkill {
spinlock_t lock;
- const char *name;
enum rfkill_type type;
unsigned long state;
@@ -73,6 +72,7 @@ struct rfkill {
struct delayed_work poll_work;
struct work_struct uevent_work;
struct work_struct sync_work;
+ char name[];
};
#define to_rfkill(d) container_of(d, struct rfkill, dev)
@@ -876,14 +876,14 @@ struct rfkill * __must_check rfkill_alloc(const char *name,
if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
return NULL;
- rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
+ rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
if (!rfkill)
return NULL;
spin_lock_init(&rfkill->lock);
INIT_LIST_HEAD(&rfkill->node);
rfkill->type = type;
- rfkill->name = name;
+ strcpy(rfkill->name, name);
rfkill->ops = ops;
rfkill->data = ops_data;
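
Embedding the name as a flexible array member makes the rfkill object own its own copy of the string, so callers may pass transient buffers; the old code kept a bare pointer that had to outlive the device. A sketch of the allocation pattern, assuming hypothetical names:

#include <linux/slab.h>
#include <linux/string.h>

struct named_obj {
	int type;
	char name[];		/* flexible array, must be last */
};

static struct named_obj *named_obj_alloc(const char *name, int type)
{
	struct named_obj *obj;

	/* One allocation covers the struct and the string copy. */
	obj = kzalloc(sizeof(*obj) + strlen(name) + 1, GFP_KERNEL);
	if (!obj)
		return NULL;
	obj->type = type;
	strcpy(obj->name, name);
	return obj;
}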
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 7ec667dd4ce1..b5c2cf2aa6d4 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -950,7 +950,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
}
lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
if (!netif_is_multiqueue(dev))
- sch->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+ sch->flags |= TCQ_F_ONETXQUEUE;
}
sch->handle = handle;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index acb45b8c2a9d..ec529121f38a 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -323,14 +323,13 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
}
}
}
- rcu_read_unlock();
-
if (baddr) {
fl6->saddr = baddr->v6.sin6_addr;
fl6->fl6_sport = baddr->v6.sin6_port;
final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
dst = ip6_dst_lookup_flow(sk, fl6, final_p);
}
+ rcu_read_unlock();
out:
if (!IS_ERR_OR_NULL(dst)) {
@@ -642,6 +641,7 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
struct sock *newsk;
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
struct sctp6_sock *newsctp6sk;
+ struct ipv6_txoptions *opt;
newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0);
if (!newsk)
@@ -661,6 +661,13 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+ rcu_read_lock();
+ opt = rcu_dereference(np->opt);
+ if (opt)
+ opt = ipv6_dup_options(newsk, opt);
+ RCU_INIT_POINTER(newnp->opt, opt);
+ rcu_read_unlock();
+
/* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname()
* and getpeername().
*/
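
Both sctp/ipv6.c hunks follow the same RCU rule: rcu_read_unlock() must come after the last dereference of the protected np->opt, and duplicating the options has to happen inside the critical section. A minimal sketch of the rule (names hypothetical):

#include <linux/rcupdate.h>

struct opts;
struct parent { struct opts __rcu *opt; };
struct child  { struct opts __rcu *opt; };

static struct opts *dup_opts(struct opts *o);	/* hypothetical */

static void copy_opts(struct parent *p, struct child *c)
{
	struct opts *o;

	rcu_read_lock();
	o = rcu_dereference(p->opt);
	if (o)
		o = dup_opts(o);	/* copy while still protected */
	RCU_INIT_POINTER(c->opt, o);
	rcu_read_unlock();		/* only after the last use */
}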
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 7e8f0a117106..c0380cfb16ae 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -324,6 +324,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
"illegal chunk");
+ sctp_chunk_hold(chunk);
sctp_outq_tail_data(q, chunk);
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
@@ -1251,6 +1252,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
*/
sack_a_rwnd = ntohl(sack->a_rwnd);
+ asoc->peer.zero_window_announced = !sack_a_rwnd;
outstanding = q->outstanding_bytes;
if (outstanding < sack_a_rwnd)
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 763e06a55155..5d6a03fad378 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1652,7 +1652,7 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
/* Set an expiration time for the cookie. */
cookie->c.expiration = ktime_add(asoc->cookie_life,
- ktime_get());
+ ktime_get_real());
/* Copy the peer's init packet. */
memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr,
@@ -1780,7 +1780,7 @@ no_hmac:
if (sock_flag(ep->base.sk, SOCK_TIMESTAMP))
kt = skb_get_ktime(skb);
else
- kt = ktime_get();
+ kt = ktime_get_real();
if (!asoc && ktime_before(bear_cookie->expiration, kt)) {
/*
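
Switching from ktime_get() (monotonic, restarts at boot) to ktime_get_real() (wall clock) keeps both cookie paths on one clock base: the expiration stamped into the cookie and the timestamp it is later compared against (possibly skb_get_ktime(), which carries wall-clock socket timestamps) must come from the same clock, or ktime_before() compares incommensurable values. A tiny sketch of the pairing, under that assumption:

#include <linux/ktime.h>

/* Stamp and check against the same clock base (wall clock here). */
static ktime_t cookie_stamp(ktime_t lifetime)
{
	return ktime_add(ktime_get_real(), lifetime);
}

static bool cookie_expired(ktime_t expires)
{
	return ktime_before(expires, ktime_get_real());
}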
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 6f46aa16cb76..cd34a4a34065 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -5412,7 +5412,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net,
SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS);
if (asoc->overall_error_count >= asoc->max_retrans) {
- if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
+ if (asoc->peer.zero_window_announced &&
+ asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
/*
* We are here likely because the receiver had its rwnd
* closed for a while and we have not been able to
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 106bb09636f1..2a1e8ba2808c 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1952,8 +1952,6 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
/* Now send the (possibly) fragmented message. */
list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
- sctp_chunk_hold(chunk);
-
/* Do accounting for the write space. */
sctp_set_owner_w(chunk);
@@ -1966,15 +1964,13 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
* breaks.
*/
err = sctp_primitive_SEND(net, asoc, datamsg);
+ sctp_datamsg_put(datamsg);
/* Did the lower layer accept the chunk? */
- if (err) {
- sctp_datamsg_free(datamsg);
+ if (err)
goto out_free;
- }
pr_debug("%s: we sent primitively\n", __func__);
- sctp_datamsg_put(datamsg);
err = msg_len;
if (unlikely(wait_connect)) {
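
This sendmsg change pairs with the sctp_outq_tail() hunk above: the queue now takes its own hold on each chunk, and the sender drops its datamsg reference unconditionally after handing the message to sctp_primitive_SEND(), whether or not the call failed. The general ownership shape, sketched with kref and hypothetical names:

#include <linux/kref.h>
#include <linux/slab.h>

struct my_msg {
	struct kref ref;
};

static int lower_layer_send(struct my_msg *msg);	/* hypothetical;
							 * takes its own refs */

static void msg_release(struct kref *kref)
{
	kfree(container_of(kref, struct my_msg, ref));
}

static int send_msg(struct my_msg *msg)
{
	int err = lower_layer_send(msg);

	kref_put(&msg->ref, msg_release);	/* drop ours on every path */
	return err;
}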
@@ -7167,6 +7163,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
newsk->sk_type = sk->sk_type;
newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
newsk->sk_flags = sk->sk_flags;
+ newsk->sk_tsflags = sk->sk_tsflags;
newsk->sk_no_check_tx = sk->sk_no_check_tx;
newsk->sk_no_check_rx = sk->sk_no_check_rx;
newsk->sk_reuse = sk->sk_reuse;
@@ -7199,6 +7196,9 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
newinet->mc_ttl = 1;
newinet->mc_index = 0;
newinet->mc_list = NULL;
+
+ if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
+ net_enable_timestamp();
}
static inline void sctp_copy_descendant(struct sock *sk_to,
diff --git a/net/socket.c b/net/socket.c
index 456fadb3d819..29822d6dd91e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1695,6 +1695,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
msg.msg_name = addr ? (struct sockaddr *)&address : NULL;
/* We assume all kernel code knows the size of sockaddr_storage */
msg.msg_namelen = 0;
+ msg.msg_iocb = NULL;
if (sock->file->f_flags & O_NONBLOCK)
flags |= MSG_DONTWAIT;
err = sock_recvmsg(sock, &msg, iov_iter_count(&msg.msg_iter), flags);
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 95f82d8d4888..229956bf8457 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -353,20 +353,12 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
struct rpc_xprt *xprt = req->rq_xprt;
struct svc_serv *bc_serv = xprt->bc_serv;
- struct xdr_buf *rq_rcv_buf = &req->rq_rcv_buf;
spin_lock(&xprt->bc_pa_lock);
list_del(&req->rq_bc_pa_list);
xprt_dec_alloc_count(xprt, 1);
spin_unlock(&xprt->bc_pa_lock);
- if (copied <= rq_rcv_buf->head[0].iov_len) {
- rq_rcv_buf->head[0].iov_len = copied;
- rq_rcv_buf->page_len = 0;
- } else {
- rq_rcv_buf->page_len = copied - rq_rcv_buf->head[0].iov_len;
- }
-
req->rq_private_buf.len = copied;
set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index f14f24ee9983..73ad57a59989 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -250,11 +250,11 @@ void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
-static int rpc_wait_bit_killable(struct wait_bit_key *key)
+static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
- if (fatal_signal_pending(current))
- return -ERESTARTSYS;
freezable_schedule_unsafe();
+ if (signal_pending_state(mode, current))
+ return -ERESTARTSYS;
return 0;
}
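
The reordering matters: the task must check for signals after freezable_schedule_unsafe() returns, so a signal delivered while it slept is noticed, and signal_pending_state() honours the sleep mode, so only TASK_INTERRUPTIBLE and TASK_KILLABLE sleeps bail out with -ERESTARTSYS. A sketch of the callback contract, assuming the 4.4-era wait_bit action signature:

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/freezer.h>

static int my_wait_action(struct wait_bit_key *key, int mode)
{
	freezable_schedule_unsafe();		/* sleep first */
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;		/* then honour signals */
	return 0;
	/* used via out_of_line_wait_on_bit(word, bit, my_wait_action, mode) */
}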
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 7fccf9675df8..cc9852897395 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1363,7 +1363,19 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
+
+ /* Adjust the argument buffer length */
rqstp->rq_arg.len = req->rq_private_buf.len;
+ if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
+ rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
+ rqstp->rq_arg.page_len = 0;
+ } else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
+ rqstp->rq_arg.page_len)
+ rqstp->rq_arg.page_len = rqstp->rq_arg.len -
+ rqstp->rq_arg.head[0].iov_len;
+ else
+ rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
+ rqstp->rq_arg.page_len;
/* reset result send buffer "put" position */
resv->iov_len = 0;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 1c3c1f3a3ec4..b1314c099417 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2263,14 +2263,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
/* Lock the socket to prevent queue disordering
* while we sleep in memcpy_tomsg
*/
- err = mutex_lock_interruptible(&u->readlock);
- if (unlikely(err)) {
- /* recvmsg() in non blocking mode is supposed to return -EAGAIN
- * sk_rcvtimeo is not honored by mutex_lock_interruptible()
- */
- err = noblock ? -EAGAIN : -ERESTARTSYS;
- goto out;
- }
+ mutex_lock(&u->readlock);
if (flags & MSG_PEEK)
skip = sk_peek_offset(sk, flags);
@@ -2314,12 +2307,12 @@ again:
timeo = unix_stream_data_wait(sk, timeo, last,
last_len);
- if (signal_pending(current) ||
- mutex_lock_interruptible(&u->readlock)) {
+ if (signal_pending(current)) {
err = sock_intr_errno(timeo);
goto out;
}
+ mutex_lock(&u->readlock);
continue;
unlock:
unix_state_unlock(sk);
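
Taking the mutex non-interruptibly simplifies the error paths: the old code had to invent an errno when mutex_lock_interruptible() failed, while here the only cancellation point is the documented sleep in unix_stream_data_wait(), where sk_rcvtimeo and signals are handled together. A hedged sketch of the resulting loop shape (names hypothetical):

#include <linux/mutex.h>
#include <linux/sched.h>
#include <net/sock.h>

struct my_sock {
	struct mutex readlock;
};

static bool data_ready(struct my_sock *u);		/* hypothetical */
static long wait_for_data(struct my_sock *u, long t);	/* hypothetical */

static int read_loop(struct my_sock *u, long timeo)
{
	mutex_lock(&u->readlock);
	while (!data_ready(u)) {
		mutex_unlock(&u->readlock);
		timeo = wait_for_data(u, timeo);
		if (signal_pending(current))
			return sock_intr_errno(timeo);
		mutex_lock(&u->readlock);	/* uninterruptible retake */
	}
	mutex_unlock(&u->readlock);
	return 0;
}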
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 72de6989dd12..d4786f2802aa 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -7973,8 +7973,10 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) {
if (!(rdev->wiphy.features &
NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) ||
- !(rdev->wiphy.features & NL80211_FEATURE_QUIET))
+ !(rdev->wiphy.features & NL80211_FEATURE_QUIET)) {
+ kzfree(connkeys);
return -EINVAL;
+ }
connect.flags |= ASSOC_REQ_USE_RRM;
}
@@ -9535,6 +9537,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
if (new_triggers.tcp && new_triggers.tcp->sock)
sock_release(new_triggers.tcp->sock);
kfree(new_triggers.tcp);
+ kfree(new_triggers.nd_config);
return err;
}
#endif
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 0a4f5481ab83..3b0ce1c484a3 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -3011,6 +3011,7 @@ int set_regdom(const struct ieee80211_regdomain *rd,
break;
default:
WARN(1, "invalid initiator %d\n", lr->initiator);
+ kfree(rd);
return -EINVAL;
}
@@ -3203,8 +3204,10 @@ int __init regulatory_init(void)
/* We always try to get an update for the static regdomain */
err = regulatory_hint_core(cfg80211_world_regdom->alpha2);
if (err) {
- if (err == -ENOMEM)
+ if (err == -ENOMEM) {
+ platform_device_unregister(reg_pdev);
return err;
+ }
/*
* N.B. kobject_uevent_env() can fail mainly when we're out of
* memory, which is handled and propagated appropriately above
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 09bfcbac63bb..948fa5560de5 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -303,6 +303,14 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
}
EXPORT_SYMBOL(xfrm_policy_alloc);
+static void xfrm_policy_destroy_rcu(struct rcu_head *head)
+{
+ struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
+
+ security_xfrm_policy_free(policy->security);
+ kfree(policy);
+}
+
/* Destroy xfrm_policy: descendant resources must be released to this moment. */
void xfrm_policy_destroy(struct xfrm_policy *policy)
@@ -312,8 +320,7 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
BUG();
- security_xfrm_policy_free(policy->security);
- kfree(policy);
+ call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);
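
Freeing the policy via call_rcu() is what makes the rcu_dereference(sk->sk_policy[dir]) lookups below safe: a reader that obtained the pointer under rcu_read_lock() is guaranteed the memory survives until it leaves the critical section. The canonical pattern, assuming the object embeds a struct rcu_head named rcu:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_policy {
	struct rcu_head rcu;
	int payload;
};

static void my_policy_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct my_policy, rcu));
}

static void my_policy_destroy(struct my_policy *p)
{
	/* Defer the kfree() until all current RCU readers are done. */
	call_rcu(&p->rcu, my_policy_free_rcu);
}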
@@ -1214,8 +1221,10 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
struct xfrm_policy *pol;
struct net *net = sock_net(sk);
+ rcu_read_lock();
read_lock_bh(&net->xfrm.xfrm_policy_lock);
- if ((pol = sk->sk_policy[dir]) != NULL) {
+ pol = rcu_dereference(sk->sk_policy[dir]);
+ if (pol != NULL) {
bool match = xfrm_selector_match(&pol->selector, fl,
sk->sk_family);
int err = 0;
@@ -1239,6 +1248,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
}
out:
read_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ rcu_read_unlock();
return pol;
}
@@ -1307,13 +1317,14 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
#endif
write_lock_bh(&net->xfrm.xfrm_policy_lock);
- old_pol = sk->sk_policy[dir];
- sk->sk_policy[dir] = pol;
+ old_pol = rcu_dereference_protected(sk->sk_policy[dir],
+ lockdep_is_held(&net->xfrm.xfrm_policy_lock));
if (pol) {
pol->curlft.add_time = get_seconds();
pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
xfrm_sk_policy_link(pol, dir);
}
+ rcu_assign_pointer(sk->sk_policy[dir], pol);
if (old_pol) {
if (pol)
xfrm_policy_requeue(old_pol, pol);
@@ -1361,17 +1372,26 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
return newp;
}
-int __xfrm_sk_clone_policy(struct sock *sk)
+int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
{
- struct xfrm_policy *p0 = sk->sk_policy[0],
- *p1 = sk->sk_policy[1];
+ const struct xfrm_policy *p;
+ struct xfrm_policy *np;
+ int i, ret = 0;
- sk->sk_policy[0] = sk->sk_policy[1] = NULL;
- if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
- return -ENOMEM;
- if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
- return -ENOMEM;
- return 0;
+ rcu_read_lock();
+ for (i = 0; i < 2; i++) {
+ p = rcu_dereference(osk->sk_policy[i]);
+ if (p) {
+ np = clone_policy(p, i);
+ if (unlikely(!np)) {
+ ret = -ENOMEM;
+ break;
+ }
+ rcu_assign_pointer(sk->sk_policy[i], np);
+ }
+ }
+ rcu_read_unlock();
+ return ret;
}
static int
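
The clone loop publishes each copied policy with rcu_assign_pointer(), which supplies the write barrier so that a concurrent rcu_dereference() can never observe the pointer before the object's initialisation is complete. A minimal publish/read pair, sketched with hypothetical names:

#include <linux/rcupdate.h>

struct my_policy { int v; };

static struct my_policy __rcu *g_policy;

static void init_policy(struct my_policy *p);	/* hypothetical */
static void use_policy(struct my_policy *p);	/* hypothetical */

static void publish(struct my_policy *p)
{
	init_policy(p);			/* fully initialise first ... */
	rcu_assign_pointer(g_policy, p); /* ... then make it visible */
}

static void read_side(void)
{
	struct my_policy *p;

	rcu_read_lock();
	p = rcu_dereference(g_policy);
	if (p)
		use_policy(p);
	rcu_read_unlock();
}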
@@ -2198,6 +2218,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
xdst = NULL;
route = NULL;
+ sk = sk_const_to_full_sk(sk);
if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
num_pols = 1;
pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
@@ -2477,6 +2498,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
}
pol = NULL;
+ sk = sk_to_full_sk(sk);
if (sk && sk->sk_policy[dir]) {
pol = xfrm_sk_policy_lookup(sk, dir, &fl);
if (IS_ERR(pol)) {
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 1a10d8ac8162..dacf71a43ad4 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -62,7 +62,7 @@ vmlinux_link()
-Wl,--start-group \
${KBUILD_VMLINUX_MAIN} \
-Wl,--end-group \
- -lutil ${1}
+ -lutil -lrt ${1}
rm -f linux
fi
}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 963f82430938..bff5c8b329d1 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -355,6 +355,8 @@ enum {
((pci)->device == 0x0d0c) || \
((pci)->device == 0x160c))
+#define IS_BROXTON(pci) ((pci)->device == 0x5a98)
+
static char *driver_short_names[] = {
[AZX_DRIVER_ICH] = "HDA Intel",
[AZX_DRIVER_PCH] = "HDA Intel PCH",
@@ -506,15 +508,36 @@ static void azx_init_pci(struct azx *chip)
}
}
+/*
+ * In BXT-P A0, HD-Audio DMA requests arrive later than expected,
+ * which makes an audio stream sensitive to system latencies when
+ * 24/32-bit samples are playing.
+ * Adjust the DMA FIFO threshold to force DMA requests sooner,
+ * improving latency tolerance at the expense of power.
+ */
+static void bxt_reduce_dma_latency(struct azx *chip)
+{
+ u32 val;
+
+ val = azx_readl(chip, SKL_EM4L);
+ val &= (0x3 << 20);
+ azx_writel(chip, SKL_EM4L, val);
+}
+
static void hda_intel_init_chip(struct azx *chip, bool full_reset)
{
struct hdac_bus *bus = azx_bus(chip);
+ struct pci_dev *pci = chip->pci;
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
snd_hdac_set_codec_wakeup(bus, true);
azx_init_chip(chip, full_reset);
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
snd_hdac_set_codec_wakeup(bus, false);
+
+ /* reduce dma latency to avoid noise */
+ if (IS_BROXTON(pci))
+ bxt_reduce_dma_latency(chip);
}
/* calculate runtime delay from LPIB */
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index f8a12ca477f1..4ef2259f88ca 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -778,7 +778,8 @@ static const struct hda_pintbl alienware_pincfgs[] = {
};
static const struct snd_pci_quirk ca0132_quirks[] = {
- SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15", QUIRK_ALIENWARE),
+ SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE),
+ SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE),
{}
};
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index c8b8ef5246a6..ef198903c0c3 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -955,6 +955,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
*/
static const struct hda_device_id snd_hda_id_conexant[] = {
+ HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto),
@@ -972,9 +973,9 @@ static const struct hda_device_id snd_hda_id_conexant[] = {
HDA_CODEC_ENTRY(0x14f150ac, "CX20652", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f150b8, "CX20664", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f150b9, "CX20665", patch_conexant_auto),
- HDA_CODEC_ENTRY(0x14f150f1, "CX20721", patch_conexant_auto),
+ HDA_CODEC_ENTRY(0x14f150f1, "CX21722", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f150f2, "CX20722", patch_conexant_auto),
- HDA_CODEC_ENTRY(0x14f150f3, "CX20723", patch_conexant_auto),
+ HDA_CODEC_ENTRY(0x14f150f3, "CX21724", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f150f4, "CX20724", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f1510f, "CX20751/2", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15110, "CX20751/2", patch_conexant_auto),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index bdb6f226d006..4b6fb668c91c 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2352,6 +2352,12 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
struct hda_codec *codec = audio_ptr;
int pin_nid = port + 0x04;
+ /* skip notification during system suspend (but not in runtime PM);
+ * the state will be updated at resume
+ */
+ if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0)
+ return;
+
check_presence_and_report(codec, pin_nid);
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 9bedf7c85e29..8dd2ac13b3af 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4204,6 +4204,18 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
}
}
+/* additional fixup for Thinkpad T440s noise problem */
+static void alc_fixup_tpt440(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->shutup = alc_no_shutup; /* reduce click noise */
+ spec->gen.mixer_nid = 0; /* reduce background noise */
+ }
+}
+
static void alc_shutup_dell_xps13(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -4578,6 +4590,7 @@ enum {
ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC,
ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC292_FIXUP_TPT440_DOCK,
+ ALC292_FIXUP_TPT440,
ALC283_FIXUP_BXBT2807_MIC,
ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
ALC282_FIXUP_ASPIRE_V5_PINS,
@@ -4596,6 +4609,7 @@ enum {
ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC275_FIXUP_DELL_XPS,
ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
+ ALC293_FIXUP_LENOVO_SPK_NOISE,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -5050,6 +5064,12 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
},
+ [ALC292_FIXUP_TPT440] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_tpt440,
+ .chained = true,
+ .chain_id = ALC292_FIXUP_TPT440_DOCK,
+ },
[ALC283_FIXUP_BXBT2807_MIC] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -5187,6 +5207,12 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
},
+ [ALC293_FIXUP_LENOVO_SPK_NOISE] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_disable_aamix,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_THINKPAD_ACPI
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5325,7 +5351,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440),
SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2211, "Thinkpad W541", ALC292_FIXUP_TPT440_DOCK),
@@ -5334,6 +5360,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -5343,6 +5370,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
@@ -5423,6 +5451,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"},
{.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
{.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
+ {.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
{}
};
@@ -6409,6 +6438,7 @@ static const struct hda_fixup alc662_fixups[] = {
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x0241, "Packard Bell DOTS", ALC662_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index 714df906249e..41c31db65039 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -741,10 +741,11 @@ snd_rme96_playback_setrate(struct rme96 *rme96,
{
/* change to/from double-speed: reset the DAC (if available) */
snd_rme96_reset_dac(rme96);
+ return 1; /* need to restore volume */
} else {
writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
+ return 0;
}
- return 0;
}
static int
@@ -980,6 +981,7 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
struct rme96 *rme96 = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int err, rate, dummy;
+ bool apply_dac_volume = false;
runtime->dma_area = (void __force *)(rme96->iobase +
RME96_IO_PLAY_BUFFER);
@@ -993,24 +995,26 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
{
/* slave clock */
if ((int)params_rate(params) != rate) {
- spin_unlock_irq(&rme96->lock);
- return -EIO;
- }
- } else if ((err = snd_rme96_playback_setrate(rme96, params_rate(params))) < 0) {
- spin_unlock_irq(&rme96->lock);
- return err;
- }
- if ((err = snd_rme96_playback_setformat(rme96, params_format(params))) < 0) {
- spin_unlock_irq(&rme96->lock);
- return err;
+ err = -EIO;
+ goto error;
+ }
+ } else {
+ err = snd_rme96_playback_setrate(rme96, params_rate(params));
+ if (err < 0)
+ goto error;
+ apply_dac_volume = err > 0; /* need to restore volume later? */
}
+
+ err = snd_rme96_playback_setformat(rme96, params_format(params));
+ if (err < 0)
+ goto error;
snd_rme96_setframelog(rme96, params_channels(params), 1);
if (rme96->capture_periodsize != 0) {
if (params_period_size(params) << rme96->playback_frlog !=
rme96->capture_periodsize)
{
- spin_unlock_irq(&rme96->lock);
- return -EBUSY;
+ err = -EBUSY;
+ goto error;
}
}
rme96->playback_periodsize =
@@ -1021,9 +1025,16 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
rme96->wcreg &= ~(RME96_WCR_PRO | RME96_WCR_DOLBY | RME96_WCR_EMP);
writel(rme96->wcreg |= rme96->wcreg_spdif_stream, rme96->iobase + RME96_IO_CONTROL_REGISTER);
}
+
+ err = 0;
+ error:
spin_unlock_irq(&rme96->lock);
-
- return 0;
+ if (apply_dac_volume) {
+ usleep_range(3000, 10000);
+ snd_rme96_apply_dac_volume(rme96);
+ }
+
+ return err;
}
static int
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 9929efc6b9aa..b3ea24d64c50 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -1023,24 +1023,18 @@ void arizona_init_dvfs(struct arizona_priv *priv)
}
EXPORT_SYMBOL_GPL(arizona_init_dvfs);
-static unsigned int arizona_sysclk_48k_rates[] = {
+static unsigned int arizona_opclk_ref_48k_rates[] = {
6144000,
12288000,
24576000,
49152000,
- 73728000,
- 98304000,
- 147456000,
};
-static unsigned int arizona_sysclk_44k1_rates[] = {
+static unsigned int arizona_opclk_ref_44k1_rates[] = {
5644800,
11289600,
22579200,
45158400,
- 67737600,
- 90316800,
- 135475200,
};
static int arizona_set_opclk(struct snd_soc_codec *codec, unsigned int clk,
@@ -1065,11 +1059,11 @@ static int arizona_set_opclk(struct snd_soc_codec *codec, unsigned int clk,
}
if (refclk % 8000)
- rates = arizona_sysclk_44k1_rates;
+ rates = arizona_opclk_ref_44k1_rates;
else
- rates = arizona_sysclk_48k_rates;
+ rates = arizona_opclk_ref_48k_rates;
- for (ref = 0; ref < ARRAY_SIZE(arizona_sysclk_48k_rates) &&
+ for (ref = 0; ref < ARRAY_SIZE(arizona_opclk_ref_48k_rates) &&
rates[ref] <= refclk; ref++) {
div = 1;
while (rates[ref] / div >= freq && div < 32) {
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
index 969e337dc17c..84f5eb07a91b 100644
--- a/sound/soc/codecs/es8328.c
+++ b/sound/soc/codecs/es8328.c
@@ -205,18 +205,18 @@ static const struct snd_kcontrol_new es8328_right_line_controls =
/* Left Mixer */
static const struct snd_kcontrol_new es8328_left_mixer_controls[] = {
- SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 8, 1, 0),
- SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 7, 1, 0),
- SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 8, 1, 0),
- SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 7, 1, 0),
+ SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 7, 1, 0),
+ SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 6, 1, 0),
+ SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 7, 1, 0),
+ SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 6, 1, 0),
};
/* Right Mixer */
static const struct snd_kcontrol_new es8328_right_mixer_controls[] = {
- SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 8, 1, 0),
- SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 7, 1, 0),
- SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 8, 1, 0),
- SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 7, 1, 0),
+ SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 7, 1, 0),
+ SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 6, 1, 0),
+ SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 7, 1, 0),
+ SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 6, 1, 0),
};
static const char * const es8328_pga_sel[] = {
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index 7fc7b4e3f444..c1b87c5800b1 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -1271,6 +1271,36 @@ static int nau8825_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int nau8825_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct nau8825 *nau8825 = dev_get_drvdata(dev);
+
+ disable_irq(client->irq);
+ regcache_cache_only(nau8825->regmap, true);
+ regcache_mark_dirty(nau8825->regmap);
+
+ return 0;
+}
+
+static int nau8825_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct nau8825 *nau8825 = dev_get_drvdata(dev);
+
+ regcache_cache_only(nau8825->regmap, false);
+ regcache_sync(nau8825->regmap);
+ enable_irq(client->irq);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops nau8825_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(nau8825_suspend, nau8825_resume)
+};
+
static const struct i2c_device_id nau8825_i2c_ids[] = {
{ "nau8825", 0 },
{ }
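
The nau8825 suspend/resume pair above is the stock regmap cache idiom: on suspend, route register I/O to the cache and mark it dirty (the part may lose state while powered down); on resume, re-enable hardware I/O and let regcache_sync() replay only the registers that differ from their defaults, with the IRQ disabled across the whole window. Sketched with a hypothetical driver struct:

#include <linux/regmap.h>
#include <linux/interrupt.h>

struct my_codec {
	struct regmap *regmap;
	int irq;
};

static int my_codec_suspend(struct my_codec *c)
{
	disable_irq(c->irq);			/* no handlers during sleep */
	regcache_cache_only(c->regmap, true);	/* writes go to cache only */
	regcache_mark_dirty(c->regmap);		/* force a full sync later */
	return 0;
}

static int my_codec_resume(struct my_codec *c)
{
	regcache_cache_only(c->regmap, false);
	regcache_sync(c->regmap);		/* replay non-default regs */
	enable_irq(c->irq);
	return 0;
}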
@@ -1297,6 +1327,7 @@ static struct i2c_driver nau8825_driver = {
.name = "nau8825",
.of_match_table = of_match_ptr(nau8825_of_ids),
.acpi_match_table = ACPI_PTR(nau8825_acpi_match),
+ .pm = &nau8825_pm,
},
.probe = nau8825_i2c_probe,
.remove = nau8825_i2c_remove,
diff --git a/sound/soc/codecs/rl6231.c b/sound/soc/codecs/rl6231.c
index aca479fa7670..1dc68ab08a17 100644
--- a/sound/soc/codecs/rl6231.c
+++ b/sound/soc/codecs/rl6231.c
@@ -80,8 +80,10 @@ int rl6231_calc_dmic_clk(int rate)
}
for (i = 0; i < ARRAY_SIZE(div); i++) {
- /* find divider that gives DMIC frequency below 3MHz */
- if (3000000 * div[i] >= rate)
+ if ((div[i] % 3) == 0)
+ continue;
+ /* find divider that gives DMIC frequency below 3.072MHz */
+ if (3072000 * div[i] >= rate)
return i;
}
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 28132375e427..ef76940f9dcb 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -245,7 +245,7 @@ struct rt5645_priv {
struct snd_soc_jack *hp_jack;
struct snd_soc_jack *mic_jack;
struct snd_soc_jack *btn_jack;
- struct delayed_work jack_detect_work;
+ struct delayed_work jack_detect_work, rcclock_work;
struct regulator_bulk_data supplies[ARRAY_SIZE(rt5645_supply_names)];
struct rt5645_eq_param_s *eq_param;
@@ -565,12 +565,33 @@ static int rt5645_hweq_put(struct snd_kcontrol *kcontrol,
.put = rt5645_hweq_put \
}
+static int rt5645_spk_put_volsw(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct rt5645_priv *rt5645 = snd_soc_component_get_drvdata(component);
+ int ret;
+
+ cancel_delayed_work_sync(&rt5645->rcclock_work);
+
+ regmap_update_bits(rt5645->regmap, RT5645_MICBIAS,
+ RT5645_PWR_CLK25M_MASK, RT5645_PWR_CLK25M_PU);
+
+ ret = snd_soc_put_volsw(kcontrol, ucontrol);
+
+ queue_delayed_work(system_power_efficient_wq, &rt5645->rcclock_work,
+ msecs_to_jiffies(200));
+
+ return ret;
+}
+
static const struct snd_kcontrol_new rt5645_snd_controls[] = {
/* Speaker Output Volume */
SOC_DOUBLE("Speaker Channel Switch", RT5645_SPK_VOL,
RT5645_VOL_L_SFT, RT5645_VOL_R_SFT, 1, 1),
- SOC_DOUBLE_TLV("Speaker Playback Volume", RT5645_SPK_VOL,
- RT5645_L_VOL_SFT, RT5645_R_VOL_SFT, 39, 1, out_vol_tlv),
+ SOC_DOUBLE_EXT_TLV("Speaker Playback Volume", RT5645_SPK_VOL,
+ RT5645_L_VOL_SFT, RT5645_R_VOL_SFT, 39, 1, snd_soc_get_volsw,
+ rt5645_spk_put_volsw, out_vol_tlv),
/* ClassD modulator Speaker Gain Ratio */
SOC_SINGLE_TLV("Speaker ClassD Playback Volume", RT5645_SPO_CLSD_RATIO,
@@ -1498,7 +1519,7 @@ static void hp_amp_power(struct snd_soc_codec *codec, int on)
regmap_write(rt5645->regmap, RT5645_PR_BASE +
RT5645_MAMP_INT_REG2, 0xfc00);
snd_soc_write(codec, RT5645_DEPOP_M2, 0x1140);
- msleep(40);
+ msleep(70);
rt5645->hp_on = true;
} else {
/* depop parameters */
@@ -3122,6 +3143,15 @@ static void rt5645_jack_detect_work(struct work_struct *work)
SND_JACK_BTN_2 | SND_JACK_BTN_3);
}
+static void rt5645_rcclock_work(struct work_struct *work)
+{
+ struct rt5645_priv *rt5645 =
+ container_of(work, struct rt5645_priv, rcclock_work.work);
+
+ regmap_update_bits(rt5645->regmap, RT5645_MICBIAS,
+ RT5645_PWR_CLK25M_MASK, RT5645_PWR_CLK25M_PD);
+}
+
static irqreturn_t rt5645_irq(int irq, void *data)
{
struct rt5645_priv *rt5645 = data;
@@ -3348,6 +3378,27 @@ static const struct dmi_system_id dmi_platform_intel_braswell[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Reks"),
},
},
+ {
+ .ident = "Google Edgar",
+ .callback = strago_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
+ },
+ },
+ {
+ .ident = "Google Wizpig",
+ .callback = strago_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Wizpig"),
+ },
+ },
+ {
+ .ident = "Google Terra",
+ .callback = strago_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Terra"),
+ },
+ },
{ }
};
@@ -3587,6 +3638,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
}
INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work);
+ INIT_DELAYED_WORK(&rt5645->rcclock_work, rt5645_rcclock_work);
if (rt5645->i2c->irq) {
ret = request_threaded_irq(rt5645->i2c->irq, NULL, rt5645_irq,
@@ -3621,6 +3673,7 @@ static int rt5645_i2c_remove(struct i2c_client *i2c)
free_irq(i2c->irq, rt5645);
cancel_delayed_work_sync(&rt5645->jack_detect_work);
+ cancel_delayed_work_sync(&rt5645->rcclock_work);
snd_soc_unregister_codec(&i2c->dev);
regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies);
diff --git a/sound/soc/codecs/rt5670.h b/sound/soc/codecs/rt5670.h
index dc2b46236c5c..3f1b0f1df809 100644
--- a/sound/soc/codecs/rt5670.h
+++ b/sound/soc/codecs/rt5670.h
@@ -973,12 +973,12 @@
#define RT5670_SCLK_SRC_MCLK (0x0 << 14)
#define RT5670_SCLK_SRC_PLL1 (0x1 << 14)
#define RT5670_SCLK_SRC_RCCLK (0x2 << 14) /* 15MHz */
-#define RT5670_PLL1_SRC_MASK (0x3 << 12)
-#define RT5670_PLL1_SRC_SFT 12
-#define RT5670_PLL1_SRC_MCLK (0x0 << 12)
-#define RT5670_PLL1_SRC_BCLK1 (0x1 << 12)
-#define RT5670_PLL1_SRC_BCLK2 (0x2 << 12)
-#define RT5670_PLL1_SRC_BCLK3 (0x3 << 12)
+#define RT5670_PLL1_SRC_MASK (0x7 << 11)
+#define RT5670_PLL1_SRC_SFT 11
+#define RT5670_PLL1_SRC_MCLK (0x0 << 11)
+#define RT5670_PLL1_SRC_BCLK1 (0x1 << 11)
+#define RT5670_PLL1_SRC_BCLK2 (0x2 << 11)
+#define RT5670_PLL1_SRC_BCLK3 (0x3 << 11)
#define RT5670_PLL1_PD_MASK (0x1 << 3)
#define RT5670_PLL1_PD_SFT 3
#define RT5670_PLL1_PD_1 (0x0 << 3)
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index b4cd7e3bf5f8..69d987a9935c 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -1386,90 +1386,90 @@ static const struct snd_kcontrol_new rt5677_dac_r_mix[] = {
};
static const struct snd_kcontrol_new rt5677_sto1_dac_l_mix[] = {
- SOC_DAPM_SINGLE("ST L Switch", RT5677_STO1_DAC_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("ST L Switch", RT5677_STO1_DAC_MIXER,
RT5677_M_ST_DAC1_L_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER,
RT5677_M_DAC1_L_STO_L_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC2 L Switch", RT5677_STO1_DAC_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC2 L Switch", RT5677_STO1_DAC_MIXER,
RT5677_M_DAC2_L_STO_L_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER,
RT5677_M_DAC1_R_STO_L_SFT, 1, 1),
};
static const struct snd_kcontrol_new rt5677_sto1_dac_r_mix[] = {
- SOC_DAPM_SINGLE("ST R Switch", RT5677_STO1_DAC_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("ST R Switch", RT5677_STO1_DAC_MIXER,
RT5677_M_ST_DAC1_R_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER,
RT5677_M_DAC1_R_STO_R_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC2 R Switch", RT5677_STO1_DAC_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC2 R Switch", RT5677_STO1_DAC_MIXER,
RT5677_M_DAC2_R_STO_R_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER,
RT5677_M_DAC1_L_STO_R_SFT, 1, 1),
};
static const struct snd_kcontrol_new rt5677_mono_dac_l_mix[] = {
- SOC_DAPM_SINGLE("ST L Switch", RT5677_MONO_DAC_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("ST L Switch", RT5677_MONO_DAC_MIXER,
RT5677_M_ST_DAC2_L_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC1 L Switch", RT5677_MONO_DAC_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC1 L Switch", RT5677_MONO_DAC_MIXER,
RT5677_M_DAC1_L_MONO_L_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER,
RT5677_M_DAC2_L_MONO_L_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER,
RT5677_M_DAC2_R_MONO_L_SFT, 1, 1),
};
static const struct snd_kcontrol_new rt5677_mono_dac_r_mix[] = {
- SOC_DAPM_SINGLE("ST R Switch", RT5677_MONO_DAC_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("ST R Switch", RT5677_MONO_DAC_MIXER,
RT5677_M_ST_DAC2_R_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC1 R Switch", RT5677_MONO_DAC_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC1 R Switch", RT5677_MONO_DAC_MIXER,
RT5677_M_DAC1_R_MONO_R_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER,
RT5677_M_DAC2_R_MONO_R_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER,
RT5677_M_DAC2_L_MONO_R_SFT, 1, 1),
};
static const struct snd_kcontrol_new rt5677_dd1_l_mix[] = {
- SOC_DAPM_SINGLE("Sto DAC Mix L Switch", RT5677_DD1_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix L Switch", RT5677_DD1_MIXER,
RT5677_M_STO_L_DD1_L_SFT, 1, 1),
- SOC_DAPM_SINGLE("Mono DAC Mix L Switch", RT5677_DD1_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix L Switch", RT5677_DD1_MIXER,
RT5677_M_MONO_L_DD1_L_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC3 L Switch", RT5677_DD1_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC3 L Switch", RT5677_DD1_MIXER,
RT5677_M_DAC3_L_DD1_L_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC3 R Switch", RT5677_DD1_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC3 R Switch", RT5677_DD1_MIXER,
RT5677_M_DAC3_R_DD1_L_SFT, 1, 1),
};
static const struct snd_kcontrol_new rt5677_dd1_r_mix[] = {
- SOC_DAPM_SINGLE("Sto DAC Mix R Switch", RT5677_DD1_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix R Switch", RT5677_DD1_MIXER,
RT5677_M_STO_R_DD1_R_SFT, 1, 1),
- SOC_DAPM_SINGLE("Mono DAC Mix R Switch", RT5677_DD1_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix R Switch", RT5677_DD1_MIXER,
RT5677_M_MONO_R_DD1_R_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC3 R Switch", RT5677_DD1_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC3 R Switch", RT5677_DD1_MIXER,
RT5677_M_DAC3_R_DD1_R_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC3 L Switch", RT5677_DD1_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC3 L Switch", RT5677_DD1_MIXER,
RT5677_M_DAC3_L_DD1_R_SFT, 1, 1),
};
static const struct snd_kcontrol_new rt5677_dd2_l_mix[] = {
- SOC_DAPM_SINGLE("Sto DAC Mix L Switch", RT5677_DD2_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix L Switch", RT5677_DD2_MIXER,
RT5677_M_STO_L_DD2_L_SFT, 1, 1),
- SOC_DAPM_SINGLE("Mono DAC Mix L Switch", RT5677_DD2_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix L Switch", RT5677_DD2_MIXER,
RT5677_M_MONO_L_DD2_L_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC4 L Switch", RT5677_DD2_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC4 L Switch", RT5677_DD2_MIXER,
RT5677_M_DAC4_L_DD2_L_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC4 R Switch", RT5677_DD2_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC4 R Switch", RT5677_DD2_MIXER,
RT5677_M_DAC4_R_DD2_L_SFT, 1, 1),
};
static const struct snd_kcontrol_new rt5677_dd2_r_mix[] = {
- SOC_DAPM_SINGLE("Sto DAC Mix R Switch", RT5677_DD2_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix R Switch", RT5677_DD2_MIXER,
RT5677_M_STO_R_DD2_R_SFT, 1, 1),
- SOC_DAPM_SINGLE("Mono DAC Mix R Switch", RT5677_DD2_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix R Switch", RT5677_DD2_MIXER,
RT5677_M_MONO_R_DD2_R_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC4 R Switch", RT5677_DD2_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC4 R Switch", RT5677_DD2_MIXER,
RT5677_M_DAC4_R_DD2_R_SFT, 1, 1),
- SOC_DAPM_SINGLE("DAC4 L Switch", RT5677_DD2_MIXER,
+ SOC_DAPM_SINGLE_AUTODISABLE("DAC4 L Switch", RT5677_DD2_MIXER,
RT5677_M_DAC4_L_DD2_R_SFT, 1, 1),
};
@@ -2596,6 +2596,21 @@ static int rt5677_vref_event(struct snd_soc_dapm_widget *w,
return 0;
}
+static int rt5677_filter_power_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ msleep(50);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT,
0, rt5677_set_pll1_event, SND_SOC_DAPM_PRE_PMU |
@@ -3072,19 +3087,26 @@ static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
/* DAC Mixer */
SND_SOC_DAPM_SUPPLY("dac stereo1 filter", RT5677_PWR_DIG2,
- RT5677_PWR_DAC_S1F_BIT, 0, NULL, 0),
+ RT5677_PWR_DAC_S1F_BIT, 0, rt5677_filter_power_event,
+ SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_SUPPLY("dac mono2 left filter", RT5677_PWR_DIG2,
- RT5677_PWR_DAC_M2F_L_BIT, 0, NULL, 0),
+ RT5677_PWR_DAC_M2F_L_BIT, 0, rt5677_filter_power_event,
+ SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_SUPPLY("dac mono2 right filter", RT5677_PWR_DIG2,
- RT5677_PWR_DAC_M2F_R_BIT, 0, NULL, 0),
+ RT5677_PWR_DAC_M2F_R_BIT, 0, rt5677_filter_power_event,
+ SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_SUPPLY("dac mono3 left filter", RT5677_PWR_DIG2,
- RT5677_PWR_DAC_M3F_L_BIT, 0, NULL, 0),
+ RT5677_PWR_DAC_M3F_L_BIT, 0, rt5677_filter_power_event,
+ SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_SUPPLY("dac mono3 right filter", RT5677_PWR_DIG2,
- RT5677_PWR_DAC_M3F_R_BIT, 0, NULL, 0),
+ RT5677_PWR_DAC_M3F_R_BIT, 0, rt5677_filter_power_event,
+ SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_SUPPLY("dac mono4 left filter", RT5677_PWR_DIG2,
- RT5677_PWR_DAC_M4F_L_BIT, 0, NULL, 0),
+ RT5677_PWR_DAC_M4F_L_BIT, 0, rt5677_filter_power_event,
+ SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_SUPPLY("dac mono4 right filter", RT5677_PWR_DIG2,
- RT5677_PWR_DAC_M4F_R_BIT, 0, NULL, 0),
+ RT5677_PWR_DAC_M4F_R_BIT, 0, rt5677_filter_power_event,
+ SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_MIXER("Stereo DAC MIXL", SND_SOC_NOPM, 0, 0,
rt5677_sto1_dac_l_mix, ARRAY_SIZE(rt5677_sto1_dac_l_mix)),
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 056375339ea3..5380798883b5 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -229,7 +229,7 @@ SOC_DOUBLE_R_TLV("Capture Volume", WM8960_LINVOL, WM8960_RINVOL,
SOC_DOUBLE_R("Capture Volume ZC Switch", WM8960_LINVOL, WM8960_RINVOL,
6, 1, 0),
SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL,
- 7, 1, 0),
+ 7, 1, 1),
SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume",
WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv),
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 39ebd7bf4f53..a7e79784fc16 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -365,8 +365,8 @@ static const struct reg_default wm8962_reg[] = {
{ 16924, 0x0059 }, /* R16924 - HDBASS_PG_1 */
{ 16925, 0x999A }, /* R16925 - HDBASS_PG_0 */
- { 17048, 0x0083 }, /* R17408 - HPF_C_1 */
- { 17049, 0x98AD }, /* R17409 - HPF_C_0 */
+ { 17408, 0x0083 }, /* R17408 - HPF_C_1 */
+ { 17409, 0x98AD }, /* R17409 - HPF_C_0 */
{ 17920, 0x007F }, /* R17920 - ADCL_RETUNE_C1_1 */
{ 17921, 0xFFFF }, /* R17921 - ADCL_RETUNE_C1_0 */
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 4495a40a9468..c1c9c2e3525b 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -681,8 +681,8 @@ static int davinci_mcasp_set_tdm_slot(struct snd_soc_dai *dai,
}
mcasp->tdm_slots = slots;
- mcasp->tdm_mask[SNDRV_PCM_STREAM_PLAYBACK] = rx_mask;
- mcasp->tdm_mask[SNDRV_PCM_STREAM_CAPTURE] = tx_mask;
+ mcasp->tdm_mask[SNDRV_PCM_STREAM_PLAYBACK] = tx_mask;
+ mcasp->tdm_mask[SNDRV_PCM_STREAM_CAPTURE] = rx_mask;
mcasp->slot_width = slot_width;
return davinci_mcasp_set_ch_constraints(mcasp);
@@ -908,6 +908,14 @@ static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream,
mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD);
mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG,
FSRMOD(total_slots), FSRMOD(0x1FF));
+ /*
+ * If McASP is set to be TX/RX synchronous and playback is not
+ * already running, we need to configure the TX slots in order
+ * to have a correct FSX on the bus
+ */
+ if (mcasp_is_synchronous(mcasp) && !mcasp->channels)
+ mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG,
+ FSXMOD(total_slots), FSXMOD(0x1FF));
}
return 0;
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 19c302b0d763..14dfdee05fd5 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -283,6 +283,8 @@ config SND_SOC_IMX_MC13783
config SND_SOC_FSL_ASOC_CARD
tristate "Generic ASoC Sound Card with ASRC support"
depends on OF && I2C
+ # enforce SND_SOC_FSL_ASOC_CARD=m if SND_AC97_CODEC=m:
+ depends on SND_AC97_CODEC || SND_AC97_CODEC=n
select SND_SOC_IMX_AUDMUX
select SND_SOC_IMX_PCM_DMA
select SND_SOC_FSL_ESAI
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index a4435f5e3be9..ffd5f9acc849 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -454,7 +454,8 @@ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
* Rx sync with Tx clocks: Clear SYNC for Tx, set it for Rx.
* Tx sync with Rx clocks: Clear SYNC for Rx, set it for Tx.
*/
- regmap_update_bits(sai->regmap, FSL_SAI_TCR2, FSL_SAI_CR2_SYNC, 0);
+ regmap_update_bits(sai->regmap, FSL_SAI_TCR2, FSL_SAI_CR2_SYNC,
+ sai->synchronous[TX] ? FSL_SAI_CR2_SYNC : 0);
regmap_update_bits(sai->regmap, FSL_SAI_RCR2, FSL_SAI_CR2_SYNC,
sai->synchronous[RX] ? FSL_SAI_CR2_SYNC : 0);
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 7b778ab85f8b..d430ef5a4f38 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -144,7 +144,7 @@ config SND_SOC_INTEL_SKYLAKE
config SND_SOC_INTEL_SKL_RT286_MACH
tristate "ASoC Audio driver for SKL with RT286 I2S mode"
- depends on X86 && ACPI
+ depends on X86 && ACPI && I2C
select SND_SOC_INTEL_SST
select SND_SOC_INTEL_SKYLAKE
select SND_SOC_RT286
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index a7854c8fc523..ffea427aeca8 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -1240,6 +1240,7 @@ int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
*/
ret = snd_soc_tplg_component_load(&platform->component,
&skl_tplg_ops, fw, 0);
+ release_firmware(fw);
if (ret < 0) {
dev_err(bus->dev, "tplg component load failed%d\n", ret);
return -EINVAL;
diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c
index a38a3029062c..ac72ff5055bb 100644
--- a/sound/soc/rockchip/rockchip_spdif.c
+++ b/sound/soc/rockchip/rockchip_spdif.c
@@ -280,7 +280,7 @@ static int rk_spdif_probe(struct platform_device *pdev)
int ret;
match = of_match_node(rk_spdif_match, np);
- if ((int) match->data == RK_SPDIF_RK3288) {
+ if (match->data == (void *)RK_SPDIF_RK3288) {
struct regmap *grf;
grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
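
The rockchip fix avoids casting match->data (a const void *) to int, which truncates on 64-bit targets and triggers pointer-to-int-cast warnings; comparing against a pointer-sized cast of the constant keeps it portable. A sketch of the two safe styles, assuming a hypothetical variant enum:

#include <linux/of.h>
#include <linux/types.h>

enum my_variant { MY_V1 = 1, MY_V2 = 2 };

static void setup_v2(void);	/* hypothetical */

static void check_variant(const struct of_device_id *match)
{
	/* Either compare as pointers ... */
	if (match->data == (void *)MY_V2)
		setup_v2();

	/* ... or round-trip through uintptr_t, never plain int. */
	if ((enum my_variant)(uintptr_t)match->data == MY_V2)
		setup_v2();
}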
diff --git a/sound/soc/rockchip/rockchip_spdif.h b/sound/soc/rockchip/rockchip_spdif.h
index 07f86a21046a..921b4095fb92 100644
--- a/sound/soc/rockchip/rockchip_spdif.h
+++ b/sound/soc/rockchip/rockchip_spdif.h
@@ -28,9 +28,9 @@
#define SPDIF_CFGR_VDW(x) (x << SPDIF_CFGR_VDW_SHIFT)
#define SDPIF_CFGR_VDW_MASK (0xf << SPDIF_CFGR_VDW_SHIFT)
-#define SPDIF_CFGR_VDW_16 SPDIF_CFGR_VDW(0x00)
-#define SPDIF_CFGR_VDW_20 SPDIF_CFGR_VDW(0x01)
-#define SPDIF_CFGR_VDW_24 SPDIF_CFGR_VDW(0x10)
+#define SPDIF_CFGR_VDW_16 SPDIF_CFGR_VDW(0x0)
+#define SPDIF_CFGR_VDW_20 SPDIF_CFGR_VDW(0x1)
+#define SPDIF_CFGR_VDW_24 SPDIF_CFGR_VDW(0x2)
/*
* DMACR
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 76da7620904c..edcf4cc2e84f 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -235,7 +235,7 @@ static int rsnd_gen2_probe(struct platform_device *pdev,
RSND_GEN_S_REG(SCU_SYS_STATUS0, 0x1c8),
RSND_GEN_S_REG(SCU_SYS_INT_EN0, 0x1cc),
RSND_GEN_S_REG(SCU_SYS_STATUS1, 0x1d0),
- RSND_GEN_S_REG(SCU_SYS_INT_EN1, 0x1c4),
+ RSND_GEN_S_REG(SCU_SYS_INT_EN1, 0x1d4),
RSND_GEN_M_REG(SRC_SWRSR, 0x200, 0x40),
RSND_GEN_M_REG(SRC_SRCIR, 0x204, 0x40),
RSND_GEN_M_REG(SRC_ADINR, 0x214, 0x40),
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 261b50217c48..68b439ed22d7 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -923,6 +923,7 @@ static int rsnd_src_pcm_new_gen2(struct rsnd_mod *mod,
struct snd_soc_pcm_runtime *rtd)
{
struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
+ struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
struct rsnd_src *src = rsnd_mod_to_src(mod);
int ret;
@@ -937,6 +938,12 @@ static int rsnd_src_pcm_new_gen2(struct rsnd_mod *mod,
return 0;
/*
+ * SRC In doesn't work if DVC is enabled
+ */
+ if (dvc && !rsnd_io_is_play(io))
+ return 0;
+
+ /*
* enable sync convert
*/
ret = rsnd_kctrl_new_s(mod, io, rtd,
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 24b096066a07..a1305f827a98 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -795,12 +795,12 @@ static void soc_resume_deferred(struct work_struct *work)
dev_dbg(card->dev, "ASoC: resume work completed\n");
- /* userspace can access us now we are back as we were before */
- snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D0);
-
/* Recheck all endpoints too, their state is affected by suspend */
dapm_mark_endpoints_dirty(card);
snd_soc_dapm_sync(&card->dapm);
+
+ /* userspace can access us now we are back as we were before */
+ snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D0);
}
/* powers up audio subsystem after a suspend */
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 016eba10b1ec..7d009428934a 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2293,6 +2293,12 @@ void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
kfree(w);
}
+void snd_soc_dapm_reset_cache(struct snd_soc_dapm_context *dapm)
+{
+ dapm->path_sink_cache.widget = NULL;
+ dapm->path_source_cache.widget = NULL;
+}
+
/* free all dapm widgets and resources */
static void dapm_free_widgets(struct snd_soc_dapm_context *dapm)
{
@@ -2303,6 +2309,7 @@ static void dapm_free_widgets(struct snd_soc_dapm_context *dapm)
continue;
snd_soc_dapm_free_widget(w);
}
+ snd_soc_dapm_reset_cache(dapm);
}
static struct snd_soc_dapm_widget *dapm_find_widget(
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index ecd38e52285a..2f67ba6d7a8f 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -404,7 +404,7 @@ EXPORT_SYMBOL_GPL(snd_soc_get_volsw_sx);
/**
* snd_soc_put_volsw_sx - double mixer set callback
* @kcontrol: mixer control
- * @uinfo: control element information
+ * @ucontrol: control element information
*
* Callback to set the value of a double mixer control that spans 2 registers.
*
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 8d7ec80af51b..6963ba20991c 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -531,7 +531,7 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
/* TLV bytes controls need standard kcontrol info handler,
* TLV callback and extended put/get handlers.
*/
- k->info = snd_soc_bytes_info;
+ k->info = snd_soc_bytes_info_ext;
k->tlv.c = snd_soc_bytes_tlv_callback;
ext_ops = tplg->bytes_ext_ops;
@@ -1805,6 +1805,7 @@ void snd_soc_tplg_widget_remove_all(struct snd_soc_dapm_context *dapm,
snd_soc_tplg_widget_remove(w);
snd_soc_dapm_free_widget(w);
}
+ snd_soc_dapm_reset_cache(dapm);
}
EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all);
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
index 843f037a317d..5c2bc53f0a9b 100644
--- a/sound/soc/sti/uniperif_player.c
+++ b/sound/soc/sti/uniperif_player.c
@@ -669,6 +669,7 @@ static int uni_player_startup(struct snd_pcm_substream *substream,
{
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
struct uniperif *player = priv->dai_data.uni;
+ player->substream = substream;
player->clk_adj = 0;
@@ -950,6 +951,8 @@ static void uni_player_shutdown(struct snd_pcm_substream *substream,
if (player->state != UNIPERIF_STATE_STOPPED)
/* Stop the player */
uni_player_stop(player);
+
+ player->substream = NULL;
}
static int uni_player_parse_dt_clk_glue(struct platform_device *pdev,
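The two hunks above bracket the substream's validity: startup() records it and shutdown() clears it, so code consulting player->substream between the two (an interrupt handler, for instance) can test for NULL instead of chasing a stale pointer. The lifetime, sketched with stand-in types:

    struct player_stub {
            void *substream;
    };

    static void startup_hook(struct player_stub *p, void *substream)
    {
            p->substream = substream;       /* valid from startup ... */
    }

    static void shutdown_hook(struct player_stub *p)
    {
            p->substream = NULL;            /* ... until shutdown */
    }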
@@ -989,7 +992,7 @@ static int uni_player_parse_dt(struct platform_device *pdev,
if (!info)
return -ENOMEM;
- if (of_property_read_u32(pnode, "version", &player->ver) ||
+ if (of_property_read_u32(pnode, "st,version", &player->ver) ||
player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
dev_err(dev, "Unknown uniperipheral version ");
return -EINVAL;
@@ -998,13 +1001,13 @@ static int uni_player_parse_dt(struct platform_device *pdev,
if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
info->underflow_enabled = 1;
- if (of_property_read_u32(pnode, "uniperiph-id", &info->id)) {
+ if (of_property_read_u32(pnode, "st,uniperiph-id", &info->id)) {
dev_err(dev, "uniperipheral id not defined");
return -EINVAL;
}
/* Read the device mode property */
- if (of_property_read_string(pnode, "mode", &mode)) {
+ if (of_property_read_string(pnode, "st,mode", &mode)) {
dev_err(dev, "uniperipheral mode not defined");
return -EINVAL;
}
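All three properties gain the "st," vendor prefix, per devicetree convention for vendor-specific properties. Note the bare names stop being accepted, so device trees still using them will fail to probe; if compatibility were wanted, a fallback could look like this (hypothetical, the patch does not add one):

    /* Prefer the vendor-prefixed property, accept the legacy bare
     * name only if it is absent (of_property_read_u32() returns 0
     * on success). */
    if (of_property_read_u32(pnode, "st,version", &player->ver) &&
        of_property_read_u32(pnode, "version", &player->ver))
            return -EINVAL;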
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
index f791239a3087..8a0eb2050169 100644
--- a/sound/soc/sti/uniperif_reader.c
+++ b/sound/soc/sti/uniperif_reader.c
@@ -316,7 +316,7 @@ static int uni_reader_parse_dt(struct platform_device *pdev,
if (!info)
return -ENOMEM;
- if (of_property_read_u32(node, "version", &reader->ver) ||
+ if (of_property_read_u32(node, "st,version", &reader->ver) ||
reader->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
dev_err(&pdev->dev, "Unknown uniperipheral version ");
return -EINVAL;
@@ -346,7 +346,6 @@ int uni_reader_init(struct platform_device *pdev,
reader->hw = &uni_reader_pcm_hw;
reader->dai_ops = &uni_reader_dai_ops;
- dev_err(reader->dev, "%s: enter\n", __func__);
ret = uni_reader_parse_dt(pdev, reader);
if (ret < 0) {
dev_err(reader->dev, "Failed to parse DeviceTree");
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index bcbf4da168b6..1bb896d78d09 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -2,6 +2,7 @@
* Copyright 2014 Emilio López <emilio@elopez.com.ar>
* Copyright 2014 Jon Smirl <jonsmirl@gmail.com>
* Copyright 2015 Maxime Ripard <maxime.ripard@free-electrons.com>
+ * Copyright 2015 Adam Sampson <ats@offog.org>
*
* Based on the Allwinner SDK driver, released under the GPL.
*
@@ -404,7 +405,7 @@ static const struct snd_kcontrol_new sun4i_codec_pa_mute =
static DECLARE_TLV_DB_SCALE(sun4i_codec_pa_volume_scale, -6300, 100, 1);
static const struct snd_kcontrol_new sun4i_codec_widgets[] = {
- SOC_SINGLE_TLV("PA Volume", SUN4I_CODEC_DAC_ACTL,
+ SOC_SINGLE_TLV("Power Amplifier Volume", SUN4I_CODEC_DAC_ACTL,
SUN4I_CODEC_DAC_ACTL_PA_VOL, 0x3F, 0,
sun4i_codec_pa_volume_scale),
};
@@ -452,12 +453,12 @@ static const struct snd_soc_dapm_widget sun4i_codec_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("Mixer Enable", SUN4I_CODEC_DAC_ACTL,
SUN4I_CODEC_DAC_ACTL_MIXEN, 0, NULL, 0),
- /* Pre-Amplifier */
- SND_SOC_DAPM_MIXER("Pre-Amplifier", SUN4I_CODEC_ADC_ACTL,
+ /* Power Amplifier */
+ SND_SOC_DAPM_MIXER("Power Amplifier", SUN4I_CODEC_ADC_ACTL,
SUN4I_CODEC_ADC_ACTL_PA_EN, 0,
sun4i_codec_pa_mixer_controls,
ARRAY_SIZE(sun4i_codec_pa_mixer_controls)),
- SND_SOC_DAPM_SWITCH("Pre-Amplifier Mute", SND_SOC_NOPM, 0, 0,
+ SND_SOC_DAPM_SWITCH("Power Amplifier Mute", SND_SOC_NOPM, 0, 0,
&sun4i_codec_pa_mute),
SND_SOC_DAPM_OUTPUT("HP Right"),
@@ -480,16 +481,16 @@ static const struct snd_soc_dapm_route sun4i_codec_dapm_routes[] = {
{ "Left Mixer", NULL, "Mixer Enable" },
{ "Left Mixer", "Left DAC Playback Switch", "Left DAC" },
- /* Pre-Amplifier Mixer Routes */
- { "Pre-Amplifier", "Mixer Playback Switch", "Left Mixer" },
- { "Pre-Amplifier", "Mixer Playback Switch", "Right Mixer" },
- { "Pre-Amplifier", "DAC Playback Switch", "Left DAC" },
- { "Pre-Amplifier", "DAC Playback Switch", "Right DAC" },
+ /* Power Amplifier Routes */
+ { "Power Amplifier", "Mixer Playback Switch", "Left Mixer" },
+ { "Power Amplifier", "Mixer Playback Switch", "Right Mixer" },
+ { "Power Amplifier", "DAC Playback Switch", "Left DAC" },
+ { "Power Amplifier", "DAC Playback Switch", "Right DAC" },
- /* PA -> HP path */
- { "Pre-Amplifier Mute", "Switch", "Pre-Amplifier" },
- { "HP Right", NULL, "Pre-Amplifier Mute" },
- { "HP Left", NULL, "Pre-Amplifier Mute" },
+ /* Headphone Output Routes */
+ { "Power Amplifier Mute", "Switch", "Power Amplifier" },
+ { "HP Right", NULL, "Power Amplifier Mute" },
+ { "HP Left", NULL, "Power Amplifier Mute" },
};
static struct snd_soc_codec_driver sun4i_codec_codec = {
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 40ab4476c80a..51cf8256c6cd 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -420,8 +420,7 @@ static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
static int nfit_test0_alloc(struct nfit_test *t)
{
- size_t nfit_size = sizeof(struct acpi_table_nfit)
- + sizeof(struct acpi_nfit_system_address) * NUM_SPA
+ size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
@@ -471,8 +470,7 @@ static int nfit_test0_alloc(struct nfit_test *t)
static int nfit_test1_alloc(struct nfit_test *t)
{
- size_t nfit_size = sizeof(struct acpi_table_nfit)
- + sizeof(struct acpi_nfit_system_address)
+ size_t nfit_size = sizeof(struct acpi_nfit_system_address)
+ sizeof(struct acpi_nfit_memory_map)
+ sizeof(struct acpi_nfit_control_region);
@@ -488,39 +486,24 @@ static int nfit_test1_alloc(struct nfit_test *t)
return 0;
}
-static void nfit_test_init_header(struct acpi_table_nfit *nfit, size_t size)
-{
- memcpy(nfit->header.signature, ACPI_SIG_NFIT, 4);
- nfit->header.length = size;
- nfit->header.revision = 1;
- memcpy(nfit->header.oem_id, "LIBND", 6);
- memcpy(nfit->header.oem_table_id, "TEST", 5);
- nfit->header.oem_revision = 1;
- memcpy(nfit->header.asl_compiler_id, "TST", 4);
- nfit->header.asl_compiler_revision = 1;
-}
-
static void nfit_test0_setup(struct nfit_test *t)
{
struct nvdimm_bus_descriptor *nd_desc;
struct acpi_nfit_desc *acpi_desc;
struct acpi_nfit_memory_map *memdev;
void *nfit_buf = t->nfit_buf;
- size_t size = t->nfit_size;
struct acpi_nfit_system_address *spa;
struct acpi_nfit_control_region *dcr;
struct acpi_nfit_data_region *bdw;
struct acpi_nfit_flush_address *flush;
unsigned int offset;
- nfit_test_init_header(nfit_buf, size);
-
/*
* spa0 (interleave first half of dimm0 and dimm1, note storage
* does not actually alias the related block-data-window
* regions)
*/
- spa = nfit_buf + sizeof(struct acpi_table_nfit);
+ spa = nfit_buf;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
spa->header.length = sizeof(*spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
@@ -533,7 +516,7 @@ static void nfit_test0_setup(struct nfit_test *t)
* does not actually alias the related block-data-window
* regions)
*/
- spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa);
+ spa = nfit_buf + sizeof(*spa);
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
spa->header.length = sizeof(*spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
@@ -542,7 +525,7 @@ static void nfit_test0_setup(struct nfit_test *t)
spa->length = SPA1_SIZE;
/* spa2 (dcr0) dimm0 */
- spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 2;
+ spa = nfit_buf + sizeof(*spa) * 2;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
spa->header.length = sizeof(*spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
@@ -551,7 +534,7 @@ static void nfit_test0_setup(struct nfit_test *t)
spa->length = DCR_SIZE;
/* spa3 (dcr1) dimm1 */
- spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 3;
+ spa = nfit_buf + sizeof(*spa) * 3;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
spa->header.length = sizeof(*spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
@@ -560,7 +543,7 @@ static void nfit_test0_setup(struct nfit_test *t)
spa->length = DCR_SIZE;
/* spa4 (dcr2) dimm2 */
- spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 4;
+ spa = nfit_buf + sizeof(*spa) * 4;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
spa->header.length = sizeof(*spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
@@ -569,7 +552,7 @@ static void nfit_test0_setup(struct nfit_test *t)
spa->length = DCR_SIZE;
/* spa5 (dcr3) dimm3 */
- spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 5;
+ spa = nfit_buf + sizeof(*spa) * 5;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
spa->header.length = sizeof(*spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
@@ -578,7 +561,7 @@ static void nfit_test0_setup(struct nfit_test *t)
spa->length = DCR_SIZE;
/* spa6 (bdw for dcr0) dimm0 */
- spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 6;
+ spa = nfit_buf + sizeof(*spa) * 6;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
spa->header.length = sizeof(*spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
@@ -587,7 +570,7 @@ static void nfit_test0_setup(struct nfit_test *t)
spa->length = DIMM_SIZE;
/* spa7 (bdw for dcr1) dimm1 */
- spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 7;
+ spa = nfit_buf + sizeof(*spa) * 7;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
spa->header.length = sizeof(*spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
@@ -596,7 +579,7 @@ static void nfit_test0_setup(struct nfit_test *t)
spa->length = DIMM_SIZE;
/* spa8 (bdw for dcr2) dimm2 */
- spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 8;
+ spa = nfit_buf + sizeof(*spa) * 8;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
spa->header.length = sizeof(*spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
@@ -605,7 +588,7 @@ static void nfit_test0_setup(struct nfit_test *t)
spa->length = DIMM_SIZE;
/* spa9 (bdw for dcr3) dimm3 */
- spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 9;
+ spa = nfit_buf + sizeof(*spa) * 9;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
spa->header.length = sizeof(*spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
@@ -613,7 +596,7 @@ static void nfit_test0_setup(struct nfit_test *t)
spa->address = t->dimm_dma[3];
spa->length = DIMM_SIZE;
- offset = sizeof(struct acpi_table_nfit) + sizeof(*spa) * 10;
+ offset = sizeof(*spa) * 10;
/* mem-region0 (spa0, dimm0) */
memdev = nfit_buf + offset;
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
@@ -1100,15 +1083,13 @@ static void nfit_test0_setup(struct nfit_test *t)
static void nfit_test1_setup(struct nfit_test *t)
{
- size_t size = t->nfit_size, offset;
+ size_t offset;
void *nfit_buf = t->nfit_buf;
struct acpi_nfit_memory_map *memdev;
struct acpi_nfit_control_region *dcr;
struct acpi_nfit_system_address *spa;
- nfit_test_init_header(nfit_buf, size);
-
- offset = sizeof(struct acpi_table_nfit);
+ offset = 0;
/* spa0 (flat range with no bdw aliasing) */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
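With nfit_test_init_header() gone, the tests no longer fabricate an ACPI table header, so every structure shifts down by sizeof(struct acpi_table_nfit) and offsets count from the start of nfit_buf. A runnable check of the new layout arithmetic (the struct size here is a stand-in, not the real ACPI definition):

    #include <stdio.h>

    struct spa_stub { char bytes[56]; };    /* stand-in for acpi_nfit_system_address */

    int main(void)
    {
            /* ten SPA entries, then the memory-map entries */
            size_t memdev_offset = sizeof(struct spa_stub) * 10;

            printf("spa9 at %zu, memdevs at %zu\n",
                   sizeof(struct spa_stub) * 9, memdev_offset);
            return 0;
    }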
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 0a3da64638ce..4db7d5691ba7 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -110,4 +110,10 @@ static inline void free_page(unsigned long addr)
(void) (&_min1 == &_min2); \
_min1 < _min2 ? _min1 : _min2; })
+/* TODO: empty stubs for now. Broken but enough for virtio_ring.c */
+#define list_add_tail(a, b) do {} while (0)
+#define list_del(a) do {} while (0)
+#define list_for_each_entry(a, b, c) while (0)
+/* end of stubs */
+
#endif /* KERNEL_H */
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h
index a3e07016a440..ee125e714053 100644
--- a/tools/virtio/linux/virtio.h
+++ b/tools/virtio/linux/virtio.h
@@ -3,12 +3,6 @@
#include <linux/scatterlist.h>
#include <linux/kernel.h>
-/* TODO: empty stubs for now. Broken but enough for virtio_ring.c */
-#define list_add_tail(a, b) do {} while (0)
-#define list_del(a) do {} while (0)
-#define list_for_each_entry(a, b, c) while (0)
-/* end of stubs */
-
struct virtio_device {
void *dev;
u64 features;
diff --git a/tools/virtio/linux/virtio_config.h b/tools/virtio/linux/virtio_config.h
index 806d683ab107..57a6964a1e35 100644
--- a/tools/virtio/linux/virtio_config.h
+++ b/tools/virtio/linux/virtio_config.h
@@ -40,33 +40,39 @@ static inline void __virtio_clear_bit(struct virtio_device *vdev,
#define virtio_has_feature(dev, feature) \
(__virtio_test_bit((dev), feature))
+static inline bool virtio_is_little_endian(struct virtio_device *vdev)
+{
+ return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
+ virtio_legacy_is_little_endian();
+}
+
+/* Memory accessors */
static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
{
- return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+ return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
}
static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
{
- return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+ return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
}
static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
{
- return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+ return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
}
static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
{
- return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+ return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
}
static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
{
- return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+ return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
}
static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
- return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+ return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}
-
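The new virtio_is_little_endian() helper replaces the bare VIRTIO_F_VERSION_1 test in each accessor: a device is now treated as little-endian when it negotiated VIRTIO_F_VERSION_1, or when the platform's legacy virtio ABI is little-endian anyway. A self-contained illustration of the rule (the feature word is a stand-in for the real vdev->features):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VIRTIO_F_VERSION_1 32           /* real feature-bit number */

    static bool is_little_endian(uint64_t features, bool legacy_le)
    {
            return (features & (1ULL << VIRTIO_F_VERSION_1)) || legacy_le;
    }

    int main(void)
    {
            /* modern device: always little-endian */
            printf("%d\n", is_little_endian(1ULL << VIRTIO_F_VERSION_1, false));
            /* legacy device on a big-endian legacy ABI: not little-endian */
            printf("%d\n", is_little_endian(0, false));
            return 0;
    }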