Diffstat (limited to 'include')
-rw-r--r--include/acpi/acconfig.h2
-rw-r--r--include/acpi/acpi_bus.h2
-rw-r--r--include/acpi/acpi_io.h2
-rw-r--r--include/acpi/acpixf.h26
-rw-r--r--include/acpi/actbl.h1
-rw-r--r--include/acpi/platform/aclinuxex.h1
-rw-r--r--include/acpi/processor.h3
-rw-r--r--include/acpi/video.h11
-rw-r--r--include/asm-generic/asm-prototypes.h13
-rw-r--r--include/asm-generic/cputime_jiffies.h1
-rw-r--r--include/asm-generic/cputime_nsecs.h1
-rw-r--r--include/asm-generic/mutex-dec.h88
-rw-r--r--include/asm-generic/mutex-null.h19
-rw-r--r--include/asm-generic/mutex-xchg.h120
-rw-r--r--include/asm-generic/mutex.h9
-rw-r--r--include/asm-generic/pgtable.h22
-rw-r--r--include/asm-generic/termios-base.h2
-rw-r--r--include/asm-generic/termios.h2
-rw-r--r--include/asm-generic/tlb.h83
-rw-r--r--include/asm-generic/vmlinux.lds.h69
-rw-r--r--include/clocksource/pxa.h3
-rw-r--r--include/crypto/acompress.h269
-rw-r--r--include/crypto/aead.h50
-rw-r--r--include/crypto/cbc.h146
-rw-r--r--include/crypto/cryptd.h13
-rw-r--r--include/crypto/dh.h58
-rw-r--r--include/crypto/ecdh.h58
-rw-r--r--include/crypto/engine.h6
-rw-r--r--include/crypto/gf128mul.h15
-rw-r--r--include/crypto/hash.h2
-rw-r--r--include/crypto/internal/acompress.h81
-rw-r--r--include/crypto/internal/scompress.h136
-rw-r--r--include/crypto/internal/simd.h17
-rw-r--r--include/crypto/internal/skcipher.h65
-rw-r--r--include/crypto/kpp.h15
-rw-r--r--include/crypto/skcipher.h4
-rw-r--r--include/crypto/xts.h26
-rw-r--r--include/drm/bridge/mhl.h291
-rw-r--r--include/drm/drmP.h337
-rw-r--r--include/drm/drm_atomic.h55
-rw-r--r--include/drm/drm_blend.h10
-rw-r--r--include/drm/drm_connector.h72
-rw-r--r--include/drm/drm_crtc.h734
-rw-r--r--include/drm/drm_debugfs_crc.h73
-rw-r--r--include/drm/drm_dp_dual_mode_helper.h27
-rw-r--r--include/drm/drm_dp_helper.h6
-rw-r--r--include/drm/drm_drv.h435
-rw-r--r--include/drm/drm_edid.h14
-rw-r--r--include/drm/drm_encoder.h2
-rw-r--r--include/drm/drm_fb_cma_helper.h5
-rw-r--r--include/drm/drm_fb_helper.h4
-rw-r--r--include/drm/drm_fourcc.h33
-rw-r--r--include/drm/drm_framebuffer.h22
-rw-r--r--include/drm/drm_irq.h63
-rw-r--r--include/drm/drm_mm.h28
-rw-r--r--include/drm/drm_mode_config.h663
-rw-r--r--include/drm/drm_modeset_helper_vtables.h28
-rw-r--r--include/drm/drm_modeset_lock.h12
-rw-r--r--include/drm/drm_of.h13
-rw-r--r--include/drm/drm_plane.h100
-rw-r--r--include/drm/drm_print.h112
-rw-r--r--include/drm/i915_component.h6
-rw-r--r--include/drm/ttm/ttm_bo_api.h15
-rw-r--r--include/drm/ttm/ttm_bo_driver.h48
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h2
-rw-r--r--include/dt-bindings/clock/hi3516cv300-clock.h48
-rw-r--r--include/dt-bindings/clock/histb-clock.h66
-rw-r--r--include/dt-bindings/clock/imx6ul-clock.h15
-rw-r--r--include/dt-bindings/clock/oxsemi,ox810se.h30
-rw-r--r--include/dt-bindings/clock/oxsemi,ox820.h40
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8994.h137
-rw-r--r--include/dt-bindings/clock/qcom,rpmcc.h69
-rw-r--r--include/dt-bindings/clock/r7s72100-clock.h7
-rw-r--r--include/dt-bindings/clock/r8a7743-cpg-mssr.h43
-rw-r--r--include/dt-bindings/clock/r8a7745-cpg-mssr.h44
-rw-r--r--include/dt-bindings/clock/r8a7794-clock.h3
-rw-r--r--include/dt-bindings/clock/rk1108-cru.h269
-rw-r--r--include/dt-bindings/clock/rk3188-cru-common.h8
-rw-r--r--include/dt-bindings/clock/stih415-clks.h16
-rw-r--r--include/dt-bindings/clock/sun50i-a64-ccu.h134
-rw-r--r--include/dt-bindings/clock/tegra186-clock.h940
-rw-r--r--include/dt-bindings/gpio/meson-gxl-gpio.h131
-rw-r--r--include/dt-bindings/mailbox/tegra186-hsp.h24
-rw-r--r--include/dt-bindings/net/mscc-phy-vsc8531.h21
-rw-r--r--include/dt-bindings/pinctrl/at91.h2
-rw-r--r--include/dt-bindings/pinctrl/bcm2835.h5
-rw-r--r--include/dt-bindings/pinctrl/qcom,pmic-gpio.h4
-rw-r--r--include/dt-bindings/pinctrl/qcom,pmic-mpp.h6
-rw-r--r--include/dt-bindings/pinctrl/rockchip.h33
-rw-r--r--include/dt-bindings/power/mt2701-power.h27
-rw-r--r--include/dt-bindings/power/r8a7743-sysc.h25
-rw-r--r--include/dt-bindings/power/r8a7745-sysc.h25
-rw-r--r--include/dt-bindings/power/tegra186-powergate.h39
-rw-r--r--include/dt-bindings/reset/oxsemi,ox810se.h53
-rw-r--r--include/dt-bindings/reset/oxsemi,ox820.h53
-rw-r--r--include/dt-bindings/reset/sun50i-a64-ccu.h98
-rw-r--r--include/dt-bindings/reset/tegra186-reset.h217
-rw-r--r--include/kvm/arm_arch_timer.h4
-rw-r--r--include/linux/acpi.h38
-rw-r--r--include/linux/acpi_iort.h16
-rw-r--r--include/linux/ahci-remap.h28
-rw-r--r--include/linux/aio.h5
-rw-r--r--include/linux/alarmtimer.h5
-rw-r--r--include/linux/amba/pl061.h16
-rw-r--r--include/linux/amba/pl08x.h4
-rw-r--r--include/linux/ata.h6
-rw-r--r--include/linux/atmel-ssc.h1
-rw-r--r--include/linux/audit.h2
-rw-r--r--include/linux/backing-dev-defs.h5
-rw-r--r--include/linux/bio.h49
-rw-r--r--include/linux/blk-cgroup.h11
-rw-r--r--include/linux/blk-mq.h12
-rw-r--r--include/linux/blk_types.h188
-rw-r--r--include/linux/blkdev.h246
-rw-r--r--include/linux/blktrace_api.h2
-rw-r--r--include/linux/bpf-cgroup.h92
-rw-r--r--include/linux/bpf.h30
-rw-r--r--include/linux/bpf_verifier.h14
-rw-r--r--include/linux/brcmphy.h27
-rw-r--r--include/linux/bsg-lib.h4
-rw-r--r--include/linux/buffer_head.h7
-rw-r--r--include/linux/bug.h17
-rw-r--r--include/linux/cacheinfo.h4
-rw-r--r--include/linux/capability.h2
-rw-r--r--include/linux/ccp.h6
-rw-r--r--include/linux/ceph/auth.h5
-rw-r--r--include/linux/ceph/ceph_fs.h3
-rw-r--r--include/linux/ceph/mdsmap.h5
-rw-r--r--include/linux/ceph/messenger.h4
-rw-r--r--include/linux/ceph/osd_client.h2
-rw-r--r--include/linux/cgroup-defs.h4
-rw-r--r--include/linux/clk.h29
-rw-r--r--include/linux/clk/renesas.h4
-rw-r--r--include/linux/clocksource.h27
-rw-r--r--include/linux/cma.h3
-rw-r--r--include/linux/compiler-gcc.h2
-rw-r--r--include/linux/configfs.h15
-rw-r--r--include/linux/console.h16
-rw-r--r--include/linux/cpu.h99
-rw-r--r--include/linux/cpufreq.h6
-rw-r--r--include/linux/cpuhotplug.h33
-rw-r--r--include/linux/cpuidle.h9
-rw-r--r--include/linux/cpumask.h5
-rw-r--r--include/linux/crypto.h9
-rw-r--r--include/linux/dax.h70
-rw-r--r--include/linux/dcache.h4
-rw-r--r--include/linux/dcookies.h4
-rw-r--r--include/linux/debugfs.h47
-rw-r--r--include/linux/devfreq_cooling.h9
-rw-r--r--include/linux/device.h112
-rw-r--r--include/linux/dm-io.h2
-rw-r--r--include/linux/dma-buf.h4
-rw-r--r--include/linux/dma-fence-array.h86
-rw-r--r--include/linux/dma-fence.h438
-rw-r--r--include/linux/dma-iommu.h4
-rw-r--r--include/linux/dma-mapping.h20
-rw-r--r--include/linux/dmaengine.h8
-rw-r--r--include/linux/drbd_genl.h2
-rw-r--r--include/linux/dw_apb_timer.h2
-rw-r--r--include/linux/edac.h162
-rw-r--r--include/linux/efi.h46
-rw-r--r--include/linux/elevator.h9
-rw-r--r--include/linux/f2fs_fs.h10
-rw-r--r--include/linux/fddidevice.h1
-rw-r--r--include/linux/fence-array.h83
-rw-r--r--include/linux/fence.h378
-rw-r--r--include/linux/file.h2
-rw-r--r--include/linux/filter.h41
-rw-r--r--include/linux/fpga/fpga-bridge.h60
-rw-r--r--include/linux/fpga/fpga-mgr.h29
-rw-r--r--include/linux/fs.h98
-rw-r--r--include/linux/fscrypto.h134
-rw-r--r--include/linux/fsl/guts.h125
-rw-r--r--include/linux/fsl_devices.h1
-rw-r--r--include/linux/fsnotify.h12
-rw-r--r--include/linux/fsnotify_backend.h12
-rw-r--r--include/linux/ftrace.h6
-rw-r--r--include/linux/futex.h4
-rw-r--r--include/linux/fwnode.h3
-rw-r--r--include/linux/genhd.h9
-rw-r--r--include/linux/genl_magic_func.h29
-rw-r--r--include/linux/gfp.h2
-rw-r--r--include/linux/gpio/driver.h34
-rw-r--r--include/linux/gpio_keys.h5
-rw-r--r--include/linux/hdlc.h2
-rw-r--r--include/linux/hdmi.h2
-rw-r--r--include/linux/hid.h5
-rw-r--r--include/linux/hippidevice.h1
-rw-r--r--include/linux/hrtimer.h12
-rw-r--r--include/linux/huge_mm.h12
-rw-r--r--include/linux/hw_random.h3
-rw-r--r--include/linux/hwmon.h31
-rw-r--r--include/linux/hyperv.h53
-rw-r--r--include/linux/i2c-smbus.h27
-rw-r--r--include/linux/i2c.h26
-rw-r--r--include/linux/i2c/mlxcpld.h52
-rw-r--r--include/linux/idr.h40
-rw-r--r--include/linux/ieee80211.h26
-rw-r--r--include/linux/if_arp.h16
-rw-r--r--include/linux/if_vlan.h16
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h1
-rw-r--r--include/linux/iio/common/st_sensors.h12
-rw-r--r--include/linux/iio/consumer.h41
-rw-r--r--include/linux/iio/dac/mcp4725.h12
-rw-r--r--include/linux/iio/iio.h48
-rw-r--r--include/linux/iio/sysfs.h24
-rw-r--r--include/linux/iio/trigger.h2
-rw-r--r--include/linux/iio/types.h5
-rw-r--r--include/linux/ima.h12
-rw-r--r--include/linux/init.h3
-rw-r--r--include/linux/interrupt.h20
-rw-r--r--include/linux/iomap.h12
-rw-r--r--include/linux/iommu.h15
-rw-r--r--include/linux/ipv6.h10
-rw-r--r--include/linux/irqchip/arm-gic-v3.h12
-rw-r--r--include/linux/irqchip/mips-gic.h8
-rw-r--r--include/linux/isdnif.h2
-rw-r--r--include/linux/kdb.h2
-rw-r--r--include/linux/kernel.h14
-rw-r--r--include/linux/kernel_stat.h4
-rw-r--r--include/linux/kexec.h42
-rw-r--r--include/linux/kthread.h3
-rw-r--r--include/linux/ktime.h81
-rw-r--r--include/linux/kvm_host.h13
-rw-r--r--include/linux/leds.h25
-rw-r--r--include/linux/libata.h5
-rw-r--r--include/linux/lightnvm.h244
-rw-r--r--include/linux/list.h37
-rw-r--r--include/linux/lockdep.h25
-rw-r--r--include/linux/mbus.h8
-rw-r--r--include/linux/mc146818rtc.h1
-rw-r--r--include/linux/mdev.h138
-rw-r--r--include/linux/mei_cl_bus.h51
-rw-r--r--include/linux/mempolicy.h8
-rw-r--r--include/linux/mfd/axp20x.h6
-rw-r--r--include/linux/mfd/cros_ec.h10
-rw-r--r--include/linux/mfd/cros_ec_commands.h183
-rw-r--r--include/linux/mfd/davinci_voicecodec.h4
-rw-r--r--include/linux/mfd/intel_soc_pmic.h1
-rw-r--r--include/linux/mfd/max77620.h2
-rw-r--r--include/linux/mfd/rk808.h1
-rw-r--r--include/linux/mfd/rn5t618.h9
-rw-r--r--include/linux/mfd/sun4i-gpadc.h94
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h8
-rw-r--r--include/linux/mfd/tmio.h5
-rw-r--r--include/linux/mfd/tps65217.h15
-rw-r--r--include/linux/mfd/tps65218.h3
-rw-r--r--include/linux/mfd/tps65912.h16
-rw-r--r--include/linux/mii.h4
-rw-r--r--include/linux/miscdevice.h8
-rw-r--r--include/linux/mlx4/device.h4
-rw-r--r--include/linux/mlx5/device.h11
-rw-r--r--include/linux/mlx5/driver.h59
-rw-r--r--include/linux/mlx5/fs.h43
-rw-r--r--include/linux/mlx5/mlx5_ifc.h228
-rw-r--r--include/linux/mlx5/port.h9
-rw-r--r--include/linux/mlx5/srq.h2
-rw-r--r--include/linux/mlx5/vport.h10
-rw-r--r--include/linux/mm.h50
-rw-r--r--include/linux/mm_types.h5
-rw-r--r--include/linux/mmc/card.h14
-rw-r--r--include/linux/mmc/core.h16
-rw-r--r--include/linux/mmc/dw_mmc.h6
-rw-r--r--include/linux/mmc/host.h17
-rw-r--r--include/linux/mmc/mmc.h17
-rw-r--r--include/linux/mmc/slot-gpio.h1
-rw-r--r--include/linux/mmzone.h2
-rw-r--r--include/linux/mod_devicetable.h3
-rw-r--r--include/linux/module.h4
-rw-r--r--include/linux/mount.h6
-rw-r--r--include/linux/mtd/nand.h30
-rw-r--r--include/linux/mutex-debug.h24
-rw-r--r--include/linux/mutex.h77
-rw-r--r--include/linux/netdevice.h210
-rw-r--r--include/linux/netfilter.h89
-rw-r--r--include/linux/netfilter/ipset/ip_set.h136
-rw-r--r--include/linux/netfilter/ipset/ip_set_bitmap.h2
-rw-r--r--include/linux/netfilter/ipset/ip_set_comment.h11
-rw-r--r--include/linux/netfilter/ipset/ip_set_counter.h75
-rw-r--r--include/linux/netfilter/ipset/ip_set_skbinfo.h46
-rw-r--r--include/linux/netfilter/ipset/ip_set_timeout.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_dccp.h2
-rw-r--r--include/linux/netfilter/x_tables.h86
-rw-r--r--include/linux/netfilter_ingress.h9
-rw-r--r--include/linux/netpoll.h13
-rw-r--r--include/linux/nfs4.h1
-rw-r--r--include/linux/nfs_fs.h13
-rw-r--r--include/linux/nfs_xdr.h27
-rw-r--r--include/linux/nmi.h24
-rw-r--r--include/linux/ntb.h5
-rw-r--r--include/linux/nvme-fc-driver.h851
-rw-r--r--include/linux/nvme-fc.h268
-rw-r--r--include/linux/nvme.h43
-rw-r--r--include/linux/of.h25
-rw-r--r--include/linux/of_fdt.h1
-rw-r--r--include/linux/of_iommu.h12
-rw-r--r--include/linux/of_pci.h7
-rw-r--r--include/linux/page-flags.h33
-rw-r--r--include/linux/pagemap.h25
-rw-r--r--include/linux/parser.h1
-rw-r--r--include/linux/pci-acpi.h4
-rw-r--r--include/linux/pci-ecam.h9
-rw-r--r--include/linux/pci.h42
-rw-r--r--include/linux/pci_hotplug.h2
-rw-r--r--include/linux/pci_ids.h30
-rw-r--r--include/linux/phy.h44
-rw-r--r--include/linux/phy/phy-qcom-ufs.h18
-rw-r--r--include/linux/phy_led_triggers.h51
-rw-r--r--include/linux/pim.h81
-rw-r--r--include/linux/platform_data/dma-dw.h5
-rw-r--r--include/linux/platform_data/drv260x-pdata.h28
-rw-r--r--include/linux/platform_data/macb.h6
-rw-r--r--include/linux/platform_data/mlxcpld-hotplug.h99
-rw-r--r--include/linux/platform_data/mtd-nand-s3c2410.h7
-rw-r--r--include/linux/platform_data/spi-s3c64xx.h3
-rw-r--r--include/linux/platform_data/usb-davinci.h23
-rw-r--r--include/linux/pm-trace.h9
-rw-r--r--include/linux/pm.h4
-rw-r--r--include/linux/pm_domain.h28
-rw-r--r--include/linux/pm_opp.h72
-rw-r--r--include/linux/pm_runtime.h21
-rw-r--r--include/linux/poll.h2
-rw-r--r--include/linux/power/bq27xxx_battery.h3
-rw-r--r--include/linux/preempt.h21
-rw-r--r--include/linux/printk.h17
-rw-r--r--include/linux/proc_fs.h6
-rw-r--r--include/linux/pstore.h76
-rw-r--r--include/linux/pstore_ram.h14
-rw-r--r--include/linux/ptp_clock_kernel.h73
-rw-r--r--include/linux/ptrace.h4
-rw-r--r--include/linux/qed/qed_chain.h144
-rw-r--r--include/linux/qed/qed_eth_if.h61
-rw-r--r--include/linux/qed/qed_if.h49
-rw-r--r--include/linux/qed/qed_iscsi_if.h229
-rw-r--r--include/linux/quota.h3
-rw-r--r--include/linux/quotaops.h2
-rw-r--r--include/linux/radix-tree.h212
-rw-r--r--include/linux/ratelimit.h7
-rw-r--r--include/linux/rculist.h8
-rw-r--r--include/linux/regulator/consumer.h27
-rw-r--r--include/linux/regulator/driver.h4
-rw-r--r--include/linux/remoteproc.h36
-rw-r--r--include/linux/remoteproc/st_slim_rproc.h58
-rw-r--r--include/linux/reservation.h41
-rw-r--r--include/linux/restart_block.h51
-rw-r--r--include/linux/ring_buffer.h6
-rw-r--r--include/linux/rmap.h10
-rw-r--r--include/linux/rmi.h60
-rw-r--r--include/linux/rpmsg.h125
-rw-r--r--include/linux/rpmsg/qcom_smd.h33
-rw-r--r--include/linux/sched.h122
-rw-r--r--include/linux/sched/sysctl.h1
-rw-r--r--include/linux/seg6.h6
-rw-r--r--include/linux/seg6_genl.h6
-rw-r--r--include/linux/seg6_hmac.h6
-rw-r--r--include/linux/seg6_iptunnel.h6
-rw-r--r--include/linux/seqno-fence.h20
-rw-r--r--include/linux/serial_8250.h8
-rw-r--r--include/linux/serial_core.h6
-rw-r--r--include/linux/signal.h17
-rw-r--r--include/linux/skbuff.h31
-rw-r--r--include/linux/smc91x.h1
-rw-r--r--include/linux/soc/qcom/wcnss_ctrl.h13
-rw-r--r--include/linux/soc/renesas/rcar-rst.h6
-rw-r--r--include/linux/soc/ti/ti_sci_protocol.h249
-rw-r--r--include/linux/spi/spi.h1
-rw-r--r--include/linux/stm.h4
-rw-r--r--include/linux/stmmac.h5
-rw-r--r--include/linux/sunrpc/svc_rdma.h7
-rw-r--r--include/linux/suspend.h2
-rw-r--r--include/linux/swap.h37
-rw-r--r--include/linux/swiotlb.h25
-rw-r--r--include/linux/sync_file.h14
-rw-r--r--include/linux/sys_soc.h9
-rw-r--r--include/linux/tcp.h24
-rw-r--r--include/linux/thermal.h2
-rw-r--r--include/linux/thread_info.h45
-rw-r--r--include/linux/tick.h4
-rw-r--r--include/linux/time.h2
-rw-r--r--include/linux/timecounter.h12
-rw-r--r--include/linux/timekeeper_internal.h10
-rw-r--r--include/linux/timekeeping.h5
-rw-r--r--include/linux/trace.h28
-rw-r--r--include/linux/tracepoint-defs.h2
-rw-r--r--include/linux/tracepoint.h2
-rw-r--r--include/linux/types.h7
-rw-r--r--include/linux/udp.h3
-rw-r--r--include/linux/uio.h5
-rw-r--r--include/linux/uprobes.h1
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/usb/gadget.h4
-rw-r--r--include/linux/usb/hcd.h19
-rw-r--r--include/linux/userfaultfd_k.h4
-rw-r--r--include/linux/vfio.h48
-rw-r--r--include/linux/virtio_net.h4
-rw-r--r--include/linux/vmalloc.h1
-rw-r--r--include/linux/vme.h1
-rw-r--r--include/linux/vt_kern.h2
-rw-r--r--include/linux/wait.h2
-rw-r--r--include/linux/workqueue.h46
-rw-r--r--include/linux/writeback.h14
-rw-r--r--include/linux/ww_mutex.h2
-rw-r--r--include/media/cec.h12
-rw-r--r--include/media/media-device.h38
-rw-r--r--include/media/rc-core.h18
-rw-r--r--include/media/v4l2-common.h7
-rw-r--r--include/media/v4l2-dv-timings.h20
-rw-r--r--include/media/v4l2-mem2mem.h3
-rw-r--r--include/media/v4l2-tpg.h24
-rw-r--r--include/net/act_api.h5
-rw-r--r--include/net/bluetooth/bluetooth.h25
-rw-r--r--include/net/bonding.h2
-rw-r--r--include/net/busy_poll.h33
-rw-r--r--include/net/cfg80211.h161
-rw-r--r--include/net/checksum.h2
-rw-r--r--include/net/devlink.h2
-rw-r--r--include/net/dst_metadata.h10
-rw-r--r--include/net/fib_rules.h9
-rw-r--r--include/net/flow.h10
-rw-r--r--include/net/flow_dissector.h22
-rw-r--r--include/net/flowcache.h2
-rw-r--r--include/net/gen_stats.h17
-rw-r--r--include/net/genetlink.h107
-rw-r--r--include/net/ieee80211_radiotap.h4
-rw-r--r--include/net/if_inet6.h1
-rw-r--r--include/net/inet6_connection_sock.h3
-rw-r--r--include/net/inet_connection_sock.h11
-rw-r--r--include/net/inet_sock.h1
-rw-r--r--include/net/ip.h6
-rw-r--r--include/net/ip6_route.h5
-rw-r--r--include/net/ip_fib.h8
-rw-r--r--include/net/ip_tunnels.h6
-rw-r--r--include/net/ipv6.h3
-rw-r--r--include/net/lwtunnel.h13
-rw-r--r--include/net/mac80211.h19
-rw-r--r--include/net/ndisc.h5
-rw-r--r--include/net/net_namespace.h2
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h9
-rw-r--r--include/net/netfilter/ipv4/nf_defrag_ipv4.h3
-rw-r--r--include/net/netfilter/ipv6/nf_conntrack_ipv6.h9
-rw-r--r--include/net/netfilter/ipv6/nf_defrag_ipv6.h3
-rw-r--r--include/net/netfilter/nf_conntrack.h4
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h16
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h20
-rw-r--r--include/net/netfilter/nf_conntrack_synproxy.h2
-rw-r--r--include/net/netfilter/nf_dup_netdev.h1
-rw-r--r--include/net/netfilter/nf_log.h7
-rw-r--r--include/net/netfilter/nf_nat_l4proto.h9
-rw-r--r--include/net/netfilter/nf_queue.h1
-rw-r--r--include/net/netfilter/nf_socket.h27
-rw-r--r--include/net/netfilter/nf_tables.h137
-rw-r--r--include/net/netfilter/nf_tables_core.h34
-rw-r--r--include/net/netfilter/nft_fib.h31
-rw-r--r--include/net/netfilter/xt_rateest.h10
-rw-r--r--include/net/netlink.h12
-rw-r--r--include/net/netns/conntrack.h44
-rw-r--r--include/net/netns/generic.h16
-rw-r--r--include/net/netns/ipv4.h4
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/netns/netfilter.h6
-rw-r--r--include/net/pkt_cls.h21
-rw-r--r--include/net/pkt_sched.h2
-rw-r--r--include/net/raw.h6
-rw-r--r--include/net/rawv6.h7
-rw-r--r--include/net/red.h4
-rw-r--r--include/net/route.h5
-rw-r--r--include/net/sch_generic.h2
-rw-r--r--include/net/sctp/sctp.h4
-rw-r--r--include/net/sctp/structs.h7
-rw-r--r--include/net/secure_seq.h8
-rw-r--r--include/net/seg6.h62
-rw-r--r--include/net/seg6_hmac.h62
-rw-r--r--include/net/sock.h136
-rw-r--r--include/net/tc_act/tc_mirred.h6
-rw-r--r--include/net/tc_act/tc_skbedit.h1
-rw-r--r--include/net/tc_act/tc_tunnel_key.h37
-rw-r--r--include/net/tcp.h24
-rw-r--r--include/net/udp.h20
-rw-r--r--include/net/udplite.h3
-rw-r--r--include/net/vxlan.h10
-rw-r--r--include/rdma/ib_addr.h6
-rw-r--r--include/rdma/ib_cm.h6
-rw-r--r--include/rdma/ib_mad.h2
-rw-r--r--include/rdma/ib_verbs.h72
-rw-r--r--include/rdma/iw_cm.h6
-rw-r--r--include/rdma/opa_smi.h2
-rw-r--r--include/rdma/rdma_cm.h25
-rw-r--r--include/rdma/rdma_vt.h46
-rw-r--r--include/rdma/rdmavt_mr.h10
-rw-r--r--include/rdma/rdmavt_qp.h77
-rw-r--r--include/scsi/iscsi_proto.h2
-rw-r--r--include/scsi/libfc.h206
-rw-r--r--include/scsi/scsi_device.h4
-rw-r--r--include/scsi/scsi_host.h8
-rw-r--r--include/scsi/scsi_proto.h17
-rw-r--r--include/scsi/scsi_transport_fc.h62
-rw-r--r--include/soc/arc/aux.h63
-rw-r--r--include/soc/arc/mcip.h103
-rw-r--r--include/soc/arc/timers.h38
-rw-r--r--include/soc/at91/atmel-secumod.h19
-rw-r--r--include/soc/bcm2835/raspberrypi-firmware.h21
-rw-r--r--include/soc/fsl/qman.h62
-rw-r--r--include/soc/nps/mtm.h59
-rw-r--r--include/soc/tegra/bpmp-abi.h1601
-rw-r--r--include/soc/tegra/bpmp.h141
-rw-r--r--include/soc/tegra/ivc.h109
-rw-r--r--include/soc/tegra/pmc.h126
-rw-r--r--include/sound/compress_driver.h1
-rw-r--r--include/sound/core.h20
-rw-r--r--include/sound/dmaengine_pcm.h6
-rw-r--r--include/sound/emu10k1.h3
-rw-r--r--include/sound/hda_i915.h11
-rw-r--r--include/sound/hdmi-codec.h8
-rw-r--r--include/sound/soc-dai.h3
-rw-r--r--include/sound/soc.h25
-rw-r--r--include/target/iscsi/iscsi_target_core.h14
-rw-r--r--include/target/iscsi/iscsi_target_stat.h4
-rw-r--r--include/target/iscsi/iscsi_transport.h6
-rw-r--r--include/target/target_core_backend.h6
-rw-r--r--include/target/target_core_base.h14
-rw-r--r--include/target/target_core_fabric.h4
-rw-r--r--include/trace/events/alarmtimer.h96
-rw-r--r--include/trace/events/bcache.h12
-rw-r--r--include/trace/events/block.h31
-rw-r--r--include/trace/events/btrfs.h67
-rw-r--r--include/trace/events/dma_fence.h (renamed from include/trace/events/fence.h)44
-rw-r--r--include/trace/events/f2fs.h33
-rw-r--r--include/trace/events/i2c.h2
-rw-r--r--include/trace/events/mdio.h42
-rw-r--r--include/trace/events/mmflags.h2
-rw-r--r--include/trace/events/rcu.h5
-rw-r--r--include/trace/events/swiotlb.h17
-rw-r--r--include/trace/events/timer.h16
-rw-r--r--include/trace/events/wbt.h153
-rw-r--r--include/uapi/asm-generic/socket.h2
-rw-r--r--include/uapi/drm/amdgpu_drm.h92
-rw-r--r--include/uapi/drm/drm_mode.h59
-rw-r--r--include/uapi/drm/i915_drm.h5
-rw-r--r--include/uapi/drm/msm_drm.h25
-rw-r--r--include/uapi/drm/vc4_drm.h2
-rw-r--r--include/uapi/linux/Kbuild5
-rw-r--r--include/uapi/linux/audit.h5
-rw-r--r--include/uapi/linux/blkzoned.h143
-rw-r--r--include/uapi/linux/bpf.h642
-rw-r--r--include/uapi/linux/cec-funcs.h (renamed from include/linux/cec-funcs.h)76
-rw-r--r--include/uapi/linux/cec.h (renamed from include/linux/cec.h)94
-rw-r--r--include/uapi/linux/cryptouser.h5
-rw-r--r--include/uapi/linux/devlink.h8
-rw-r--r--include/uapi/linux/dm-log-userspace.h53
-rw-r--r--include/uapi/linux/ethtool.h18
-rw-r--r--include/uapi/linux/fib_rules.h6
-rw-r--r--include/uapi/linux/fs.h18
-rw-r--r--include/uapi/linux/genetlink.h3
-rw-r--r--include/uapi/linux/hw_breakpoint.h4
-rw-r--r--include/uapi/linux/if_ether.h3
-rw-r--r--include/uapi/linux/if_link.h6
-rw-r--r--include/uapi/linux/if_pppol2tp.h13
-rw-r--r--include/uapi/linux/iio/types.h2
-rw-r--r--include/uapi/linux/in.h1
-rw-r--r--include/uapi/linux/in6.h1
-rw-r--r--include/uapi/linux/inet_diag.h17
-rw-r--r--include/uapi/linux/ipv6.h4
-rw-r--r--include/uapi/linux/kvm.h5
-rw-r--r--include/uapi/linux/l2tp.h21
-rw-r--r--include/uapi/linux/lwtunnel.h24
-rw-r--r--include/uapi/linux/magic.h1
-rw-r--r--include/uapi/linux/major.h2
-rw-r--r--include/uapi/linux/mmc/ioctl.h2
-rw-r--r--include/uapi/linux/nbd.h9
-rw-r--r--include/uapi/linux/net_tstamp.h3
-rw-r--r--include/uapi/linux/netfilter.h2
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_tuple_common.h3
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h133
-rw-r--r--include/uapi/linux/netfilter/xt_bpf.h21
-rw-r--r--include/uapi/linux/nl80211.h76
-rw-r--r--include/uapi/linux/openvswitch.h15
-rw-r--r--include/uapi/linux/pci_regs.h8
-rw-r--r--include/uapi/linux/pkt_cls.h28
-rw-r--r--include/uapi/linux/raid/md_p.h7
-rw-r--r--include/uapi/linux/rtnetlink.h1
-rw-r--r--include/uapi/linux/seg6.h54
-rw-r--r--include/uapi/linux/seg6_genl.h32
-rw-r--r--include/uapi/linux/seg6_hmac.h21
-rw-r--r--include/uapi/linux/seg6_iptunnel.h44
-rw-r--r--include/uapi/linux/sockios.h3
-rw-r--r--include/uapi/linux/tc_act/tc_bpf.h1
-rw-r--r--include/uapi/linux/tc_act/tc_skbedit.h2
-rw-r--r--include/uapi/linux/tc_act/tc_tunnel_key.h1
-rw-r--r--include/uapi/linux/tcp.h12
-rw-r--r--include/uapi/linux/types.h4
-rw-r--r--include/uapi/linux/uleds.h24
-rw-r--r--include/uapi/linux/usb/ch9.h24
-rw-r--r--include/uapi/linux/usb/functionfs.h1
-rw-r--r--include/uapi/linux/v4l2-controls.h1
-rw-r--r--include/uapi/linux/v4l2-dv-timings.h97
-rw-r--r--include/uapi/linux/vfio.h10
-rw-r--r--include/uapi/linux/vhost.h2
-rw-r--r--include/uapi/linux/videodev2.h118
-rw-r--r--include/uapi/linux/virtio_crypto.h450
-rw-r--r--include/uapi/linux/virtio_ids.h1
-rw-r--r--include/uapi/linux/virtio_types.h6
-rw-r--r--include/uapi/linux/vtpm_proxy.h23
-rw-r--r--include/uapi/rdma/Kbuild2
-rw-r--r--include/uapi/rdma/hfi/hfi1_user.h2
-rw-r--r--include/uapi/rdma/hns-abi.h54
-rw-r--r--include/uapi/rdma/ib_user_verbs.h38
-rw-r--r--include/uapi/rdma/mlx5-abi.h38
-rw-r--r--include/uapi/rdma/rdma_user_cm.h12
-rw-r--r--include/uapi/rdma/vmw_pvrdma-abi.h289
-rw-r--r--include/uapi/scsi/fc/fc_fs.h2
-rw-r--r--include/video/display_timing.h4
-rw-r--r--include/video/imx-ipu-v3.h3
-rw-r--r--include/video/of_display_timing.h15
-rw-r--r--include/xen/arm/hypercall.h87
-rw-r--r--include/xen/arm/hypervisor.h39
-rw-r--r--include/xen/arm/interface.h85
-rw-r--r--include/xen/arm/page-coherent.h98
-rw-r--r--include/xen/arm/page.h122
-rw-r--r--include/xen/swiotlb-xen.h3
-rw-r--r--include/xen/xenbus.h4
620 files changed, 20411 insertions, 5749 deletions
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 12c2882bf647..d25da936750e 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -146,7 +146,7 @@
/* Maximum number of While() loops before abort */
-#define ACPI_MAX_LOOP_COUNT 0xFFFF
+#define ACPI_MAX_LOOP_COUNT 0x000FFFFF
/******************************************************************************
*
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index c1a524de67c5..4242c31ffaee 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -573,6 +573,8 @@ struct acpi_pci_root {
bool acpi_dma_supported(struct acpi_device *adev);
enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
+void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr);
+void acpi_dma_deconfigure(struct device *dev);
struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
u64 address, bool check_children);
diff --git a/include/acpi/acpi_io.h b/include/acpi/acpi_io.h
index d7d0f495a34e..303315b9693f 100644
--- a/include/acpi/acpi_io.h
+++ b/include/acpi/acpi_io.h
@@ -13,6 +13,8 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
}
#endif
+extern bool acpi_permanent_mmap;
+
void __iomem *__ref
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index c7b3a132dbe7..f5e10dd8e86b 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20160831
+#define ACPI_CA_VERSION 0x20160930
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -259,6 +259,13 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0);
ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
/*
+ * Maximum number of While() loop iterations before forced method abort.
+ * This mechanism is intended to prevent infinite loops during interpreter
+ * execution within a host kernel.
+ */
+ACPI_INIT_GLOBAL(u32, acpi_gbl_max_loop_iterations, ACPI_MAX_LOOP_COUNT);
+
+/*
* This mechanism is used to trace a specified AML method. The method is
* traced each time it is executed.
*/
@@ -506,10 +513,12 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_table(acpi_string signature, u32 instance,
struct acpi_table_header
**out_table))
+ACPI_EXTERNAL_RETURN_VOID(void acpi_put_table(struct acpi_table_header *table))
+
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_get_table_by_index(u32 table_index,
- struct acpi_table_header
- **out_table))
+ acpi_get_table_by_index(u32 table_index,
+ struct acpi_table_header
+ **out_table))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_install_table_handler(acpi_table_handler
handler, void *context))
@@ -958,15 +967,6 @@ void acpi_terminate_debugger(void);
/*
* Divergences
*/
-ACPI_GLOBAL(u8, acpi_gbl_permanent_mmap);
-
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_get_table_with_size(acpi_string signature,
- u32 instance,
- struct acpi_table_header
- **out_table,
- acpi_size *tbl_size))
-
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_data_full(acpi_handle object,
acpi_object_handler handler,
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index c19700e2a2fe..da5708caf8a1 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -371,6 +371,7 @@ struct acpi_table_desc {
union acpi_name_union signature;
acpi_owner_id owner_id;
u8 flags;
+ u16 validation_count;
};
/* Masks for Flags field above */
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
index a5509d87230a..7dbb1141f546 100644
--- a/include/acpi/platform/aclinuxex.h
+++ b/include/acpi/platform/aclinuxex.h
@@ -142,7 +142,6 @@ static inline void acpi_os_terminate_command_signals(void)
/*
* OSL interfaces added by Linux
*/
-void early_acpi_os_unmap_memory(void __iomem * virt, acpi_size size);
#endif /* __KERNEL__ */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index f3db11c24654..c1ba00fc4888 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -249,6 +249,7 @@ extern int acpi_processor_register_performance(struct acpi_processor_performance
*performance, unsigned int cpu);
extern void acpi_processor_unregister_performance(unsigned int cpu);
+int acpi_processor_pstate_control(void);
/* note: this locks both the calling module and the processor module
if a _PPC object exists, rmmod is disallowed then */
int acpi_processor_notify_smm(struct module *calling_module);
@@ -294,7 +295,7 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx
#ifdef CONFIG_CPU_FREQ
void acpi_processor_ppc_init(void);
void acpi_processor_ppc_exit(void);
-int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag);
+void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag);
extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit);
#else
static inline void acpi_processor_ppc_init(void)
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 4536bd345ab4..bfe484da55d2 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -30,6 +30,17 @@ struct acpi_device;
#define ACPI_VIDEO_DISPLAY_LEGACY_PANEL 0x0110
#define ACPI_VIDEO_DISPLAY_LEGACY_TV 0x0200
+#define ACPI_VIDEO_NOTIFY_SWITCH 0x80
+#define ACPI_VIDEO_NOTIFY_PROBE 0x81
+#define ACPI_VIDEO_NOTIFY_CYCLE 0x82
+#define ACPI_VIDEO_NOTIFY_NEXT_OUTPUT 0x83
+#define ACPI_VIDEO_NOTIFY_PREV_OUTPUT 0x84
+#define ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS 0x85
+#define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS 0x86
+#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87
+#define ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS 0x88
+#define ACPI_VIDEO_NOTIFY_DISPLAY_OFF 0x89
+
enum acpi_backlight_type {
acpi_backlight_undef = -1,
acpi_backlight_none = 0,
diff --git a/include/asm-generic/asm-prototypes.h b/include/asm-generic/asm-prototypes.h
new file mode 100644
index 000000000000..939869c772b1
--- /dev/null
+++ b/include/asm-generic/asm-prototypes.h
@@ -0,0 +1,13 @@
+#include <linux/bitops.h>
+#undef __memset
+extern void *__memset(void *, int, __kernel_size_t);
+#undef __memcpy
+extern void *__memcpy(void *, const void *, __kernel_size_t);
+#undef __memmove
+extern void *__memmove(void *, const void *, __kernel_size_t);
+#undef memset
+extern void *memset(void *, int, __kernel_size_t);
+#undef memcpy
+extern void *memcpy(void *, const void *, __kernel_size_t);
+#undef memmove
+extern void *memmove(void *, const void *, __kernel_size_t);
diff --git a/include/asm-generic/cputime_jiffies.h b/include/asm-generic/cputime_jiffies.h
index fe386fc6e85e..6bb8cd45f53b 100644
--- a/include/asm-generic/cputime_jiffies.h
+++ b/include/asm-generic/cputime_jiffies.h
@@ -7,7 +7,6 @@ typedef unsigned long __nocast cputime_t;
#define cputime_one_jiffy jiffies_to_cputime(1)
#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct)
-#define cputime_to_scaled(__ct) (__ct)
#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz)
typedef u64 __nocast cputime64_t;
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
index a84e28e0c634..4e3b18e559b1 100644
--- a/include/asm-generic/cputime_nsecs.h
+++ b/include/asm-generic/cputime_nsecs.h
@@ -34,7 +34,6 @@ typedef u64 __nocast cputime64_t;
*/
#define cputime_to_jiffies(__ct) \
cputime_div(__ct, NSEC_PER_SEC / HZ)
-#define cputime_to_scaled(__ct) (__ct)
#define jiffies_to_cputime(__jif) \
(__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
#define cputime64_to_jiffies64(__ct) \
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
deleted file mode 100644
index c54829d3de37..000000000000
--- a/include/asm-generic/mutex-dec.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * include/asm-generic/mutex-dec.h
- *
- * Generic implementation of the mutex fastpath, based on atomic
- * decrement/increment.
- */
-#ifndef _ASM_GENERIC_MUTEX_DEC_H
-#define _ASM_GENERIC_MUTEX_DEC_H
-
-/**
- * __mutex_fastpath_lock - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
- *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function MUST leave the value lower than
- * 1 even when the "1" assertion wasn't true.
- */
-static inline void
-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- if (unlikely(atomic_dec_return_acquire(count) < 0))
- fail_fn(count);
-}
-
-/**
- * __mutex_fastpath_lock_retval - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- *
- * Change the count from 1 to a value lower than 1. This function returns 0
- * if the fastpath succeeds, or -1 otherwise.
- */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count)
-{
- if (unlikely(atomic_dec_return_acquire(count) < 0))
- return -1;
- return 0;
-}
-
-/**
- * __mutex_fastpath_unlock - try to promote the count from 0 to 1
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 0
- *
- * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
- * In the failure case, this function is allowed to either set the value to
- * 1, or to set it to a value lower than 1.
- *
- * If the implementation sets it to a value of lower than 1, then the
- * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
- * to return 0 otherwise.
- */
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- if (unlikely(atomic_inc_return_release(count) <= 0))
- fail_fn(count);
-}
-
-#define __mutex_slowpath_needs_to_unlock() 1
-
-/**
- * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
- *
- * @count: pointer of type atomic_t
- * @fail_fn: fallback function
- *
- * Change the count from 1 to a value lower than 1, and return 0 (failure)
- * if it wasn't 1 originally, or return 1 (success) otherwise. This function
- * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
- * Additionally, if the value was < 0 originally, this function must not leave
- * it to 0 on failure.
- *
- * If the architecture has no effective trylock variant, it should call the
- * <fail_fn> spinlock-based trylock variant unconditionally.
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- if (likely(atomic_read(count) == 1 && atomic_cmpxchg_acquire(count, 1, 0) == 1))
- return 1;
- return 0;
-}
-
-#endif
diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h
deleted file mode 100644
index 61069ed334e2..000000000000
--- a/include/asm-generic/mutex-null.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * include/asm-generic/mutex-null.h
- *
- * Generic implementation of the mutex fastpath, based on NOP :-)
- *
- * This is used by the mutex-debugging infrastructure, but it can also
- * be used by architectures that (for whatever reason) want to use the
- * spinlock based slowpath.
- */
-#ifndef _ASM_GENERIC_MUTEX_NULL_H
-#define _ASM_GENERIC_MUTEX_NULL_H
-
-#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count)
-#define __mutex_fastpath_lock_retval(count) (-1)
-#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count)
-#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count)
-#define __mutex_slowpath_needs_to_unlock() 1
-
-#endif
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
deleted file mode 100644
index 3269ec4e195f..000000000000
--- a/include/asm-generic/mutex-xchg.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * include/asm-generic/mutex-xchg.h
- *
- * Generic implementation of the mutex fastpath, based on xchg().
- *
- * NOTE: An xchg based implementation might be less optimal than an atomic
- * decrement/increment based implementation. If your architecture
- * has a reasonable atomic dec/inc then you should probably use
- * asm-generic/mutex-dec.h instead, or you could open-code an
- * optimized version in asm/mutex.h.
- */
-#ifndef _ASM_GENERIC_MUTEX_XCHG_H
-#define _ASM_GENERIC_MUTEX_XCHG_H
-
-/**
- * __mutex_fastpath_lock - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
- *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
- * wasn't 1 originally. This function MUST leave the value lower than 1
- * even when the "1" assertion wasn't true.
- */
-static inline void
-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- if (unlikely(atomic_xchg(count, 0) != 1))
- /*
- * We failed to acquire the lock, so mark it contended
- * to ensure that any waiting tasks are woken up by the
- * unlock slow path.
- */
- if (likely(atomic_xchg_acquire(count, -1) != 1))
- fail_fn(count);
-}
-
-/**
- * __mutex_fastpath_lock_retval - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- *
- * Change the count from 1 to a value lower than 1. This function returns 0
- * if the fastpath succeeds, or -1 otherwise.
- */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count)
-{
- if (unlikely(atomic_xchg_acquire(count, 0) != 1))
- if (likely(atomic_xchg(count, -1) != 1))
- return -1;
- return 0;
-}
-
-/**
- * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 0
- *
- * try to promote the mutex from 0 to 1. if it wasn't 0, call <function>
- * In the failure case, this function is allowed to either set the value to
- * 1, or to set it to a value lower than one.
- * If the implementation sets it to a value of lower than one, the
- * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
- * to return 0 otherwise.
- */
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- if (unlikely(atomic_xchg_release(count, 1) != 0))
- fail_fn(count);
-}
-
-#define __mutex_slowpath_needs_to_unlock() 0
-
-/**
- * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
- *
- * @count: pointer of type atomic_t
- * @fail_fn: spinlock based trylock implementation
- *
- * Change the count from 1 to a value lower than 1, and return 0 (failure)
- * if it wasn't 1 originally, or return 1 (success) otherwise. This function
- * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
- * Additionally, if the value was < 0 originally, this function must not leave
- * it to 0 on failure.
- *
- * If the architecture has no effective trylock variant, it should call the
- * <fail_fn> spinlock-based trylock variant unconditionally.
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- int prev;
-
- if (atomic_read(count) != 1)
- return 0;
-
- prev = atomic_xchg_acquire(count, 0);
- if (unlikely(prev < 0)) {
- /*
- * The lock was marked contended so we must restore that
- * state. If while doing so we get back a prev value of 1
- * then we just own it.
- *
- * [ In the rare case of the mutex going to 1, to 0, to -1
- * and then back to 0 in this few-instructions window,
- * this has the potential to trigger the slowpath for the
- * owner's unlock path needlessly, but that's not a problem
- * in practice. ]
- */
- prev = atomic_xchg_acquire(count, prev);
- if (prev < 0)
- prev = 0;
- }
-
- return prev;
-}
-
-#endif
diff --git a/include/asm-generic/mutex.h b/include/asm-generic/mutex.h
deleted file mode 100644
index fe91ab502793..000000000000
--- a/include/asm-generic/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __ASM_GENERIC_MUTEX_H
-#define __ASM_GENERIC_MUTEX_H
-/*
- * Pull in the generic implementation for the mutex fastpath,
- * which is a reasonable default on many architectures.
- */
-
-#include <asm-generic/mutex-dec.h>
-#endif /* __ASM_GENERIC_MUTEX_H */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index c4f8fd2fd384..18af2bcefe6a 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -558,10 +558,9 @@ static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
* track_pfn_insert is called when a _new_ single pfn is established
* by vm_insert_pfn().
*/
-static inline int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
- pfn_t pfn)
+static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+ pfn_t pfn)
{
- return 0;
}
/*
@@ -593,8 +592,8 @@ static inline void untrack_pfn_moved(struct vm_area_struct *vma)
extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
unsigned long pfn, unsigned long addr,
unsigned long size);
-extern int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
- pfn_t pfn);
+extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+ pfn_t pfn);
extern int track_pfn_copy(struct vm_area_struct *vma);
extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
unsigned long size);
@@ -653,18 +652,9 @@ static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
}
#endif
-#ifndef pmd_move_must_withdraw
-static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
- spinlock_t *old_pmd_ptl)
-{
- /*
- * With split pmd lock we also need to move preallocated
- * PTE page table if new_pmd is on different PMD page table.
- */
- return new_pmd_ptl != old_pmd_ptl;
-}
+#ifndef arch_needs_pgtable_deposit
+#define arch_needs_pgtable_deposit() (false)
#endif
-
/*
* This function is meant to be used by sites walking pagetables with
* the mmap_sem hold in read mode to protect against MADV_DONTNEED and
diff --git a/include/asm-generic/termios-base.h b/include/asm-generic/termios-base.h
index 0a769feb22b0..157bbf6f4510 100644
--- a/include/asm-generic/termios-base.h
+++ b/include/asm-generic/termios-base.h
@@ -4,7 +4,7 @@
#ifndef _ASM_GENERIC_TERMIOS_BASE_H
#define _ASM_GENERIC_TERMIOS_BASE_H
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#ifndef __ARCH_TERMIO_GETPUT
diff --git a/include/asm-generic/termios.h b/include/asm-generic/termios.h
index 4fa6fe0fc2a2..8c13a16b074e 100644
--- a/include/asm-generic/termios.h
+++ b/include/asm-generic/termios.h
@@ -2,7 +2,7 @@
#define _ASM_GENERIC_TERMIOS_H
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <uapi/asm-generic/termios.h>
/* intr=^C quit=^\ erase=del kill=^U
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index c6d667187608..7eed8cf3130a 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -107,11 +107,6 @@ struct mmu_gather {
struct mmu_gather_batch local;
struct page *__pages[MMU_GATHER_BUNDLE];
unsigned int batch_count;
- /*
- * __tlb_adjust_range will track the new addr here,
- * that that we can adjust the range after the flush
- */
- unsigned long addr;
int page_size;
};
@@ -125,16 +120,11 @@ extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
int page_size);
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
- unsigned long address)
+ unsigned long address,
+ unsigned int range_size)
{
tlb->start = min(tlb->start, address);
- tlb->end = max(tlb->end, address + PAGE_SIZE);
- /*
- * Track the last address with which we adjusted the range. This
- * will be used later to adjust again after a mmu_flush due to
- * failed __tlb_remove_page
- */
- tlb->addr = address;
+ tlb->end = max(tlb->end, address + range_size);
}
static inline void __tlb_reset_range(struct mmu_gather *tlb)
@@ -150,15 +140,11 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
- if (__tlb_remove_page_size(tlb, page, page_size)) {
+ if (__tlb_remove_page_size(tlb, page, page_size))
tlb_flush_mmu(tlb);
- tlb->page_size = page_size;
- __tlb_adjust_range(tlb, tlb->addr);
- __tlb_remove_page_size(tlb, page, page_size);
- }
}
-static bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
@@ -172,14 +158,21 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
-static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page)
+#ifndef tlb_remove_check_page_size_change
+#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
+static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+ unsigned int page_size)
{
- /* active->nr should be zero when we call this */
- VM_BUG_ON_PAGE(tlb->active->nr, page);
- tlb->page_size = PAGE_SIZE;
- __tlb_adjust_range(tlb, tlb->addr);
- return __tlb_remove_page(tlb, page);
+ /*
+ * We don't care about page size change, just update
+ * mmu_gather page size here so that debug checks
+ * doesn't throw false warning.
+ */
+#ifdef CONFIG_DEBUG_VM
+ tlb->page_size = page_size;
+#endif
}
+#endif
/*
* In the case of tlb vma handling, we can optimise these away in the
@@ -215,10 +208,16 @@ static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *pa
*/
#define tlb_remove_tlb_entry(tlb, ptep, address) \
do { \
- __tlb_adjust_range(tlb, address); \
+ __tlb_adjust_range(tlb, address, PAGE_SIZE); \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+ do { \
+ __tlb_adjust_range(tlb, address, huge_page_size(h)); \
+ __tlb_remove_tlb_entry(tlb, ptep, address); \
+ } while (0)
+
/**
* tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
* This is a nop so far, because only x86 needs it.
@@ -227,29 +226,47 @@ static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *pa
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif
-#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
- do { \
- __tlb_adjust_range(tlb, address); \
- __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
+#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
+ do { \
+ __tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \
+ __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
} while (0)
+/*
+ * For things like page tables caches (ie caching addresses "inside" the
+ * page tables, like x86 does), for legacy reasons, flushing an
+ * individual page had better flush the page table caches behind it. This
+ * is definitely how x86 works, for example. And if you have an
+ * architected non-legacy page table cache (which I'm not aware of
+ * anybody actually doing), you're going to have some architecturally
+ * explicit flushing for that, likely *separate* from a regular TLB entry
+ * flush, and thus you'd need more than just some range expansion..
+ *
+ * So if we ever find an architecture
+ * that would want something that odd, I think it is up to that
+ * architecture to do its own odd thing, not cause pain for others
+ * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
+ *
+ * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
+ */
+
#define pte_free_tlb(tlb, ptep, address) \
do { \
- __tlb_adjust_range(tlb, address); \
+ __tlb_adjust_range(tlb, address, PAGE_SIZE); \
__pte_free_tlb(tlb, ptep, address); \
} while (0)
#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address) \
do { \
- __tlb_adjust_range(tlb, address); \
+ __tlb_adjust_range(tlb, address, PAGE_SIZE); \
__pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif
#define pmd_free_tlb(tlb, pmdp, address) \
do { \
- __tlb_adjust_range(tlb, address); \
+ __tlb_adjust_range(tlb, address, PAGE_SIZE); \
__pmd_free_tlb(tlb, pmdp, address); \
} while (0)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 31e1d639abed..0968d13b3885 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -114,7 +114,7 @@
#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
- *(_kprobe_blacklist) \
+ KEEP(*(_kprobe_blacklist)) \
VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
#else
#define KPROBE_BLACKLIST()
@@ -123,10 +123,10 @@
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_ftrace_events) = .; \
- *(_ftrace_events) \
+ KEEP(*(_ftrace_events)) \
VMLINUX_SYMBOL(__stop_ftrace_events) = .; \
VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .; \
- *(_ftrace_enum_map) \
+ KEEP(*(_ftrace_enum_map)) \
VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
#else
#define FTRACE_EVENTS()
@@ -134,10 +134,10 @@
#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
- *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
+ KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
- *(__tracepoint_str) /* Trace_printk fmt' pointer */ \
+ KEEP(*(__tracepoint_str)) /* Trace_printk fmt' pointer */ \
VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
#else
#define TRACE_PRINTKS()
@@ -147,7 +147,7 @@
#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
- *(__syscalls_metadata) \
+ KEEP(*(__syscalls_metadata)) \
VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
@@ -156,7 +156,7 @@
#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() STRUCT_ALIGN(); \
VMLINUX_SYMBOL(__earlycon_table) = .; \
- *(__earlycon_table) \
+ KEEP(*(__earlycon_table)) \
VMLINUX_SYMBOL(__earlycon_table_end) = .;
#else
#define EARLYCON_TABLE()
@@ -169,8 +169,8 @@
#define _OF_TABLE_1(name) \
. = ALIGN(8); \
VMLINUX_SYMBOL(__##name##_of_table) = .; \
- *(__##name##_of_table) \
- *(__##name##_of_table_end)
+ KEEP(*(__##name##_of_table)) \
+ KEEP(*(__##name##_of_table_end))
#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
@@ -184,7 +184,7 @@
#define ACPI_PROBE_TABLE(name) \
. = ALIGN(8); \
VMLINUX_SYMBOL(__##name##_acpi_probe_table) = .; \
- *(__##name##_acpi_probe_table) \
+ KEEP(*(__##name##_acpi_probe_table)) \
VMLINUX_SYMBOL(__##name##_acpi_probe_table_end) = .;
#else
#define ACPI_PROBE_TABLE(name)
@@ -193,7 +193,7 @@
#define KERNEL_DTB() \
STRUCT_ALIGN(); \
VMLINUX_SYMBOL(__dtb_start) = .; \
- *(.dtb.init.rodata) \
+ KEEP(*(.dtb.init.rodata)) \
VMLINUX_SYMBOL(__dtb_end) = .;
/*
@@ -214,11 +214,11 @@
/* implement dynamic printk debug */ \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___jump_table) = .; \
- *(__jump_table) \
+ KEEP(*(__jump_table)) \
VMLINUX_SYMBOL(__stop___jump_table) = .; \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___verbose) = .; \
- *(__verbose) \
+ KEEP(*(__verbose)) \
VMLINUX_SYMBOL(__stop___verbose) = .; \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
@@ -274,10 +274,10 @@
VMLINUX_SYMBOL(__start_rodata) = .; \
*(.rodata) *(.rodata.*) \
RO_AFTER_INIT_DATA /* Read only after init */ \
- *(__vermagic) /* Kernel version magic */ \
+ KEEP(*(__vermagic)) /* Kernel version magic */ \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
- *(__tracepoints_ptrs) /* Tracepoints: pointer array */\
+ KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
*(__tracepoints_strings)/* Tracepoints: strings */ \
} \
@@ -291,35 +291,35 @@
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
- *(.pci_fixup_early) \
+ KEEP(*(.pci_fixup_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
- *(.pci_fixup_header) \
+ KEEP(*(.pci_fixup_header)) \
VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
- *(.pci_fixup_final) \
+ KEEP(*(.pci_fixup_final)) \
VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
- *(.pci_fixup_enable) \
+ KEEP(*(.pci_fixup_enable)) \
VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
- *(.pci_fixup_resume) \
+ KEEP(*(.pci_fixup_resume)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
- *(.pci_fixup_resume_early) \
+ KEEP(*(.pci_fixup_resume_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
- *(.pci_fixup_suspend) \
+ KEEP(*(.pci_fixup_suspend)) \
VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \
- *(.pci_fixup_suspend_late) \
+ KEEP(*(.pci_fixup_suspend_late)) \
VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \
} \
\
/* Built-in firmware blobs */ \
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_builtin_fw) = .; \
- *(.builtin_fw) \
+ KEEP(*(.builtin_fw)) \
VMLINUX_SYMBOL(__end_builtin_fw) = .; \
} \
\
@@ -397,7 +397,7 @@
\
/* Kernel symbol table: strings */ \
__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
- KEEP(*(__ksymtab_strings)) \
+ *(__ksymtab_strings) \
} \
\
/* __*init sections */ \
@@ -410,14 +410,14 @@
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___param) = .; \
- *(__param) \
+ KEEP(*(__param)) \
VMLINUX_SYMBOL(__stop___param) = .; \
} \
\
/* Built-in module versions. */ \
__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___modver) = .; \
- *(__modver) \
+ KEEP(*(__modver)) \
VMLINUX_SYMBOL(__stop___modver) = .; \
. = ALIGN((align)); \
VMLINUX_SYMBOL(__end_rodata) = .; \
@@ -520,7 +520,7 @@
. = ALIGN(align); \
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ex_table) = .; \
- *(__ex_table) \
+ KEEP(*(__ex_table)) \
VMLINUX_SYMBOL(__stop___ex_table) = .; \
}
@@ -536,9 +536,9 @@
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
VMLINUX_SYMBOL(__ctors_start) = .; \
- *(.ctors) \
- *(SORT(.init_array.*)) \
- *(.init_array) \
+ KEEP(*(.ctors)) \
+ KEEP(*(SORT(.init_array.*))) \
+ KEEP(*(.init_array)) \
VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
@@ -566,6 +566,7 @@
IRQCHIP_OF_MATCH_TABLE() \
ACPI_PROBE_TABLE(irqchip) \
ACPI_PROBE_TABLE(clksrc) \
+ ACPI_PROBE_TABLE(iort) \
EARLYCON_TABLE()
#define INIT_TEXT \
@@ -662,7 +663,7 @@
. = ALIGN(8); \
__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___bug_table) = .; \
- *(__bug_table) \
+ KEEP(*(__bug_table)) \
VMLINUX_SYMBOL(__stop___bug_table) = .; \
}
#else
@@ -674,7 +675,7 @@
. = ALIGN(4); \
.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__tracedata_start) = .; \
- *(.tracedata) \
+ KEEP(*(.tracedata)) \
VMLINUX_SYMBOL(__tracedata_end) = .; \
}
#else
@@ -691,7 +692,7 @@
#define INIT_SETUP(initsetup_align) \
. = ALIGN(initsetup_align); \
VMLINUX_SYMBOL(__setup_start) = .; \
- *(.init.setup) \
+ KEEP(*(.init.setup)) \
VMLINUX_SYMBOL(__setup_end) = .;
#define INIT_CALLS_LEVEL(level) \
diff --git a/include/clocksource/pxa.h b/include/clocksource/pxa.h
index 1efbe5a66958..a9a0f03024a4 100644
--- a/include/clocksource/pxa.h
+++ b/include/clocksource/pxa.h
@@ -12,7 +12,6 @@
#ifndef _CLOCKSOURCE_PXA_H
#define _CLOCKSOURCE_PXA_H
-extern void pxa_timer_nodt_init(int irq, void __iomem *base,
- unsigned long clock_tick_rate);
+extern void pxa_timer_nodt_init(int irq, void __iomem *base);
#endif
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
new file mode 100644
index 000000000000..e328b52425a8
--- /dev/null
+++ b/include/crypto/acompress.h
@@ -0,0 +1,269 @@
+/*
+ * Asynchronous Compression operations
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Weigang Li <weigang.li@intel.com>
+ * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_ACOMP_H
+#define _CRYPTO_ACOMP_H
+#include <linux/crypto.h>
+
+#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001
+
+/**
+ * struct acomp_req - asynchronous (de)compression request
+ *
+ * @base: Common attributes for asynchronous crypto requests
+ * @src: Source Data
+ * @dst: Destination data
+ * @slen: Size of the input buffer
+ * @dlen: Size of the output buffer and number of bytes produced
+ * @flags: Internal flags
+ * @__ctx: Start of private context data
+ */
+struct acomp_req {
+ struct crypto_async_request base;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+ unsigned int slen;
+ unsigned int dlen;
+ u32 flags;
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
+ * struct crypto_acomp - user-instantiated objects which encapsulate
+ * algorithms and core processing logic
+ *
+ * @compress: Function performs a compress operation
+ * @decompress: Function performs a de-compress operation
+ * @dst_free: Frees destination buffer if allocated inside the
+ * algorithm
+ * @reqsize: Context size for (de)compression requests
+ * @base: Common crypto API algorithm data structure
+ */
+struct crypto_acomp {
+ int (*compress)(struct acomp_req *req);
+ int (*decompress)(struct acomp_req *req);
+ void (*dst_free)(struct scatterlist *dst);
+ unsigned int reqsize;
+ struct crypto_tfm base;
+};
+
+/**
+ * struct acomp_alg - asynchronous compression algorithm
+ *
+ * @compress: Function performs a compress operation
+ * @decompress: Function performs a de-compress operation
+ * @dst_free: Frees destination buffer if allocated inside the algorithm
+ * @init: Initialize the cryptographic transformation object.
+ * This function is used to initialize the cryptographic
+ * transformation object. This function is called only once at
+ * the instantiation time, right after the transformation context
+ * was allocated. In case the cryptographic hardware has some
+ * special requirements which need to be handled by software, this
+ * function shall check for the precise requirement of the
+ * transformation and put any software fallbacks in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ *
+ * @reqsize: Context size for (de)compression requests
+ * @base: Common crypto API algorithm data structure
+ */
+struct acomp_alg {
+ int (*compress)(struct acomp_req *req);
+ int (*decompress)(struct acomp_req *req);
+ void (*dst_free)(struct scatterlist *dst);
+ int (*init)(struct crypto_acomp *tfm);
+ void (*exit)(struct crypto_acomp *tfm);
+ unsigned int reqsize;
+ struct crypto_alg base;
+};
+
+/**
+ * DOC: Asynchronous Compression API
+ *
+ * The Asynchronous Compression API is used with the algorithms of type
+ * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
+ */
+
+/**
+ * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * compression algorithm e.g. "deflate"
+ * @type: specifies the type of the algorithm
+ * @mask: specifies the mask for the algorithm
+ *
+ * Allocate a handle for a compression algorithm. The returned struct
+ * crypto_acomp is the handle that is required for any subsequent
+ * API invocation for the compression operations.
+ *
+ * Return: allocated handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
+ u32 mask);
+
+static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
+{
+ return &tfm->base;
+}
+
+static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct acomp_alg, base);
+}
+
+static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_acomp, base);
+}
+
+static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
+{
+ return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
+}
+
+static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
+{
+ return tfm->reqsize;
+}
+
+static inline void acomp_request_set_tfm(struct acomp_req *req,
+ struct crypto_acomp *tfm)
+{
+ req->base.tfm = crypto_acomp_tfm(tfm);
+}
+
+static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
+{
+ return __crypto_acomp_tfm(req->base.tfm);
+}
+
+/**
+ * crypto_free_acomp() -- free ACOMPRESS tfm handle
+ *
+ * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ */
+static inline void crypto_free_acomp(struct crypto_acomp *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
+}
+
+static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_ACOMPRESS;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return crypto_has_alg(alg_name, type, mask);
+}
+
+/**
+ * acomp_request_alloc() -- allocates asynchronous (de)compression request
+ *
+ * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ *
+ * Return: allocated handle in case of success or NULL in case of an error
+ */
+struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
+
+/**
+ * acomp_request_free() -- zeroize and free asynchronous (de)compression
+ * request as well as the output buffer if allocated
+ * inside the algorithm
+ *
+ * @req: request to free
+ */
+void acomp_request_free(struct acomp_req *req);
+
+/**
+ * acomp_request_set_callback() -- Sets an asynchronous callback
+ *
+ * Callback will be called when an asynchronous operation on a given
+ * request is finished.
+ *
+ * @req: request that the callback will be set for
+ * @flgs: specify for instance if the operation may backlog
+ * @cmpl: callback which will be called
+ * @data: private data used by the caller
+ */
+static inline void acomp_request_set_callback(struct acomp_req *req,
+ u32 flgs,
+ crypto_completion_t cmpl,
+ void *data)
+{
+ req->base.complete = cmpl;
+ req->base.data = data;
+ req->base.flags = flgs;
+}
+
+/**
+ * acomp_request_set_params() -- Sets request parameters
+ *
+ * Sets parameters required by an acomp operation
+ *
+ * @req: asynchronous compress request
+ * @src: pointer to input buffer scatterlist
+ * @dst: pointer to output buffer scatterlist. If this is NULL, the
+ * acomp layer will allocate the output memory
+ * @slen: size of the input buffer
+ * @dlen: size of the output buffer. If dst is NULL, this can be used by
+ * the user to specify the maximum amount of memory to allocate
+ */
+static inline void acomp_request_set_params(struct acomp_req *req,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ unsigned int slen,
+ unsigned int dlen)
+{
+ req->src = src;
+ req->dst = dst;
+ req->slen = slen;
+ req->dlen = dlen;
+
+ if (!req->dst)
+ req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
+}
+
+/**
+ * crypto_acomp_compress() -- Invoke asynchronous compress operation
+ *
+ * Function invokes the asynchronous compress operation
+ *
+ * @req: asynchronous compress request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_acomp_compress(struct acomp_req *req)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+ return tfm->compress(req);
+}
+
+/**
+ * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
+ *
+ * Function invokes the asynchronous decompress operation
+ *
+ * @req: asynchronous compress request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_acomp_decompress(struct acomp_req *req)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+ return tfm->decompress(req);
+}
+
+#endif
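
A minimal caller-side sketch of the acomp API added above (illustrative only, not part of the patch): the "deflate" algorithm name, the pre-sized output buffer and the decision to skip a completion callback are assumptions.

#include <crypto/acompress.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Compress @inlen bytes from @in into @out; both buffers are assumed to be
 * kmalloc'ed (scatterlist-mappable) kernel memory. */
static int example_acomp_compress(const void *in, unsigned int inlen,
				  void *out, unsigned int outlen)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	struct scatterlist src, dst;
	int ret;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&src, in, inlen);
	sg_init_one(&dst, out, outlen);
	acomp_request_set_params(req, &src, &dst, inlen, outlen);
	/* No callback: this sketch only covers implementations that complete
	 * synchronously; -EINPROGRESS/-EBUSY would need a completion. */
	acomp_request_set_callback(req, 0, NULL, NULL);

	ret = crypto_acomp_compress(req);

	acomp_request_free(req);
out_free_tfm:
	crypto_free_acomp(tfm);
	return ret;
}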
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 12f84327ca36..03b97629442c 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -55,14 +55,14 @@
* The scatter list pointing to the input data must contain:
*
* * for RFC4106 ciphers, the concatenation of
- * associated authentication data || IV || plaintext or ciphertext. Note, the
- * same IV (buffer) is also set with the aead_request_set_crypt call. Note,
- * the API call of aead_request_set_ad must provide the length of the AAD and
- * the IV. The API call of aead_request_set_crypt only points to the size of
- * the input plaintext or ciphertext.
+ * associated authentication data || IV || plaintext or ciphertext. Note, the
+ * same IV (buffer) is also set with the aead_request_set_crypt call. Note,
+ * the API call of aead_request_set_ad must provide the length of the AAD and
+ * the IV. The API call of aead_request_set_crypt only points to the size of
+ * the input plaintext or ciphertext.
*
* * for "normal" AEAD ciphers, the concatenation of
- * associated authentication data || plaintext or ciphertext.
+ * associated authentication data || plaintext or ciphertext.
*
* It is important to note that if multiple scatter gather list entries form
* the input data mentioned above, the first entry must not point to a NULL
@@ -452,7 +452,7 @@ static inline void aead_request_free(struct aead_request *req)
* completes
*
* The callback function is registered with the aead_request handle and
- * must comply with the following template
+ * must comply with the following template::
*
* void callback_function(struct crypto_async_request *req, int error)
*/
@@ -483,30 +483,18 @@ static inline void aead_request_set_callback(struct aead_request *req,
* destination is the ciphertext. For a decryption operation, the use is
* reversed - the source is the ciphertext and the destination is the plaintext.
*
- * For both src/dst the layout is associated data, plain/cipher text,
- * authentication tag.
- *
- * The content of the AD in the destination buffer after processing
- * will either be untouched, or it will contain a copy of the AD
- * from the source buffer. In order to ensure that it always has
- * a copy of the AD, the user must copy the AD over either before
- * or after processing. Of course this is not relevant if the user
- * is doing in-place processing where src == dst.
- *
- * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption,
- * the caller must concatenate the ciphertext followed by the
- * authentication tag and provide the entire data stream to the
- * decryption operation (i.e. the data length used for the
- * initialization of the scatterlist and the data length for the
- * decryption operation is identical). For encryption, however,
- * the authentication tag is created while encrypting the data.
- * The destination buffer must hold sufficient space for the
- * ciphertext and the authentication tag while the encryption
- * invocation must only point to the plaintext data size. The
- * following code snippet illustrates the memory usage
- * buffer = kmalloc(ptbuflen + (enc ? authsize : 0));
- * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
- * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
+ * The memory structure for cipher operation has the following structure:
+ *
+ * - AEAD encryption input: assoc data || plaintext
+ * - AEAD encryption output: assoc data || ciphertext || auth tag
+ * - AEAD decryption input: assoc data || ciphertext || auth tag
+ * - AEAD decryption output: assoc data || plaintext
+ *
+ * Although the kernel requires the presence of the AAD buffer, it does
+ * not fill the AAD buffer in the output case. If the caller wants that
+ * buffer filled, the caller must either use an in-place cipher operation
+ * (i.e. the same memory location for input and output) or copy the AAD
+ * into the destination buffer separately.
*/
static inline void aead_request_set_crypt(struct aead_request *req,
struct scatterlist *src,
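
For illustration (not part of the patch), a hedged sketch of the buffer layout described above: a single buffer holding assoc data || plaintext with room reserved for the auth tag, where cryptlen covers only the plaintext. The names are assumptions; freeing of the buffer and asynchronous completion handling are elided.

#include <crypto/aead.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_aead_encrypt(struct crypto_aead *tfm,
				struct aead_request *req, u8 *iv,
				const void *assoc, unsigned int assoclen,
				const void *pt, unsigned int ptlen)
{
	unsigned int authsize = crypto_aead_authsize(tfm);
	struct scatterlist sg;
	u8 *buf;

	/* One buffer: assoc data || plaintext, plus room for the auth tag. */
	buf = kmalloc(assoclen + ptlen + authsize, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memcpy(buf, assoc, assoclen);
	memcpy(buf + assoclen, pt, ptlen);

	sg_init_one(&sg, buf, assoclen + ptlen + authsize);
	aead_request_set_ad(req, assoclen);
	/* cryptlen points only at the plaintext; the tag is appended on output */
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

	return crypto_aead_encrypt(req);
}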
diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h
new file mode 100644
index 000000000000..f5b8bfc22e6d
--- /dev/null
+++ b/include/crypto/cbc.h
@@ -0,0 +1,146 @@
+/*
+ * CBC: Cipher Block Chaining mode
+ *
+ * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_CBC_H
+#define _CRYPTO_CBC_H
+
+#include <crypto/internal/skcipher.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+static inline int crypto_cbc_encrypt_segment(
+ struct skcipher_walk *walk, struct crypto_skcipher *tfm,
+ void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
+{
+ unsigned int bsize = crypto_skcipher_blocksize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ u8 *iv = walk->iv;
+
+ do {
+ crypto_xor(iv, src, bsize);
+ fn(tfm, iv, dst);
+ memcpy(iv, dst, bsize);
+
+ src += bsize;
+ dst += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+ return nbytes;
+}
+
+static inline int crypto_cbc_encrypt_inplace(
+ struct skcipher_walk *walk, struct crypto_skcipher *tfm,
+ void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
+{
+ unsigned int bsize = crypto_skcipher_blocksize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *iv = walk->iv;
+
+ do {
+ crypto_xor(src, iv, bsize);
+ fn(tfm, src, src);
+ iv = src;
+
+ src += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+ memcpy(walk->iv, iv, bsize);
+
+ return nbytes;
+}
+
+static inline int crypto_cbc_encrypt_walk(struct skcipher_request *req,
+ void (*fn)(struct crypto_skcipher *,
+ const u8 *, u8 *))
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_walk walk;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while (walk.nbytes) {
+ if (walk.src.virt.addr == walk.dst.virt.addr)
+ err = crypto_cbc_encrypt_inplace(&walk, tfm, fn);
+ else
+ err = crypto_cbc_encrypt_segment(&walk, tfm, fn);
+ err = skcipher_walk_done(&walk, err);
+ }
+
+ return err;
+}
+
+static inline int crypto_cbc_decrypt_segment(
+ struct skcipher_walk *walk, struct crypto_skcipher *tfm,
+ void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
+{
+ unsigned int bsize = crypto_skcipher_blocksize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ u8 *iv = walk->iv;
+
+ do {
+ fn(tfm, src, dst);
+ crypto_xor(dst, iv, bsize);
+ iv = src;
+
+ src += bsize;
+ dst += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+ memcpy(walk->iv, iv, bsize);
+
+ return nbytes;
+}
+
+static inline int crypto_cbc_decrypt_inplace(
+ struct skcipher_walk *walk, struct crypto_skcipher *tfm,
+ void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
+{
+ unsigned int bsize = crypto_skcipher_blocksize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 last_iv[bsize];
+
+ /* Start of the last block. */
+ src += nbytes - (nbytes & (bsize - 1)) - bsize;
+ memcpy(last_iv, src, bsize);
+
+ for (;;) {
+ fn(tfm, src, src);
+ if ((nbytes -= bsize) < bsize)
+ break;
+ crypto_xor(src, src - bsize, bsize);
+ src -= bsize;
+ }
+
+ crypto_xor(src, walk->iv, bsize);
+ memcpy(walk->iv, last_iv, bsize);
+
+ return nbytes;
+}
+
+static inline int crypto_cbc_decrypt_blocks(
+ struct skcipher_walk *walk, struct crypto_skcipher *tfm,
+ void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
+{
+ if (walk->src.virt.addr == walk->dst.virt.addr)
+ return crypto_cbc_decrypt_inplace(walk, tfm, fn);
+ else
+ return crypto_cbc_decrypt_segment(walk, tfm, fn);
+}
+
+#endif /* _CRYPTO_CBC_H */
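
A hypothetical sketch (not part of the patch) of how a cipher implementation could plug its single-block encrypt primitive into the helpers above; the "toy" names are assumptions.

#include <crypto/cbc.h>
#include <crypto/internal/skcipher.h>

/* Encrypts exactly one block of crypto_skcipher_blocksize(tfm) bytes. */
static void toy_encrypt_one_block(struct crypto_skcipher *tfm,
				  const u8 *src, u8 *dst)
{
	/* single-block cipher (hardware or table based) goes here */
}

static int toy_cbc_encrypt(struct skcipher_request *req)
{
	return crypto_cbc_encrypt_walk(req, toy_encrypt_one_block);
}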
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index bc792d5a9e88..94418cbf9013 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -12,10 +12,10 @@
#ifndef _CRYPTO_CRYPT_H
#define _CRYPTO_CRYPT_H
-#include <linux/crypto.h>
#include <linux/kernel.h>
#include <crypto/aead.h>
#include <crypto/hash.h>
+#include <crypto/skcipher.h>
struct cryptd_ablkcipher {
struct crypto_ablkcipher base;
@@ -34,6 +34,17 @@ struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm);
void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);
+struct cryptd_skcipher {
+ struct crypto_skcipher base;
+};
+
+struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
+ u32 type, u32 mask);
+struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm);
+/* Must be called without moving CPUs. */
+bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm);
+void cryptd_free_skcipher(struct cryptd_skcipher *tfm);
+
struct cryptd_ahash {
struct crypto_ahash base;
};
diff --git a/include/crypto/dh.h b/include/crypto/dh.h
index 5102a8f282e6..6b424ad3482e 100644
--- a/include/crypto/dh.h
+++ b/include/crypto/dh.h
@@ -13,6 +13,27 @@
#ifndef _CRYPTO_DH_
#define _CRYPTO_DH_
+/**
+ * DOC: DH Helper Functions
+ *
+ * To use DH with the KPP cipher API, the following data structure and
+ * functions should be used.
+ *
+ * To use DH with KPP, the following functions should be used to operate on
+ * a DH private key. The packet private key can then be set with
+ * the KPP API function call of crypto_kpp_set_secret.
+ */
+
+/**
+ * struct dh - define a DH private key
+ *
+ * @key: Private DH key
+ * @p: Diffie-Hellman parameter P
+ * @g: Diffie-Hellman generator G
+ * @key_size: Size of the private DH key
+ * @p_size: Size of DH parameter P
+ * @g_size: Size of DH generator G
+ */
struct dh {
void *key;
void *p;
@@ -22,8 +43,45 @@ struct dh {
unsigned int g_size;
};
+/**
+ * crypto_dh_key_len() - Obtain the size of the private DH key
+ * @params: private DH key
+ *
+ * This function returns the packet DH key size. A caller can use that
+ * with the provided DH private key reference to obtain the required
+ * memory size to hold a packet key.
+ *
+ * Return: size of the key in bytes
+ */
int crypto_dh_key_len(const struct dh *params);
+
+/**
+ * crypto_dh_encode_key() - encode the private key
+ * @buf: Buffer allocated by the caller to hold the packet DH
+ * private key. The buffer should be at least crypto_dh_key_len
+ * bytes in size.
+ * @len: Length of the packet private key buffer
+ * @params: Buffer with the caller-specified private key
+ *
+ * The DH implementations operate on a packet representation of the private
+ * key.
+ *
+ * Return: -EINVAL if buffer has insufficient size, 0 on success
+ */
int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params);
+
+/**
+ * crypto_dh_decode_key() - decode a private key
+ * @buf: Buffer holding a packet key that should be decoded
+ * @len: Length of the packet private key buffer
+ * @params: Buffer allocated by the caller that is filled with the
+ * unpacked DH private key.
+ *
+ * The unpacking obtains the private key by pointing @p to the correct location
+ * in @buf. Thus, both pointers refer to the same memory.
+ *
+ * Return: -EINVAL if buffer has insufficient size, 0 on success
+ */
int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params);
#endif
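
An illustrative sketch (not part of the patch) of the packing flow described above: encode a struct dh into its packet form and hand it to crypto_kpp_set_secret(); the helper name and error handling are assumptions.

#include <crypto/dh.h>
#include <crypto/kpp.h>
#include <linux/slab.h>

static int example_dh_set_secret(struct crypto_kpp *tfm, const struct dh *params)
{
	unsigned int len = crypto_dh_key_len(params);
	char *packet;
	int ret;

	packet = kmalloc(len, GFP_KERNEL);
	if (!packet)
		return -ENOMEM;

	ret = crypto_dh_encode_key(packet, len, params);
	if (!ret)
		ret = crypto_kpp_set_secret(tfm, packet, len);

	kzfree(packet);	/* key material: zeroize before freeing */
	return ret;
}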
diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h
index 84bad548d194..03a64f62ba7a 100644
--- a/include/crypto/ecdh.h
+++ b/include/crypto/ecdh.h
@@ -13,18 +13,76 @@
#ifndef _CRYPTO_ECDH_
#define _CRYPTO_ECDH_
+/**
+ * DOC: ECDH Helper Functions
+ *
+ * To use ECDH with the KPP cipher API, the following data structure and
+ * functions should be used.
+ *
+ * The ECC curves known to the ECDH implementation are specified in this
+ * header file.
+ *
+ * To use ECDH with KPP, the following functions should be used to operate on
+ * an ECDH private key. The packet private key can then be set with
+ * the KPP API function call of crypto_kpp_set_secret.
+ */
+
/* Curves IDs */
#define ECC_CURVE_NIST_P192 0x0001
#define ECC_CURVE_NIST_P256 0x0002
+/**
+ * struct ecdh - define an ECDH private key
+ *
+ * @curve_id: ECC curve the key is based on.
+ * @key: Private ECDH key
+ * @key_size: Size of the private ECDH key
+ */
struct ecdh {
unsigned short curve_id;
char *key;
unsigned short key_size;
};
+/**
+ * crypto_ecdh_key_len() - Obtain the size of the private ECDH key
+ * @params: private ECDH key
+ *
+ * This function returns the packet ECDH key size. A caller can use that
+ * with the provided ECDH private key reference to obtain the required
+ * memory size to hold a packet key.
+ *
+ * Return: size of the key in bytes
+ */
int crypto_ecdh_key_len(const struct ecdh *params);
+
+/**
+ * crypto_ecdh_encode_key() - encode the private key
+ * @buf: Buffer allocated by the caller to hold the packet ECDH
+ * private key. The buffer should be at least crypto_ecdh_key_len
+ * bytes in size.
+ * @len: Length of the packet private key buffer
+ * @p: Buffer with the caller-specified private key
+ *
+ * The ECDH implementations operate on a packet representation of the private
+ * key.
+ *
+ * Return: -EINVAL if buffer has insufficient size, 0 on success
+ */
int crypto_ecdh_encode_key(char *buf, unsigned int len, const struct ecdh *p);
+
+/**
+ * crypto_ecdh_decode_key() - decode a private key
+ * @buf: Buffer holding a packet key that should be decoded
+ * @len: Length of the packet private key buffer
+ * @p: Buffer allocated by the caller that is filled with the
+ * unpacked ECDH private key.
+ *
+ * The unpacking obtains the private key by pointing @p to the correct location
+ * in @buf. Thus, both pointers refer to the same memory.
+ *
+ * Return: -EINVAL if buffer has insufficient size, 0 on success
+ */
int crypto_ecdh_decode_key(const char *buf, unsigned int len, struct ecdh *p);
#endif
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index 04eb5c77addd..1bf600fc99f7 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -43,8 +43,7 @@
* @prepare_hash_request: do some prepare if need before handle the current request
* @unprepare_hash_request: undo any work done by prepare_hash_request()
* @hash_one_request: do hash for current request
- * @kworker: thread struct for request pump
- * @kworker_task: pointer to task for request pump kworker thread
+ * @kworker: kthread worker struct for request pump
* @pump_requests: work struct for scheduling work to the request pump
* @priv_data: the engine private data
* @cur_req: the current request which is on processing
@@ -78,8 +77,7 @@ struct crypto_engine {
int (*hash_one_request)(struct crypto_engine *engine,
struct ahash_request *req);
- struct kthread_worker kworker;
- struct task_struct *kworker_task;
+ struct kthread_worker *kworker;
struct kthread_work pump_requests;
void *priv_data;
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
index da2530e34b26..592d47e565a8 100644
--- a/include/crypto/gf128mul.h
+++ b/include/crypto/gf128mul.h
@@ -177,24 +177,23 @@ void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t);
static inline void gf128mul_free_4k(struct gf128mul_4k *t)
{
- kfree(t);
+ kzfree(t);
}
-/* 64k table optimization, implemented for lle and bbe */
+/* 64k table optimization, implemented for bbe */
struct gf128mul_64k {
struct gf128mul_4k *t[16];
};
-/* first initialize with the constant factor with which you
- * want to multiply and then call gf128_64k_lle with the other
- * factor in the first argument, the table in the second and a
- * scratch register in the third. Afterwards *a = *r. */
-struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g);
+/* First initialize with the constant factor with which you
+ * want to multiply and then call gf128mul_64k_bbe with the other
+ * factor in the first argument, and the table in the second.
+ * Afterwards, the result is stored in *a.
+ */
struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g);
void gf128mul_free_64k(struct gf128mul_64k *t);
-void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t);
void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t);
#endif /* _CRYPTO_GF128MUL_H */
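
For illustration only (not part of the patch), the calling pattern described in the comment above might look as follows; the names are assumptions.

#include <crypto/gf128mul.h>

/* Computes a = a * h in GF(2^128), using the 64k bbe table built for the
 * fixed factor h. */
static int example_gf128mul_bbe(be128 *a, const be128 *h)
{
	struct gf128mul_64k *table = gf128mul_init_64k_bbe(h);

	if (!table)
		return -ENOMEM;

	gf128mul_64k_bbe(a, table);
	gf128mul_free_64k(table);
	return 0;
}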
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 26605888a199..216a2b876147 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -605,7 +605,7 @@ static inline struct ahash_request *ahash_request_cast(
* the cipher operation completes.
*
* The callback function is registered with the &ahash_request handle and
- * must comply with the following template
+ * must comply with the following template::
*
* void callback_function(struct crypto_async_request *req, int error)
*/
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
new file mode 100644
index 000000000000..1de2b5af12d7
--- /dev/null
+++ b/include/crypto/internal/acompress.h
@@ -0,0 +1,81 @@
+/*
+ * Asynchronous Compression operations
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Weigang Li <weigang.li@intel.com>
+ * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_ACOMP_INT_H
+#define _CRYPTO_ACOMP_INT_H
+#include <crypto/acompress.h>
+
+/*
+ * Transform internal helpers.
+ */
+static inline void *acomp_request_ctx(struct acomp_req *req)
+{
+ return req->__ctx;
+}
+
+static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
+{
+ return tfm->base.__crt_ctx;
+}
+
+static inline void acomp_request_complete(struct acomp_req *req,
+ int err)
+{
+ req->base.complete(&req->base, err);
+}
+
+static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
+{
+ return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
+}
+
+static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
+{
+ struct acomp_req *req;
+
+ req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
+ if (likely(req))
+ acomp_request_set_tfm(req, tfm);
+ return req;
+}
+
+static inline void __acomp_request_free(struct acomp_req *req)
+{
+ kzfree(req);
+}
+
+/**
+ * crypto_register_acomp() -- Register asynchronous compression algorithm
+ *
+ * Function registers an implementation of an asynchronous
+ * compression algorithm
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_acomp(struct acomp_alg *alg);
+
+/**
+ * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
+ *
+ * Function unregisters an implementation of an asynchronous
+ * compression algorithm
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_unregister_acomp(struct acomp_alg *alg);
+
+#endif
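
A hedged sketch (not part of the patch) of how an implementation might register itself through this internal API; the "toy" algorithm, its priority and stub callbacks are assumptions.

#include <crypto/internal/acompress.h>
#include <linux/module.h>

static int toy_compress(struct acomp_req *req)
{
	return -ENOSYS;		/* real compression of req->src into req->dst */
}

static int toy_decompress(struct acomp_req *req)
{
	return -ENOSYS;
}

static struct acomp_alg toy_acomp = {
	.compress	= toy_compress,
	.decompress	= toy_decompress,
	.base		= {
		.cra_name	 = "toy",
		.cra_driver_name = "toy-generic",
		.cra_priority	 = 100,
		.cra_module	 = THIS_MODULE,
	},
};

static int __init toy_acomp_init(void)
{
	return crypto_register_acomp(&toy_acomp);
}

static void __exit toy_acomp_exit(void)
{
	crypto_unregister_acomp(&toy_acomp);
}

module_init(toy_acomp_init);
module_exit(toy_acomp_exit);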
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
new file mode 100644
index 000000000000..3fda3c5655a0
--- /dev/null
+++ b/include/crypto/internal/scompress.h
@@ -0,0 +1,136 @@
+/*
+ * Synchronous Compression operations
+ *
+ * Copyright 2015 LG Electronics Inc.
+ * Copyright (c) 2016, Intel Corporation
+ * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_SCOMP_INT_H
+#define _CRYPTO_SCOMP_INT_H
+#include <linux/crypto.h>
+
+#define SCOMP_SCRATCH_SIZE 131072
+
+struct crypto_scomp {
+ struct crypto_tfm base;
+};
+
+/**
+ * struct scomp_alg - synchronous compression algorithm
+ *
+ * @alloc_ctx: Function allocates algorithm specific context
+ * @free_ctx: Function frees context allocated with alloc_ctx
+ * @compress: Function performs a compress operation
+ * @decompress: Function performs a de-compress operation
+ * @init: Initialize the cryptographic transformation object.
+ * This function is used to initialize the cryptographic
+ * transformation object. This function is called only once at
+ * the instantiation time, right after the transformation context
+ * was allocated. In case the cryptographic hardware has some
+ * special requirements which need to be handled by software, this
+ * function shall check for the precise requirement of the
+ * transformation and put any software fallbacks in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ * @base: Common crypto API algorithm data structure
+ */
+struct scomp_alg {
+ void *(*alloc_ctx)(struct crypto_scomp *tfm);
+ void (*free_ctx)(struct crypto_scomp *tfm, void *ctx);
+ int (*compress)(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx);
+ int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx);
+ struct crypto_alg base;
+};
+
+static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct scomp_alg, base);
+}
+
+static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_scomp, base);
+}
+
+static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm)
+{
+ return &tfm->base;
+}
+
+static inline void crypto_free_scomp(struct crypto_scomp *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm));
+}
+
+static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
+{
+ return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
+}
+
+static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
+{
+ return crypto_scomp_alg(tfm)->alloc_ctx(tfm);
+}
+
+static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
+ void *ctx)
+{
+ return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx);
+}
+
+static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
+{
+ return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx);
+}
+
+static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen,
+ void *ctx)
+{
+ return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen,
+ ctx);
+}
+
+int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
+struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
+void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
+
+/**
+ * crypto_register_scomp() -- Register synchronous compression algorithm
+ *
+ * Function registers an implementation of a synchronous
+ * compression algorithm
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_scomp(struct scomp_alg *alg);
+
+/**
+ * crypto_unregister_scomp() -- Unregister synchronous compression algorithm
+ *
+ * Function unregisters an implementation of a synchronous
+ * compression algorithm
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_unregister_scomp(struct scomp_alg *alg);
+
+#endif
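
By contrast with the asynchronous interface, a scomp implementation works on linear buffers with a caller-provided context. A hypothetical skeleton follows (not part of the patch); the "toy" names and the PAGE_SIZE workspace are assumptions.

#include <crypto/internal/scompress.h>
#include <linux/module.h>
#include <linux/slab.h>

static void *toy_scomp_alloc_ctx(struct crypto_scomp *tfm)
{
	/* per-request workspace; a real driver sizes this for its algorithm */
	return kzalloc(PAGE_SIZE, GFP_KERNEL);
}

static void toy_scomp_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
	kfree(ctx);
}

static int toy_scomp_compress(struct crypto_scomp *tfm, const u8 *src,
			      unsigned int slen, u8 *dst, unsigned int *dlen,
			      void *ctx)
{
	return -ENOSYS;		/* compress src into dst and update *dlen */
}

static struct scomp_alg toy_scomp = {
	.alloc_ctx	= toy_scomp_alloc_ctx,
	.free_ctx	= toy_scomp_free_ctx,
	.compress	= toy_scomp_compress,
	.decompress	= toy_scomp_compress,	/* sketch: same stub */
	.base		= {
		.cra_name	 = "toy",
		.cra_driver_name = "toy-scomp",
		.cra_module	 = THIS_MODULE,
	},
};

/* Registered from module init via crypto_register_scomp(&toy_scomp). */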
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
new file mode 100644
index 000000000000..429509968f68
--- /dev/null
+++ b/include/crypto/internal/simd.h
@@ -0,0 +1,17 @@
+/*
+ * Shared crypto simd helpers
+ */
+
+#ifndef _CRYPTO_INTERNAL_SIMD_H
+#define _CRYPTO_INTERNAL_SIMD_H
+
+struct simd_skcipher_alg;
+
+struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
+ const char *drvname,
+ const char *basename);
+struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
+ const char *basename);
+void simd_skcipher_free(struct simd_skcipher_alg *alg);
+
+#endif /* _CRYPTO_INTERNAL_SIMD_H */
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index a21a95e1a375..8735979ed341 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -15,8 +15,10 @@
#include <crypto/algapi.h>
#include <crypto/skcipher.h>
+#include <linux/list.h>
#include <linux/types.h>
+struct aead_request;
struct rtattr;
struct skcipher_instance {
@@ -34,6 +36,40 @@ struct crypto_skcipher_spawn {
struct crypto_spawn base;
};
+struct skcipher_walk {
+ union {
+ struct {
+ struct page *page;
+ unsigned long offset;
+ } phys;
+
+ struct {
+ u8 *page;
+ void *addr;
+ } virt;
+ } src, dst;
+
+ struct scatter_walk in;
+ unsigned int nbytes;
+
+ struct scatter_walk out;
+ unsigned int total;
+
+ struct list_head buffers;
+
+ u8 *page;
+ u8 *buffer;
+ u8 *oiv;
+ void *iv;
+
+ unsigned int ivsize;
+
+ int flags;
+ unsigned int blocksize;
+ unsigned int chunksize;
+ unsigned int alignmask;
+};
+
extern const struct crypto_type crypto_givcipher_type;
static inline struct crypto_instance *skcipher_crypto_instance(
@@ -68,14 +104,6 @@ static inline void crypto_set_skcipher_spawn(
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
u32 type, u32 mask);
-static inline int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn,
- const char *name, u32 type, u32 mask)
-{
- return crypto_grab_skcipher(spawn, name, type, mask);
-}
-
-struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask);
-
static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
{
crypto_drop_spawn(&spawn->base);
@@ -99,12 +127,6 @@ static inline struct crypto_skcipher *crypto_spawn_skcipher(
return crypto_spawn_tfm2(&spawn->base);
}
-static inline struct crypto_skcipher *crypto_spawn_skcipher2(
- struct crypto_skcipher_spawn *spawn)
-{
- return crypto_spawn_skcipher(spawn);
-}
-
static inline void crypto_skcipher_set_reqsize(
struct crypto_skcipher *skcipher, unsigned int reqsize)
{
@@ -118,6 +140,21 @@ void crypto_unregister_skciphers(struct skcipher_alg *algs, int count);
int skcipher_register_instance(struct crypto_template *tmpl,
struct skcipher_instance *inst);
+int skcipher_walk_done(struct skcipher_walk *walk, int err);
+int skcipher_walk_virt(struct skcipher_walk *walk,
+ struct skcipher_request *req,
+ bool atomic);
+void skcipher_walk_atomise(struct skcipher_walk *walk);
+int skcipher_walk_async(struct skcipher_walk *walk,
+ struct skcipher_request *req);
+int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
+ bool atomic);
+int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
+ struct aead_request *req, bool atomic);
+int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
+ struct aead_request *req, bool atomic);
+void skcipher_walk_complete(struct skcipher_walk *walk, int err);
+
static inline void ablkcipher_request_complete(struct ablkcipher_request *req,
int err)
{
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 30791f75c180..4307a2f2365f 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -71,7 +71,7 @@ struct crypto_kpp {
*
* @reqsize: Request context size required by algorithm
* implementation
- * @base Common crypto API algorithm data structure
+ * @base: Common crypto API algorithm data structure
*/
struct kpp_alg {
int (*set_secret)(struct crypto_kpp *tfm, void *buffer,
@@ -89,7 +89,7 @@ struct kpp_alg {
};
/**
- * DOC: Generic Key-agreement Protocol Primitevs API
+ * DOC: Generic Key-agreement Protocol Primitives API
*
* The KPP API is used with the algorithm type
* CRYPTO_ALG_TYPE_KPP (listed as type "kpp" in /proc/crypto)
@@ -264,6 +264,12 @@ struct kpp_secret {
* Function invokes the specific kpp operation for a given alg.
*
* @tfm: tfm handle
+ * @buffer: Buffer holding the packet representation of the private
+ * key. The structure of the packet key depends on the particular
+ * KPP implementation. Packing and unpacking helpers are provided
+ * for ECDH and DH (see the respective header files for those
+ * implementations).
+ * @len: Length of the packet private key buffer.
*
* Return: zero on success; error code in case of error
*/
@@ -279,7 +285,10 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, void *buffer,
* crypto_kpp_generate_public_key() - Invoke kpp operation
*
* Function invokes the specific kpp operation for generating the public part
- * for a given kpp algorithm
+ * for a given kpp algorithm.
+ *
+ * To generate a private key, the caller should use a random number generator.
+ * The output of the requested length serves as the private key.
*
* @req: kpp key request
*
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index cc4d98a7892e..750b14f1ada4 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -516,7 +516,7 @@ static inline void skcipher_request_zero(struct skcipher_request *req)
* skcipher_request_set_callback() - set asynchronous callback function
* @req: request handle
* @flags: specify zero or an ORing of the flags
- * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
* increase the wait queue beyond the initial maximum size;
* CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
* @compl: callback function pointer to be registered with the request handle
@@ -533,7 +533,7 @@ static inline void skcipher_request_zero(struct skcipher_request *req)
* cipher operation completes.
*
* The callback function is registered with the skcipher_request handle and
- * must comply with the following template
+ * must comply with the following template::
*
* void callback_function(struct crypto_async_request *req, int error)
*/
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
index ede6b97b24cc..77b630672b2c 100644
--- a/include/crypto/xts.h
+++ b/include/crypto/xts.h
@@ -2,8 +2,7 @@
#define _CRYPTO_XTS_H
#include <crypto/b128ops.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
#include <linux/fips.h>
struct scatterlist;
@@ -51,4 +50,27 @@ static inline int xts_check_key(struct crypto_tfm *tfm,
return 0;
}
+static inline int xts_verify_key(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ /*
+ * key consists of keys of equal size concatenated, therefore
+ * the length must be even.
+ */
+ if (keylen % 2) {
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ /* ensure that the AES and tweak key are not identical */
+ if ((fips_enabled || crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_WEAK_KEY) &&
+ !crypto_memneq(key, key + (keylen / 2), keylen / 2)) {
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
#endif /* _CRYPTO_XTS_H */
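
A hypothetical setkey sketch (not part of the patch) using the new skcipher-based helper; the "toy" names are assumptions.

#include <crypto/skcipher.h>
#include <crypto/xts.h>

static int toy_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int keylen)
{
	int err;

	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	/* program the data key key[0 .. keylen/2) and the tweak key
	 * key[keylen/2 .. keylen) into the implementation here */
	return 0;
}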
diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h
new file mode 100644
index 000000000000..3629b2734db6
--- /dev/null
+++ b/include/drm/bridge/mhl.h
@@ -0,0 +1,291 @@
+/*
+ * Defines for Mobile High-Definition Link (MHL) interface
+ *
+ * Copyright (C) 2015, Samsung Electronics, Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * Based on MHL driver for Android devices.
+ * Copyright (C) 2013-2014 Silicon Image, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MHL_H__
+#define __MHL_H__
+
+/* Device Capabilities Registers */
+enum {
+ MHL_DCAP_DEV_STATE,
+ MHL_DCAP_MHL_VERSION,
+ MHL_DCAP_CAT,
+ MHL_DCAP_ADOPTER_ID_H,
+ MHL_DCAP_ADOPTER_ID_L,
+ MHL_DCAP_VID_LINK_MODE,
+ MHL_DCAP_AUD_LINK_MODE,
+ MHL_DCAP_VIDEO_TYPE,
+ MHL_DCAP_LOG_DEV_MAP,
+ MHL_DCAP_BANDWIDTH,
+ MHL_DCAP_FEATURE_FLAG,
+ MHL_DCAP_DEVICE_ID_H,
+ MHL_DCAP_DEVICE_ID_L,
+ MHL_DCAP_SCRATCHPAD_SIZE,
+ MHL_DCAP_INT_STAT_SIZE,
+ MHL_DCAP_RESERVED,
+ MHL_DCAP_SIZE
+};
+
+#define MHL_DCAP_CAT_SINK 0x01
+#define MHL_DCAP_CAT_SOURCE 0x02
+#define MHL_DCAP_CAT_POWER 0x10
+#define MHL_DCAP_CAT_PLIM(x) ((x) << 5)
+
+#define MHL_DCAP_VID_LINK_RGB444 0x01
+#define MHL_DCAP_VID_LINK_YCBCR444 0x02
+#define MHL_DCAP_VID_LINK_YCBCR422 0x04
+#define MHL_DCAP_VID_LINK_PPIXEL 0x08
+#define MHL_DCAP_VID_LINK_ISLANDS 0x10
+#define MHL_DCAP_VID_LINK_VGA 0x20
+#define MHL_DCAP_VID_LINK_16BPP 0x40
+
+#define MHL_DCAP_AUD_LINK_2CH 0x01
+#define MHL_DCAP_AUD_LINK_8CH 0x02
+
+#define MHL_DCAP_VT_GRAPHICS 0x00
+#define MHL_DCAP_VT_PHOTO 0x02
+#define MHL_DCAP_VT_CINEMA 0x04
+#define MHL_DCAP_VT_GAMES 0x08
+#define MHL_DCAP_SUPP_VT 0x80
+
+#define MHL_DCAP_LD_DISPLAY 0x01
+#define MHL_DCAP_LD_VIDEO 0x02
+#define MHL_DCAP_LD_AUDIO 0x04
+#define MHL_DCAP_LD_MEDIA 0x08
+#define MHL_DCAP_LD_TUNER 0x10
+#define MHL_DCAP_LD_RECORD 0x20
+#define MHL_DCAP_LD_SPEAKER 0x40
+#define MHL_DCAP_LD_GUI 0x80
+#define MHL_DCAP_LD_ALL 0xFF
+
+#define MHL_DCAP_FEATURE_RCP_SUPPORT 0x01
+#define MHL_DCAP_FEATURE_RAP_SUPPORT 0x02
+#define MHL_DCAP_FEATURE_SP_SUPPORT 0x04
+#define MHL_DCAP_FEATURE_UCP_SEND_SUPPOR 0x08
+#define MHL_DCAP_FEATURE_UCP_RECV_SUPPORT 0x10
+#define MHL_DCAP_FEATURE_RBP_SUPPORT 0x40
+
+/* Extended Device Capabilities Registers */
+enum {
+ MHL_XDC_ECBUS_SPEEDS,
+ MHL_XDC_TMDS_SPEEDS,
+ MHL_XDC_ECBUS_ROLES,
+ MHL_XDC_LOG_DEV_MAPX,
+ MHL_XDC_SIZE
+};
+
+#define MHL_XDC_ECBUS_S_075 0x01
+#define MHL_XDC_ECBUS_S_8BIT 0x02
+#define MHL_XDC_ECBUS_S_12BIT 0x04
+#define MHL_XDC_ECBUS_D_150 0x10
+#define MHL_XDC_ECBUS_D_8BIT 0x20
+
+#define MHL_XDC_TMDS_000 0x00
+#define MHL_XDC_TMDS_150 0x01
+#define MHL_XDC_TMDS_300 0x02
+#define MHL_XDC_TMDS_600 0x04
+
+/* MHL_XDC_ECBUS_ROLES flags */
+#define MHL_XDC_DEV_HOST 0x01
+#define MHL_XDC_DEV_DEVICE 0x02
+#define MHL_XDC_DEV_CHARGER 0x04
+#define MHL_XDC_HID_HOST 0x08
+#define MHL_XDC_HID_DEVICE 0x10
+
+/* MHL_XDC_LOG_DEV_MAPX flags */
+#define MHL_XDC_LD_PHONE 0x01
+
+/* Device Status Registers */
+enum {
+ MHL_DST_CONNECTED_RDY,
+ MHL_DST_LINK_MODE,
+ MHL_DST_VERSION,
+ MHL_DST_SIZE
+};
+
+/* Offset of DEVSTAT registers */
+#define MHL_DST_OFFSET 0x30
+#define MHL_DST_REG(name) (MHL_DST_OFFSET + MHL_DST_##name)
+
+#define MHL_DST_CONN_DCAP_RDY 0x01
+#define MHL_DST_CONN_XDEVCAPP_SUPP 0x02
+#define MHL_DST_CONN_POW_STAT 0x04
+#define MHL_DST_CONN_PLIM_STAT_MASK 0x38
+
+#define MHL_DST_LM_CLK_MODE_MASK 0x07
+#define MHL_DST_LM_CLK_MODE_PACKED_PIXEL 0x02
+#define MHL_DST_LM_CLK_MODE_NORMAL 0x03
+#define MHL_DST_LM_PATH_EN_MASK 0x08
+#define MHL_DST_LM_PATH_ENABLED 0x08
+#define MHL_DST_LM_PATH_DISABLED 0x00
+#define MHL_DST_LM_MUTED_MASK 0x10
+
+/* Extended Device Status Registers */
+enum {
+ MHL_XDS_CURR_ECBUS_MODE,
+ MHL_XDS_AVLINK_MODE_STATUS,
+ MHL_XDS_AVLINK_MODE_CONTROL,
+ MHL_XDS_MULTI_SINK_STATUS,
+ MHL_XDS_SIZE
+};
+
+/* Offset of XDEVSTAT registers */
+#define MHL_XDS_OFFSET 0x90
+#define MHL_XDS_REG(name) (MHL_XDS_OFFSET + MHL_XDS_##name)
+
+/* MHL_XDS_REG_CURR_ECBUS_MODE flags */
+#define MHL_XDS_SLOT_MODE_8BIT 0x00
+#define MHL_XDS_SLOT_MODE_6BIT 0x01
+#define MHL_XDS_ECBUS_S 0x04
+#define MHL_XDS_ECBUS_D 0x08
+
+#define MHL_XDS_LINK_CLOCK_75MHZ 0x00
+#define MHL_XDS_LINK_CLOCK_150MHZ 0x10
+#define MHL_XDS_LINK_CLOCK_300MHZ 0x20
+#define MHL_XDS_LINK_CLOCK_600MHZ 0x30
+
+#define MHL_XDS_LINK_STATUS_NO_SIGNAL 0x00
+#define MHL_XDS_LINK_STATUS_CRU_LOCKED 0x01
+#define MHL_XDS_LINK_STATUS_TMDS_NORMAL 0x02
+#define MHL_XDS_LINK_STATUS_TMDS_RESERVED 0x03
+
+#define MHL_XDS_LINK_RATE_1_5_GBPS 0x00
+#define MHL_XDS_LINK_RATE_3_0_GBPS 0x01
+#define MHL_XDS_LINK_RATE_6_0_GBPS 0x02
+#define MHL_XDS_ATT_CAPABLE 0x08
+
+#define MHL_XDS_SINK_STATUS_1_HPD_LOW 0x00
+#define MHL_XDS_SINK_STATUS_1_HPD_HIGH 0x01
+#define MHL_XDS_SINK_STATUS_2_HPD_LOW 0x00
+#define MHL_XDS_SINK_STATUS_2_HPD_HIGH 0x04
+#define MHL_XDS_SINK_STATUS_3_HPD_LOW 0x00
+#define MHL_XDS_SINK_STATUS_3_HPD_HIGH 0x10
+#define MHL_XDS_SINK_STATUS_4_HPD_LOW 0x00
+#define MHL_XDS_SINK_STATUS_4_HPD_HIGH 0x40
+
+/* Interrupt Registers */
+enum {
+ MHL_INT_RCHANGE,
+ MHL_INT_DCHANGE,
+ MHL_INT_SIZE
+};
+
+/* Offset of DEVSTAT registers */
+#define MHL_INT_OFFSET 0x20
+#define MHL_INT_REG(name) (MHL_INT_OFFSET + MHL_INT_##name)
+
+#define MHL_INT_RC_DCAP_CHG 0x01
+#define MHL_INT_RC_DSCR_CHG 0x02
+#define MHL_INT_RC_REQ_WRT 0x04
+#define MHL_INT_RC_GRT_WRT 0x08
+#define MHL_INT_RC_3D_REQ 0x10
+#define MHL_INT_RC_FEAT_REQ 0x20
+#define MHL_INT_RC_FEAT_COMPLETE 0x40
+
+#define MHL_INT_DC_EDID_CHG 0x02
+
+enum {
+ MHL_ACK = 0x33, /* Command or Data byte acknowledge */
+ MHL_NACK = 0x34, /* Command or Data byte not acknowledge */
+ MHL_ABORT = 0x35, /* Transaction abort */
+ MHL_WRITE_STAT = 0xe0, /* Write one status register */
+ MHL_SET_INT = 0x60, /* Write one interrupt register */
+ MHL_READ_DEVCAP_REG = 0x61, /* Read one register */
+ MHL_GET_STATE = 0x62, /* Read CBUS revision level from follower */
+ MHL_GET_VENDOR_ID = 0x63, /* Read vendor ID value from follower */
+ MHL_SET_HPD = 0x64, /* Set Hot Plug Detect in follower */
+ MHL_CLR_HPD = 0x65, /* Clear Hot Plug Detect in follower */
+ MHL_SET_CAP_ID = 0x66, /* Set Capture ID for downstream device */
+ MHL_GET_CAP_ID = 0x67, /* Get Capture ID from downstream device */
+ MHL_MSC_MSG = 0x68, /* VS command to send RCP sub-commands */
+ MHL_GET_SC1_ERRORCODE = 0x69, /* Get Vendor-Specific error code */
+ MHL_GET_DDC_ERRORCODE = 0x6A, /* Get DDC channel command error code */
+ MHL_GET_MSC_ERRORCODE = 0x6B, /* Get MSC command error code */
+ MHL_WRITE_BURST = 0x6C, /* Write 1-16 bytes to responder's scratchpad */
+ MHL_GET_SC3_ERRORCODE = 0x6D, /* Get channel 3 command error code */
+ MHL_WRITE_XSTAT = 0x70, /* Write one extended status register */
+ MHL_READ_XDEVCAP_REG = 0x71, /* Read one extended devcap register */
+ /* let the rest of these float, they are software specific */
+ MHL_READ_EDID_BLOCK,
+ MHL_SEND_3D_REQ_OR_FEAT_REQ,
+ MHL_READ_DEVCAP,
+ MHL_READ_XDEVCAP
+};
+
+/* MSC message types */
+enum {
+ MHL_MSC_MSG_RCP = 0x10, /* RCP sub-command */
+ MHL_MSC_MSG_RCPK = 0x11, /* RCP Acknowledge sub-command */
+ MHL_MSC_MSG_RCPE = 0x12, /* RCP Error sub-command */
+ MHL_MSC_MSG_RAP = 0x20, /* Mode Change Warning sub-command */
+ MHL_MSC_MSG_RAPK = 0x21, /* MCW Acknowledge sub-command */
+ MHL_MSC_MSG_RBP = 0x22, /* Remote Button Protocol sub-command */
+ MHL_MSC_MSG_RBPK = 0x23, /* RBP Acknowledge sub-command */
+ MHL_MSC_MSG_RBPE = 0x24, /* RBP Error sub-command */
+ MHL_MSC_MSG_UCP = 0x30, /* UCP sub-command */
+ MHL_MSC_MSG_UCPK = 0x31, /* UCP Acknowledge sub-command */
+ MHL_MSC_MSG_UCPE = 0x32, /* UCP Error sub-command */
+ MHL_MSC_MSG_RUSB = 0x40, /* Request USB host role */
+ MHL_MSC_MSG_RUSBK = 0x41, /* Acknowledge request for USB host role */
+ MHL_MSC_MSG_RHID = 0x42, /* Request HID host role */
+ MHL_MSC_MSG_RHIDK = 0x43, /* Acknowledge request for HID host role */
+ MHL_MSC_MSG_ATT = 0x50, /* Request attention sub-command */
+ MHL_MSC_MSG_ATTK = 0x51, /* ATT Acknowledge sub-command */
+ MHL_MSC_MSG_BIST_TRIGGER = 0x60,
+ MHL_MSC_MSG_BIST_REQUEST_STAT = 0x61,
+ MHL_MSC_MSG_BIST_READY = 0x62,
+ MHL_MSC_MSG_BIST_STOP = 0x63,
+};
+
+/* RAP action codes */
+#define MHL_RAP_POLL 0x00 /* Just do an ack */
+#define MHL_RAP_CONTENT_ON 0x10 /* Turn content stream ON */
+#define MHL_RAP_CONTENT_OFF 0x11 /* Turn content stream OFF */
+#define MHL_RAP_CBUS_MODE_DOWN 0x20
+#define MHL_RAP_CBUS_MODE_UP 0x21
+
+/* RAPK status codes */
+#define MHL_RAPK_NO_ERR 0x00 /* RAP action recognized & supported */
+#define MHL_RAPK_UNRECOGNIZED 0x01 /* Unknown RAP action code received */
+#define MHL_RAPK_UNSUPPORTED 0x02 /* Rcvd RAP action code not supported */
+#define MHL_RAPK_BUSY 0x03 /* Responder too busy to respond */
+
+/*
+ * Error status codes for RCPE messages
+ */
+/* No error. (Not allowed in RCPE messages) */
+#define MHL_RCPE_STATUS_NO_ERROR 0x00
+/* Unsupported/unrecognized key code */
+#define MHL_RCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01
+/* Responder busy. Initiator may retry message */
+#define MHL_RCPE_STATUS_BUSY 0x02
+
+/*
+ * Error status codes for RBPE messages
+ */
+/* No error. (Not allowed in RBPE messages) */
+#define MHL_RBPE_STATUS_NO_ERROR 0x00
+/* Unsupported/unrecognized button code */
+#define MHL_RBPE_STATUS_INEFFECTIVE_BUTTON_CODE 0x01
+/* Responder busy. Initiator may retry message */
+#define MHL_RBPE_STATUS_BUSY 0x02
+
+/*
+ * Error status codes for UCPE messages
+ */
+/* No error. (Not allowed in UCPE messages) */
+#define MHL_UCPE_STATUS_NO_ERROR 0x00
+/* Unsupported/unrecognized key code */
+#define MHL_UCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01
+
+#endif /* __MHL_H__ */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 672644031bd5..192016e2b518 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -57,11 +57,11 @@
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <uapi/drm/drm.h>
#include <uapi/drm/drm_mode.h>
@@ -76,6 +76,7 @@
#include <drm/drm_os_linux.h>
#include <drm/drm_sarea.h>
#include <drm/drm_vma_manager.h>
+#include <drm/drm_drv.h>
struct module;
@@ -135,35 +136,12 @@ struct dma_buf_attachment;
#define DRM_UT_PRIME 0x08
#define DRM_UT_ATOMIC 0x10
#define DRM_UT_VBL 0x20
-
-extern __printf(6, 7)
-void drm_dev_printk(const struct device *dev, const char *level,
- unsigned int category, const char *function_name,
- const char *prefix, const char *format, ...);
-
-extern __printf(3, 4)
-void drm_printk(const char *level, unsigned int category,
- const char *format, ...);
+#define DRM_UT_STATE 0x40
/***********************************************************************/
/** \name DRM template customization defaults */
/*@{*/
-/* driver capabilities and requirements mask */
-#define DRIVER_USE_AGP 0x1
-#define DRIVER_LEGACY 0x2
-#define DRIVER_PCI_DMA 0x8
-#define DRIVER_SG 0x10
-#define DRIVER_HAVE_DMA 0x20
-#define DRIVER_HAVE_IRQ 0x40
-#define DRIVER_IRQ_SHARED 0x80
-#define DRIVER_GEM 0x1000
-#define DRIVER_MODESET 0x2000
-#define DRIVER_PRIME 0x4000
-#define DRIVER_RENDER 0x8000
-#define DRIVER_ATOMIC 0x10000
-#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
-
/***********************************************************************/
/** \name Macros to make printk easier */
/*@{*/
@@ -306,6 +284,27 @@ void drm_printk(const char *level, unsigned int category,
#define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...) \
DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args)
+/* Format strings and argument splitters to simplify printing
+ * various "complex" objects
+ */
+#define DRM_MODE_FMT "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x"
+#define DRM_MODE_ARG(m) \
+ (m)->base.id, (m)->name, (m)->vrefresh, (m)->clock, \
+ (m)->hdisplay, (m)->hsync_start, (m)->hsync_end, (m)->htotal, \
+ (m)->vdisplay, (m)->vsync_start, (m)->vsync_end, (m)->vtotal, \
+ (m)->type, (m)->flags
+
+#define DRM_RECT_FMT "%dx%d%+d%+d"
+#define DRM_RECT_ARG(r) drm_rect_width(r), drm_rect_height(r), (r)->x1, (r)->y1
+
+/* for rect's in fixed-point format: */
+#define DRM_RECT_FP_FMT "%d.%06ux%d.%06u%+d.%06u%+d.%06u"
+#define DRM_RECT_FP_ARG(r) \
+ drm_rect_width(r) >> 16, ((drm_rect_width(r) & 0xffff) * 15625) >> 10, \
+ drm_rect_height(r) >> 16, ((drm_rect_height(r) & 0xffff) * 15625) >> 10, \
+ (r)->x1 >> 16, (((r)->x1 & 0xffff) * 15625) >> 10, \
+ (r)->y1 >> 16, (((r)->y1 & 0xffff) * 15625) >> 10
+
/*@}*/
/***********************************************************************/
@@ -362,7 +361,7 @@ struct drm_ioctl_desc {
struct drm_pending_event {
struct completion *completion;
struct drm_event *event;
- struct fence *fence;
+ struct dma_fence *fence;
struct list_head link;
struct list_head pending_link;
struct drm_file *file_priv;
@@ -458,263 +457,6 @@ struct drm_lock_data {
#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1)
#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
-/**
- * DRM driver structure. This structure represent the common code for
- * a family of cards. There will one drm_device for each card present
- * in this family
- */
-struct drm_driver {
- int (*load) (struct drm_device *, unsigned long flags);
- int (*firstopen) (struct drm_device *);
- int (*open) (struct drm_device *, struct drm_file *);
- void (*preclose) (struct drm_device *, struct drm_file *file_priv);
- void (*postclose) (struct drm_device *, struct drm_file *);
- void (*lastclose) (struct drm_device *);
- int (*unload) (struct drm_device *);
- int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
- int (*dma_quiescent) (struct drm_device *);
- int (*context_dtor) (struct drm_device *dev, int context);
- int (*set_busid)(struct drm_device *dev, struct drm_master *master);
-
- /**
- * get_vblank_counter - get raw hardware vblank counter
- * @dev: DRM device
- * @pipe: counter to fetch
- *
- * Driver callback for fetching a raw hardware vblank counter for @crtc.
- * If a device doesn't have a hardware counter, the driver can simply
- * use drm_vblank_no_hw_counter() function. The DRM core will account for
- * missed vblank events while interrupts where disabled based on system
- * timestamps.
- *
- * Wraparound handling and loss of events due to modesetting is dealt
- * with in the DRM core code.
- *
- * RETURNS
- * Raw vblank counter value.
- */
- u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe);
-
- /**
- * enable_vblank - enable vblank interrupt events
- * @dev: DRM device
- * @pipe: which irq to enable
- *
- * Enable vblank interrupts for @crtc. If the device doesn't have
- * a hardware vblank counter, the driver should use the
- * drm_vblank_no_hw_counter() function that keeps a virtual counter.
- *
- * RETURNS
- * Zero on success, appropriate errno if the given @crtc's vblank
- * interrupt cannot be enabled.
- */
- int (*enable_vblank) (struct drm_device *dev, unsigned int pipe);
-
- /**
- * disable_vblank - disable vblank interrupt events
- * @dev: DRM device
- * @pipe: which irq to enable
- *
- * Disable vblank interrupts for @crtc. If the device doesn't have
- * a hardware vblank counter, the driver should use the
- * drm_vblank_no_hw_counter() function that keeps a virtual counter.
- */
- void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
-
- /**
- * Called by \c drm_device_is_agp. Typically used to determine if a
- * card is really attached to AGP or not.
- *
- * \param dev DRM device handle
- *
- * \returns
- * One of three values is returned depending on whether or not the
- * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
- * (return of 1), or may or may not be AGP (return of 2).
- */
- int (*device_is_agp) (struct drm_device *dev);
-
- /**
- * Called by vblank timestamping code.
- *
- * Return the current display scanout position from a crtc, and an
- * optional accurate ktime_get timestamp of when position was measured.
- *
- * \param dev DRM device.
- * \param pipe Id of the crtc to query.
- * \param flags Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0).
- * \param *vpos Target location for current vertical scanout position.
- * \param *hpos Target location for current horizontal scanout position.
- * \param *stime Target location for timestamp taken immediately before
- * scanout position query. Can be NULL to skip timestamp.
- * \param *etime Target location for timestamp taken immediately after
- * scanout position query. Can be NULL to skip timestamp.
- * \param mode Current display timings.
- *
- * Returns vpos as a positive number while in active scanout area.
- * Returns vpos as a negative number inside vblank, counting the number
- * of scanlines to go until end of vblank, e.g., -1 means "one scanline
- * until start of active scanout / end of vblank."
- *
- * \return Flags, or'ed together as follows:
- *
- * DRM_SCANOUTPOS_VALID = Query successful.
- * DRM_SCANOUTPOS_INVBL = Inside vblank.
- * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
- * this flag means that returned position may be offset by a constant
- * but unknown small number of scanlines wrt. real scanout position.
- *
- */
- int (*get_scanout_position) (struct drm_device *dev, unsigned int pipe,
- unsigned int flags, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
-
- /**
- * Called by \c drm_get_last_vbltimestamp. Should return a precise
- * timestamp when the most recent VBLANK interval ended or will end.
- *
- * Specifically, the timestamp in @vblank_time should correspond as
- * closely as possible to the time when the first video scanline of
- * the video frame after the end of VBLANK will start scanning out,
- * the time immediately after end of the VBLANK interval. If the
- * @crtc is currently inside VBLANK, this will be a time in the future.
- * If the @crtc is currently scanning out a frame, this will be the
- * past start time of the current scanout. This is meant to adhere
- * to the OpenML OML_sync_control extension specification.
- *
- * \param dev dev DRM device handle.
- * \param pipe crtc for which timestamp should be returned.
- * \param *max_error Maximum allowable timestamp error in nanoseconds.
- * Implementation should strive to provide timestamp
- * with an error of at most *max_error nanoseconds.
- * Returns true upper bound on error for timestamp.
- * \param *vblank_time Target location for returned vblank timestamp.
- * \param flags 0 = Defaults, no special treatment needed.
- * \param DRM_CALLED_FROM_VBLIRQ = Function is called from vblank
- * irq handler. Some drivers need to apply some workarounds
- * for gpu-specific vblank irq quirks if flag is set.
- *
- * \returns
- * Zero if timestamping isn't supported in current display mode or a
- * negative number on failure. A positive status code on success,
- * which describes how the vblank_time timestamp was computed.
- */
- int (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe,
- int *max_error,
- struct timeval *vblank_time,
- unsigned flags);
-
- /* these have to be filled in */
-
- irqreturn_t(*irq_handler) (int irq, void *arg);
- void (*irq_preinstall) (struct drm_device *dev);
- int (*irq_postinstall) (struct drm_device *dev);
- void (*irq_uninstall) (struct drm_device *dev);
-
- /* Master routines */
- int (*master_create)(struct drm_device *dev, struct drm_master *master);
- void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
- /**
- * master_set is called whenever the minor master is set.
- * master_drop is called whenever the minor master is dropped.
- */
-
- int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
- bool from_open);
- void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv);
-
- int (*debugfs_init)(struct drm_minor *minor);
- void (*debugfs_cleanup)(struct drm_minor *minor);
-
- /**
- * @gem_free_object: deconstructor for drm_gem_objects
- *
- * This is deprecated and should not be used by new drivers. Use
- * @gem_free_object_unlocked instead.
- */
- void (*gem_free_object) (struct drm_gem_object *obj);
-
- /**
- * @gem_free_object_unlocked: deconstructor for drm_gem_objects
- *
- * This is for drivers which are not encumbered with dev->struct_mutex
- * legacy locking schemes. Use this hook instead of @gem_free_object.
- */
- void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
-
- int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
- void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
-
- /**
- * Hook for allocating the GEM object struct, for use by core
- * helpers.
- */
- struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
- size_t size);
-
- /* prime: */
- /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */
- int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
- uint32_t handle, uint32_t flags, int *prime_fd);
- /* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */
- int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
- int prime_fd, uint32_t *handle);
- /* export GEM -> dmabuf */
- struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
- struct drm_gem_object *obj, int flags);
- /* import dmabuf -> GEM */
- struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
- struct dma_buf *dma_buf);
- /* low-level interface used by drm_gem_prime_{import,export} */
- int (*gem_prime_pin)(struct drm_gem_object *obj);
- void (*gem_prime_unpin)(struct drm_gem_object *obj);
- struct reservation_object * (*gem_prime_res_obj)(
- struct drm_gem_object *obj);
- struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
- struct drm_gem_object *(*gem_prime_import_sg_table)(
- struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
- void *(*gem_prime_vmap)(struct drm_gem_object *obj);
- void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
- int (*gem_prime_mmap)(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
-
- /* vga arb irq handler */
- void (*vgaarb_irq)(struct drm_device *dev, bool state);
-
- /* dumb alloc support */
- int (*dumb_create)(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
- int (*dumb_map_offset)(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset);
- int (*dumb_destroy)(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
-
- /* Driver private ops for this object */
- const struct vm_operations_struct *gem_vm_ops;
-
- int major;
- int minor;
- int patchlevel;
- char *name;
- char *desc;
- char *date;
-
- u32 driver_features;
- int dev_priv_size;
- const struct drm_ioctl_desc *ioctls;
- int num_ioctls;
- const struct file_operations *fops;
-
- /* List of devices hanging off this driver with stealth attach. */
- struct list_head legacy_dev_list;
-};
-
enum drm_minor_type {
DRM_MINOR_PRIMARY,
DRM_MINOR_CONTROL,
@@ -941,8 +683,13 @@ static inline bool drm_is_primary_client(const struct drm_file *file_priv)
extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
extern long drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
extern long drm_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
+#else
+/* Let drm_compat_ioctl be assigned to .compat_ioctl unconditionally */
+#define drm_compat_ioctl NULL
+#endif
extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
/* File Operations (drm_fops.c) */
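With the hunk above, drivers can point .compat_ioctl at drm_compat_ioctl unconditionally; when CONFIG_COMPAT is disabled the name simply expands to NULL. A minimal sketch of a driver's file_operations relying on this (the foo_ prefix is illustrative, not part of this patch):

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	/* NULL when CONFIG_COMPAT=n, courtesy of the #define above */
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.mmap		= drm_gem_mmap,
};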
@@ -980,15 +727,6 @@ void drm_clflush_virt_range(void *addr, unsigned long length);
* DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
*/
-/* Modesetting support */
-extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
-extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
-
-/* drm_drv.c */
-void drm_put_dev(struct drm_device *dev);
-void drm_unplug_dev(struct drm_device *dev);
-extern unsigned int drm_debug;
-
/* Debugfs support */
#if defined(CONFIG_DEBUG_FS)
extern int drm_debugfs_create_files(const struct drm_info_list *files,
@@ -1041,19 +779,6 @@ extern void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
extern void drm_sysfs_hotplug_event(struct drm_device *dev);
-struct drm_device *drm_dev_alloc(struct drm_driver *driver,
- struct device *parent);
-int drm_dev_init(struct drm_device *dev,
- struct drm_driver *driver,
- struct device *parent);
-void drm_dev_ref(struct drm_device *dev);
-void drm_dev_unref(struct drm_device *dev);
-int drm_dev_register(struct drm_device *dev, unsigned long flags);
-void drm_dev_unregister(struct drm_device *dev);
-
-struct drm_minor *drm_minor_acquire(unsigned int minor_id);
-void drm_minor_release(struct drm_minor *minor);
-
/*@}*/
/* PCI section */
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 9701f2dfb784..d6d241f63b9f 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -144,6 +144,7 @@ struct __drm_crtcs_state {
struct drm_crtc *ptr;
struct drm_crtc_state *state;
struct drm_crtc_commit *commit;
+ s64 __user *out_fence_ptr;
};
struct __drm_connnectors_state {
@@ -153,6 +154,7 @@ struct __drm_connnectors_state {
/**
* struct drm_atomic_state - the global state object for atomic updates
+ * @ref: count of all references to this state (will not be freed until zero)
* @dev: parent DRM device
* @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
@@ -164,6 +166,8 @@ struct __drm_connnectors_state {
* @acquire_ctx: acquire context for this atomic modeset state update
*/
struct drm_atomic_state {
+ struct kref ref;
+
struct drm_device *dev;
bool allow_modeset : 1;
bool legacy_cursor_update : 1;
@@ -193,7 +197,33 @@ static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit)
struct drm_atomic_state * __must_check
drm_atomic_state_alloc(struct drm_device *dev);
void drm_atomic_state_clear(struct drm_atomic_state *state);
-void drm_atomic_state_free(struct drm_atomic_state *state);
+
+/**
+ * drm_atomic_state_get - acquire a reference to the atomic state
+ * @state: The atomic state
+ *
+ * Returns a new reference to the @state
+ */
+static inline struct drm_atomic_state *
+drm_atomic_state_get(struct drm_atomic_state *state)
+{
+ kref_get(&state->ref);
+ return state;
+}
+
+void __drm_atomic_state_free(struct kref *ref);
+
+/**
+ * drm_atomic_state_put - release a reference to the atomic state
+ * @state: The atomic state
+ *
+ * This releases a reference to @state which is freed after removing the
+ * final reference. No locking required and callable from any context.
+ */
+static inline void drm_atomic_state_put(struct drm_atomic_state *state)
+{
+ kref_put(&state->ref, __drm_atomic_state_free);
+}
int __must_check
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state);
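Since struct drm_atomic_state is now reference counted, callers no longer free it explicitly; they drop their reference with drm_atomic_state_put() and the state is released once the last reference goes away. A hedged sketch of the resulting calling pattern (locking and error handling trimmed):

	struct drm_atomic_state *state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;
	/* ... build up the desired plane/CRTC/connector states ... */

	ret = drm_atomic_commit(state);

	/* always drop our reference; the commit machinery takes its own */
	drm_atomic_state_put(state);
	return ret;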
@@ -316,6 +346,8 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
struct drm_crtc *crtc);
void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb);
+void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
+ struct dma_fence *fence);
int __must_check
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
struct drm_crtc *crtc);
@@ -335,6 +367,14 @@ int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
int __must_check drm_atomic_commit(struct drm_atomic_state *state);
int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
+void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
+
+#ifdef CONFIG_DEBUG_FS
+struct drm_minor;
+int drm_atomic_debugfs_init(struct drm_minor *minor);
+int drm_atomic_debugfs_cleanup(struct drm_minor *minor);
+#endif
+
#define for_each_connector_in_state(__state, connector, connector_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->num_connector && \
@@ -365,11 +405,20 @@ int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
*
* To give drivers flexibility struct &drm_crtc_state has 3 booleans to track
* whether the state CRTC changed enough to need a full modeset cycle:
- * connectors_changed, mode_changed and active_change. This helper simply
+ * connectors_changed, mode_changed and active_changed. This helper simply
* combines these three to compute the overall need for a modeset for @state.
+ *
+ * The atomic helper code sets these booleans, but drivers can and should
+ * change them appropriately to accurately represent whether a modeset is
+ * really needed. In general, drivers should avoid full modesets whenever
+ * possible.
+ *
+ * For example, if the CRTC mode has changed and the hardware is able to enact
+ * the requested mode change without going through a full modeset, the driver
+ * should clear mode_changed during its ->atomic_check.
*/
static inline bool
-drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
+drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state)
{
return state->mode_changed || state->active_changed ||
state->connectors_changed;
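As the updated kerneldoc notes, a driver that can enact a given mode change without blanking the screen may clear mode_changed from its atomic_check code. A sketch under the assumption that the driver uses the CRTC helper's atomic_check hook; foo_can_fastset() is a hypothetical predicate for hardware that supports seamless mode changes:

static int foo_crtc_atomic_check(struct drm_crtc *crtc,
				 struct drm_crtc_state *state)
{
	if (state->mode_changed && foo_can_fastset(crtc, state)) {
		/* the new mode can be applied without a full modeset */
		state->mode_changed = false;
	}

	return 0;
}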
diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h
index 36baa175de99..13221cf9b3eb 100644
--- a/include/drm/drm_blend.h
+++ b/include/drm/drm_blend.h
@@ -47,8 +47,14 @@ struct drm_atomic_state;
#define DRM_REFLECT_Y BIT(5)
#define DRM_REFLECT_MASK (DRM_REFLECT_X | DRM_REFLECT_Y)
-struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
- unsigned int supported_rotations);
+static inline bool drm_rotation_90_or_270(unsigned int rotation)
+{
+ return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270);
+}
+
+int drm_plane_create_rotation_property(struct drm_plane *plane,
+ unsigned int rotation,
+ unsigned int supported_rotations);
unsigned int drm_rotation_simplify(unsigned int rotation,
unsigned int supported_rotations);
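drm_plane_create_rotation_property() attaches the rotation property to a single plane instead of creating one device-wide property, and drm_rotation_90_or_270() captures the common "swap width and height" check. A sketch of how a driver might use both (the supported rotations are chosen for illustration):

	/* at plane init: default to no rotation, expose 0 and 90 degrees */
	drm_plane_create_rotation_property(plane, DRM_ROTATE_0,
					   DRM_ROTATE_0 | DRM_ROTATE_90);

	/* in the plane's atomic code: swap source dimensions if needed */
	if (drm_rotation_90_or_270(plane_state->rotation))
		swap(src_w, src_h);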
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index ac9d7d8e0e43..a9b95246e26e 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -37,6 +37,7 @@ struct drm_crtc;
struct drm_encoder;
struct drm_property;
struct drm_property_blob;
+struct drm_printer;
struct edid;
enum drm_connector_force {
@@ -194,10 +195,40 @@ int drm_display_info_set_bus_formats(struct drm_display_info *info,
unsigned int num_formats);
/**
+ * struct drm_tv_connector_state - TV connector related states
+ * @subconnector: selected subconnector
+ * @margins: left/right/top/bottom margins
+ * @mode: TV mode
+ * @brightness: brightness in percent
+ * @contrast: contrast in percent
+ * @flicker_reduction: flicker reduction in percent
+ * @overscan: overscan in percent
+ * @saturation: saturation in percent
+ * @hue: hue in percent
+ */
+struct drm_tv_connector_state {
+ enum drm_mode_subconnector subconnector;
+ struct {
+ unsigned int left;
+ unsigned int right;
+ unsigned int top;
+ unsigned int bottom;
+ } margins;
+ unsigned int mode;
+ unsigned int brightness;
+ unsigned int contrast;
+ unsigned int flicker_reduction;
+ unsigned int overscan;
+ unsigned int saturation;
+ unsigned int hue;
+};
+
+/**
* struct drm_connector_state - mutable connector state
* @connector: backpointer to the connector
* @best_encoder: can be used by helpers and drivers to select the encoder
* @state: backpointer to global drm_atomic_state
+ * @tv: TV connector state
*/
struct drm_connector_state {
struct drm_connector *connector;
@@ -213,6 +244,8 @@ struct drm_connector_state {
struct drm_encoder *best_encoder;
struct drm_atomic_state *state;
+
+ struct drm_tv_connector_state tv;
};
/**
@@ -261,6 +294,9 @@ struct drm_connector_funcs {
* connector due to a user request. force can be used by the driver to
* avoid expensive, destructive operations during automated probing.
*
+ * This callback is optional; if it is not implemented, the connector is
+ * assumed to always be attached.
+ *
* FIXME:
*
* Note that this hook is only called by the probe helper. It's not in
@@ -481,6 +517,18 @@ struct drm_connector_funcs {
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val);
+
+ /**
+ * @atomic_print_state:
+ *
+ * If driver subclasses struct &drm_connector_state, it should implement
+ * this optional hook for printing additional driver specific state.
+ *
+ * Do not call this directly, use drm_atomic_connector_print_state()
+ * instead.
+ */
+ void (*atomic_print_state)(struct drm_printer *p,
+ const struct drm_connector_state *state);
};
/* mode specified on the command line */
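The new @atomic_print_state hook pairs with the drm_printer infrastructure added in include/drm/drm_print.h. A hedged sketch of a driver that subclasses drm_connector_state and prints its extra field (foo_connector_state and the dither flag are illustrative):

struct foo_connector_state {
	struct drm_connector_state base;
	bool dither;
};

static void foo_connector_print_state(struct drm_printer *p,
				      const struct drm_connector_state *state)
{
	const struct foo_connector_state *s =
		container_of(state, struct foo_connector_state, base);

	drm_printf(p, "\tdither=%d\n", s->dither);
}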
@@ -762,6 +810,30 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
const struct edid *edid);
/**
+ * struct drm_tile_group - Tile group metadata
+ * @refcount: reference count
+ * @dev: DRM device
+ * @id: tile group id exposed to userspace
+ * @group_data: Sink-private data identifying this group
+ *
+ * @group_data corresponds to displayid vend/prod/serial for external screens
+ * with an EDID.
+ */
+struct drm_tile_group {
+ struct kref refcount;
+ struct drm_device *dev;
+ int id;
+ u8 group_data[8];
+};
+
+struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+ char topology[8]);
+struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
+ char topology[8]);
+void drm_mode_put_tile_group(struct drm_device *dev,
+ struct drm_tile_group *tg);
+
+/**
* drm_for_each_connector - iterate over all connectors
* @connector: the loop cursor
* @dev: the DRM device
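The tile group helpers declared above move here from drm_crtc.h. The usual pattern when parsing a tiled sink's DisplayID block is to look the group up by topology and create it on first sight; a sketch with error handling elided (the topology buffer is assumed to come from the sink's EDID):

	struct drm_tile_group *tg;

	tg = drm_mode_get_tile_group(dev, topology);
	if (!tg)
		tg = drm_mode_create_tile_group(dev, topology);

	/* the connector holds this reference until cleanup, where it is
	 * dropped with drm_mode_put_tile_group() */
	connector->tile_group = tg;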
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 0aa292526567..946672f97e1e 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -28,7 +28,6 @@
#include <linux/i2c.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/idr.h>
#include <linux/fb.h>
#include <linux/hdmi.h>
#include <linux/media-bus-format.h>
@@ -47,13 +46,16 @@
#include <drm/drm_plane.h>
#include <drm/drm_blend.h>
#include <drm/drm_color_mgmt.h>
+#include <drm/drm_debugfs_crc.h>
+#include <drm/drm_mode_config.h>
struct drm_device;
struct drm_mode_set;
struct drm_file;
struct drm_clip_rect;
+struct drm_printer;
struct device_node;
-struct fence;
+struct dma_fence;
struct edid;
static inline int64_t U642I64(uint64_t val)
@@ -65,14 +67,6 @@ static inline uint64_t I642U64(int64_t val)
return (uint64_t)*((uint64_t *)&val);
}
-/* data corresponds to displayid vend/prod/serial */
-struct drm_tile_group {
- struct kref refcount;
- struct drm_device *dev;
- int id;
- u8 group_data[8];
-};
-
struct drm_crtc;
struct drm_encoder;
struct drm_pending_vblank_event;
@@ -116,6 +110,11 @@ struct drm_plane_helper_funcs;
* never return in a failure from the ->atomic_check callback. Userspace assumes
* that a DPMS On will always succeed. In other words: @enable controls resource
* assignment, @active controls the actual hardware state.
+ *
+ * The three booleans active_changed, connectors_changed and mode_changed are
+ * intended to indicate whether a full modeset is needed, rather than strictly
+ * describing what has changed in a commit.
+ * See also: drm_atomic_crtc_needs_modeset()
*/
struct drm_crtc_state {
struct drm_crtc *crtc;
@@ -564,6 +563,42 @@ struct drm_crtc_funcs {
* before data structures are torndown.
*/
void (*early_unregister)(struct drm_crtc *crtc);
+
+ /**
+ * @set_crc_source:
+ *
+ * Changes the source of CRC checksums of frames at the request of
+ * userspace, typically for testing purposes. The sources available are
+ * specific of each driver and a %NULL value indicates that CRC
+ * generation is to be switched off.
+ *
+ * When CRC generation is enabled, the driver should call
+ * drm_crtc_add_crc_entry() at each frame, providing any information
+ * that characterizes the frame contents in the crcN arguments, as
+ * provided from the configured source. Drivers must accept a "auto"
+ * source name that will select a default source for this CRTC.
+ *
+ * This callback is optional if the driver does not support any CRC
+ * generation functionality.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+ int (*set_crc_source)(struct drm_crtc *crtc, const char *source,
+ size_t *values_cnt);
+
+ /**
+ * @atomic_print_state:
+ *
+ * If driver subclasses struct &drm_crtc_state, it should implement
+ * this optional hook for printing additional driver specific state.
+ *
+ * Do not call this directly, use drm_atomic_crtc_print_state()
+ * instead.
+ */
+ void (*atomic_print_state)(struct drm_printer *p,
+ const struct drm_crtc_state *state);
};
/**
@@ -680,660 +715,90 @@ struct drm_crtc {
* context.
*/
struct drm_modeset_acquire_ctx *acquire_ctx;
-};
-/**
- * struct drm_mode_set - new values for a CRTC config change
- * @fb: framebuffer to use for new config
- * @crtc: CRTC whose configuration we're about to change
- * @mode: mode timings to use
- * @x: position of this CRTC relative to @fb
- * @y: position of this CRTC relative to @fb
- * @connectors: array of connectors to drive with this CRTC if possible
- * @num_connectors: size of @connectors array
- *
- * Represents a single crtc the connectors that it drives with what mode
- * and from which framebuffer it scans out from.
- *
- * This is used to set modes.
- */
-struct drm_mode_set {
- struct drm_framebuffer *fb;
- struct drm_crtc *crtc;
- struct drm_display_mode *mode;
-
- uint32_t x;
- uint32_t y;
-
- struct drm_connector **connectors;
- size_t num_connectors;
-};
-
-/**
- * struct drm_mode_config_funcs - basic driver provided mode setting functions
- *
- * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
- * involve drivers.
- */
-struct drm_mode_config_funcs {
+#ifdef CONFIG_DEBUG_FS
/**
- * @fb_create:
- *
- * Create a new framebuffer object. The core does basic checks on the
- * requested metadata, but most of that is left to the driver. See
- * struct &drm_mode_fb_cmd2 for details.
- *
- * If the parameters are deemed valid and the backing storage objects in
- * the underlying memory manager all exist, then the driver allocates
- * a new &drm_framebuffer structure, subclassed to contain
- * driver-specific information (like the internal native buffer object
- * references). It also needs to fill out all relevant metadata, which
- * should be done by calling drm_helper_mode_fill_fb_struct().
+ * @debugfs_entry:
*
- * The initialization is finalized by calling drm_framebuffer_init(),
- * which registers the framebuffer and makes it accessible to other
- * threads.
- *
- * RETURNS:
- *
- * A new framebuffer with an initial reference count of 1 or a negative
- * error code encoded with ERR_PTR().
+ * Debugfs directory for this CRTC.
*/
- struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
- struct drm_file *file_priv,
- const struct drm_mode_fb_cmd2 *mode_cmd);
+ struct dentry *debugfs_entry;
/**
- * @output_poll_changed:
- *
- * Callback used by helpers to inform the driver of output configuration
- * changes.
+ * @crc:
*
- * Drivers implementing fbdev emulation with the helpers can call
- * drm_fb_helper_hotplug_changed from this hook to inform the fbdev
- * helper of output changes.
- *
- * FIXME:
- *
- * Except that there's no vtable for device-level helper callbacks
- * there's no reason this is a core function.
+ * Configuration settings of CRC capture.
*/
- void (*output_poll_changed)(struct drm_device *dev);
+ struct drm_crtc_crc crc;
+#endif
/**
- * @atomic_check:
- *
- * This is the only hook to validate an atomic modeset update. This
- * function must reject any modeset and state changes which the hardware
- * or driver doesn't support. This includes but is of course not limited
- * to:
- *
- * - Checking that the modes, framebuffers, scaling and placement
- * requirements and so on are within the limits of the hardware.
- *
- * - Checking that any hidden shared resources are not oversubscribed.
- * This can be shared PLLs, shared lanes, overall memory bandwidth,
- * display fifo space (where shared between planes or maybe even
- * CRTCs).
- *
- * - Checking that virtualized resources exported to userspace are not
- * oversubscribed. For various reasons it can make sense to expose
- * more planes, crtcs or encoders than which are physically there. One
- * example is dual-pipe operations (which generally should be hidden
- * from userspace if when lockstepped in hardware, exposed otherwise),
- * where a plane might need 1 hardware plane (if it's just on one
- * pipe), 2 hardware planes (when it spans both pipes) or maybe even
- * shared a hardware plane with a 2nd plane (if there's a compatible
- * plane requested on the area handled by the other pipe).
- *
- * - Check that any transitional state is possible and that if
- * requested, the update can indeed be done in the vblank period
- * without temporarily disabling some functions.
- *
- * - Check any other constraints the driver or hardware might have.
- *
- * - This callback also needs to correctly fill out the &drm_crtc_state
- * in this update to make sure that drm_atomic_crtc_needs_modeset()
- * reflects the nature of the possible update and returns true if and
- * only if the update cannot be applied without tearing within one
- * vblank on that CRTC. The core uses that information to reject
- * updates which require a full modeset (i.e. blanking the screen, or
- * at least pausing updates for a substantial amount of time) if
- * userspace has disallowed that in its request.
- *
- * - The driver also does not need to repeat basic input validation
- * like done for the corresponding legacy entry points. The core does
- * that before calling this hook.
- *
- * See the documentation of @atomic_commit for an exhaustive list of
- * error conditions which don't have to be checked at the
- * ->atomic_check() stage?
- *
- * See the documentation for struct &drm_atomic_state for how exactly
- * an atomic modeset update is described.
- *
- * Drivers using the atomic helpers can implement this hook using
- * drm_atomic_helper_check(), or one of the exported sub-functions of
- * it.
+ * @fence_context:
*
- * RETURNS:
- *
- * 0 on success or one of the below negative error codes:
- *
- * - -EINVAL, if any of the above constraints are violated.
- *
- * - -EDEADLK, when returned from an attempt to acquire an additional
- * &drm_modeset_lock through drm_modeset_lock().
- *
- * - -ENOMEM, if allocating additional state sub-structures failed due
- * to lack of memory.
- *
- * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted.
- * This can either be due to a pending signal, or because the driver
- * needs to completely bail out to recover from an exceptional
- * situation like a GPU hang. From a userspace point all errors are
- * treated equally.
+ * timeline context used for fence operations.
*/
- int (*atomic_check)(struct drm_device *dev,
- struct drm_atomic_state *state);
+ unsigned int fence_context;
/**
- * @atomic_commit:
- *
- * This is the only hook to commit an atomic modeset update. The core
- * guarantees that @atomic_check has been called successfully before
- * calling this function, and that nothing has been changed in the
- * interim.
- *
- * See the documentation for struct &drm_atomic_state for how exactly
- * an atomic modeset update is described.
- *
- * Drivers using the atomic helpers can implement this hook using
- * drm_atomic_helper_commit(), or one of the exported sub-functions of
- * it.
- *
- * Nonblocking commits (as indicated with the nonblock parameter) must
- * do any preparatory work which might result in an unsuccessful commit
- * in the context of this callback. The only exceptions are hardware
- * errors resulting in -EIO. But even in that case the driver must
- * ensure that the display pipe is at least running, to avoid
- * compositors crashing when pageflips don't work. Anything else,
- * specifically committing the update to the hardware, should be done
- * without blocking the caller. For updates which do not require a
- * modeset this must be guaranteed.
- *
- * The driver must wait for any pending rendering to the new
- * framebuffers to complete before executing the flip. It should also
- * wait for any pending rendering from other drivers if the underlying
- * buffer is a shared dma-buf. Nonblocking commits must not wait for
- * rendering in the context of this callback.
- *
- * An application can request to be notified when the atomic commit has
- * completed. These events are per-CRTC and can be distinguished by the
- * CRTC index supplied in &drm_event to userspace.
- *
- * The drm core will supply a struct &drm_event in the event
- * member of each CRTC's &drm_crtc_state structure. See the
- * documentation for &drm_crtc_state for more details about the precise
- * semantics of this event.
- *
- * NOTE:
- *
- * Drivers are not allowed to shut down any display pipe successfully
- * enabled through an atomic commit on their own. Doing so can result in
- * compositors crashing if a page flip is suddenly rejected because the
- * pipe is off.
- *
- * RETURNS:
- *
- * 0 on success or one of the below negative error codes:
- *
- * - -EBUSY, if a nonblocking updated is requested and there is
- * an earlier updated pending. Drivers are allowed to support a queue
- * of outstanding updates, but currently no driver supports that.
- * Note that drivers must wait for preceding updates to complete if a
- * synchronous update is requested, they are not allowed to fail the
- * commit in that case.
- *
- * - -ENOMEM, if the driver failed to allocate memory. Specifically
- * this can happen when trying to pin framebuffers, which must only
- * be done when committing the state.
- *
- * - -ENOSPC, as a refinement of the more generic -ENOMEM to indicate
- * that the driver has run out of vram, iommu space or similar GPU
- * address space needed for framebuffer.
- *
- * - -EIO, if the hardware completely died.
+ * @fence_lock:
*
- * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted.
- * This can either be due to a pending signal, or because the driver
- * needs to completely bail out to recover from an exceptional
- * situation like a GPU hang. From a userspace point of view all errors are
- * treated equally.
- *
- * This list is exhaustive. Specifically this hook is not allowed to
- * return -EINVAL (any invalid requests should be caught in
- * @atomic_check) or -EDEADLK (this function must not acquire
- * additional modeset locks).
- */
- int (*atomic_commit)(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock);
-
- /**
- * @atomic_state_alloc:
- *
- * This optional hook can be used by drivers that want to subclass struct
- * &drm_atomic_state to be able to track their own driver-private global
- * state easily. If this hook is implemented, drivers must also
- * implement @atomic_state_clear and @atomic_state_free.
- *
- * RETURNS:
- *
- * A new &drm_atomic_state on success or NULL on failure.
+ * spinlock to protect the fences in the fence_context.
*/
- struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev);
+ spinlock_t fence_lock;
/**
- * @atomic_state_clear:
- *
- * This hook must clear any driver private state duplicated into the
- * passed-in &drm_atomic_state. This hook is called when the caller
- * encountered a &drm_modeset_lock deadlock and needs to drop all
- * already acquired locks as part of the deadlock avoidance dance
- * implemented in drm_modeset_lock_backoff().
+ * @fence_seqno:
*
- * Any duplicated state must be invalidated since a concurrent atomic
- * update might change it, and the drm atomic interfaces always apply
- * updates as relative changes to the current state.
- *
- * Drivers that implement this must call drm_atomic_state_default_clear()
- * to clear common state.
+ * Seqno variable used as monotonic counter for the fences
+ * created on the CRTC's timeline.
*/
- void (*atomic_state_clear)(struct drm_atomic_state *state);
+ unsigned long fence_seqno;
/**
- * @atomic_state_free:
- *
- * This hook needs driver private resources and the &drm_atomic_state
- * itself. Note that the core first calls drm_atomic_state_clear() to
- * avoid code duplicate between the clear and free hooks.
+ * @timeline_name:
*
- * Drivers that implement this must call drm_atomic_state_default_free()
- * to release common resources.
+ * The name of the CRTC's fence timeline.
*/
- void (*atomic_state_free)(struct drm_atomic_state *state);
+ char timeline_name[32];
};
/**
- * struct drm_mode_config - Mode configuration control structure
- * @mutex: mutex protecting KMS related lists and structures
- * @connection_mutex: ww mutex protecting connector state and routing
- * @acquire_ctx: global implicit acquire context used by atomic drivers for
- * legacy IOCTLs
- * @fb_lock: mutex to protect fb state and lists
- * @num_fb: number of fbs available
- * @fb_list: list of framebuffers available
- * @num_encoder: number of encoders on this device
- * @encoder_list: list of encoder objects
- * @num_overlay_plane: number of overlay planes on this device
- * @num_total_plane: number of universal (i.e. with primary/curso) planes on this device
- * @plane_list: list of plane objects
- * @num_crtc: number of CRTCs on this device
- * @crtc_list: list of CRTC objects
- * @property_list: list of property objects
- * @min_width: minimum pixel width on this device
- * @min_height: minimum pixel height on this device
- * @max_width: maximum pixel width on this device
- * @max_height: maximum pixel height on this device
- * @funcs: core driver provided mode setting functions
- * @fb_base: base address of the framebuffer
- * @poll_enabled: track polling support for this device
- * @poll_running: track polling status for this device
- * @delayed_event: track delayed poll uevent deliver for this device
- * @output_poll_work: delayed work for polling in process context
- * @property_blob_list: list of all the blob property objects
- * @blob_lock: mutex for blob property allocation and management
- * @*_property: core property tracking
- * @preferred_depth: preferred RBG pixel depth, used by fb helpers
- * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
- * @cursor_width: hint to userspace for max cursor width
- * @cursor_height: hint to userspace for max cursor height
- * @helper_private: mid-layer private data
+ * struct drm_mode_set - new values for a CRTC config change
+ * @fb: framebuffer to use for new config
+ * @crtc: CRTC whose configuration we're about to change
+ * @mode: mode timings to use
+ * @x: position of this CRTC relative to @fb
+ * @y: position of this CRTC relative to @fb
+ * @connectors: array of connectors to drive with this CRTC if possible
+ * @num_connectors: size of @connectors array
*
- * Core mode resource tracking structure. All CRTC, encoders, and connectors
- * enumerated by the driver are added here, as are global properties. Some
- * global restrictions are also here, e.g. dimension restrictions.
+ * Represents a single CRTC: the connectors that it drives, the mode it
+ * uses and the framebuffer it scans out from.
+ *
+ * This is used to set modes.
*/
-struct drm_mode_config {
- struct mutex mutex; /* protects configuration (mode lists etc.) */
- struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */
- struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
-
- /**
- * @idr_mutex:
- *
- * Mutex for KMS ID allocation and management. Protects both @crtc_idr
- * and @tile_idr.
- */
- struct mutex idr_mutex;
-
- /**
- * @crtc_idr:
- *
- * Main KMS ID tracking object. Use this idr for all IDs, fb, crtc,
- * connector, modes - just makes life easier to have only one.
- */
- struct idr crtc_idr;
-
- /**
- * @tile_idr:
- *
- * Use this idr for allocating new IDs for tiled sinks like use in some
- * high-res DP MST screens.
- */
- struct idr tile_idr;
-
- struct mutex fb_lock; /* proctects global and per-file fb lists */
- int num_fb;
- struct list_head fb_list;
-
- /**
- * @num_connector: Number of connectors on this device.
- */
- int num_connector;
- /**
- * @connector_ida: ID allocator for connector indices.
- */
- struct ida connector_ida;
- /**
- * @connector_list: List of connector objects.
- */
- struct list_head connector_list;
- int num_encoder;
- struct list_head encoder_list;
-
- /*
- * Track # of overlay planes separately from # of total planes. By
- * default we only advertise overlay planes to userspace; if userspace
- * sets the "universal plane" capability bit, we'll go ahead and
- * expose all planes.
- */
- int num_overlay_plane;
- int num_total_plane;
- struct list_head plane_list;
-
- int num_crtc;
- struct list_head crtc_list;
-
- struct list_head property_list;
-
- int min_width, min_height;
- int max_width, max_height;
- const struct drm_mode_config_funcs *funcs;
- resource_size_t fb_base;
-
- /* output poll support */
- bool poll_enabled;
- bool poll_running;
- bool delayed_event;
- struct delayed_work output_poll_work;
-
- struct mutex blob_lock;
-
- /* pointers to standard properties */
- struct list_head property_blob_list;
- /**
- * @edid_property: Default connector property to hold the EDID of the
- * currently connected sink, if any.
- */
- struct drm_property *edid_property;
- /**
- * @dpms_property: Default connector property to control the
- * connector's DPMS state.
- */
- struct drm_property *dpms_property;
- /**
- * @path_property: Default connector property to hold the DP MST path
- * for the port.
- */
- struct drm_property *path_property;
- /**
- * @tile_property: Default connector property to store the tile
- * position of a tiled screen, for sinks which need to be driven with
- * multiple CRTCs.
- */
- struct drm_property *tile_property;
- /**
- * @plane_type_property: Default plane property to differentiate
- * CURSOR, PRIMARY and OVERLAY legacy uses of planes.
- */
- struct drm_property *plane_type_property;
- /**
- * @rotation_property: Optional property for planes or CRTCs to specifiy
- * rotation.
- */
- struct drm_property *rotation_property;
- /**
- * @prop_src_x: Default atomic plane property for the plane source
- * position in the connected &drm_framebuffer.
- */
- struct drm_property *prop_src_x;
- /**
- * @prop_src_y: Default atomic plane property for the plane source
- * position in the connected &drm_framebuffer.
- */
- struct drm_property *prop_src_y;
- /**
- * @prop_src_w: Default atomic plane property for the plane source
- * position in the connected &drm_framebuffer.
- */
- struct drm_property *prop_src_w;
- /**
- * @prop_src_h: Default atomic plane property for the plane source
- * position in the connected &drm_framebuffer.
- */
- struct drm_property *prop_src_h;
- /**
- * @prop_crtc_x: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
- */
- struct drm_property *prop_crtc_x;
- /**
- * @prop_crtc_y: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
- */
- struct drm_property *prop_crtc_y;
- /**
- * @prop_crtc_w: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
- */
- struct drm_property *prop_crtc_w;
- /**
- * @prop_crtc_h: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
- */
- struct drm_property *prop_crtc_h;
- /**
- * @prop_fb_id: Default atomic plane property to specify the
- * &drm_framebuffer.
- */
- struct drm_property *prop_fb_id;
- /**
- * @prop_crtc_id: Default atomic plane property to specify the
- * &drm_crtc.
- */
- struct drm_property *prop_crtc_id;
- /**
- * @prop_active: Default atomic CRTC property to control the active
- * state, which is the simplified implementation for DPMS in atomic
- * drivers.
- */
- struct drm_property *prop_active;
- /**
- * @prop_mode_id: Default atomic CRTC property to set the mode for a
- * CRTC. A 0 mode implies that the CRTC is entirely disabled - all
- * connectors must be of and active must be set to disabled, too.
- */
- struct drm_property *prop_mode_id;
-
- /**
- * @dvi_i_subconnector_property: Optional DVI-I property to
- * differentiate between analog or digital mode.
- */
- struct drm_property *dvi_i_subconnector_property;
- /**
- * @dvi_i_select_subconnector_property: Optional DVI-I property to
- * select between analog or digital mode.
- */
- struct drm_property *dvi_i_select_subconnector_property;
-
- /**
- * @tv_subconnector_property: Optional TV property to differentiate
- * between different TV connector types.
- */
- struct drm_property *tv_subconnector_property;
- /**
- * @tv_select_subconnector_property: Optional TV property to select
- * between different TV connector types.
- */
- struct drm_property *tv_select_subconnector_property;
- /**
- * @tv_mode_property: Optional TV property to select
- * the output TV mode.
- */
- struct drm_property *tv_mode_property;
- /**
- * @tv_left_margin_property: Optional TV property to set the left
- * margin.
- */
- struct drm_property *tv_left_margin_property;
- /**
- * @tv_right_margin_property: Optional TV property to set the right
- * margin.
- */
- struct drm_property *tv_right_margin_property;
- /**
- * @tv_top_margin_property: Optional TV property to set the right
- * margin.
- */
- struct drm_property *tv_top_margin_property;
- /**
- * @tv_bottom_margin_property: Optional TV property to set the right
- * margin.
- */
- struct drm_property *tv_bottom_margin_property;
- /**
- * @tv_brightness_property: Optional TV property to set the
- * brightness.
- */
- struct drm_property *tv_brightness_property;
- /**
- * @tv_contrast_property: Optional TV property to set the
- * contrast.
- */
- struct drm_property *tv_contrast_property;
- /**
- * @tv_flicker_reduction_property: Optional TV property to control the
- * flicker reduction mode.
- */
- struct drm_property *tv_flicker_reduction_property;
- /**
- * @tv_overscan_property: Optional TV property to control the overscan
- * setting.
- */
- struct drm_property *tv_overscan_property;
- /**
- * @tv_saturation_property: Optional TV property to set the
- * saturation.
- */
- struct drm_property *tv_saturation_property;
- /**
- * @tv_hue_property: Optional TV property to set the hue.
- */
- struct drm_property *tv_hue_property;
-
- /**
- * @scaling_mode_property: Optional connector property to control the
- * upscaling, mostly used for built-in panels.
- */
- struct drm_property *scaling_mode_property;
- /**
- * @aspect_ratio_property: Optional connector property to control the
- * HDMI infoframe aspect ratio setting.
- */
- struct drm_property *aspect_ratio_property;
- /**
- * @degamma_lut_property: Optional CRTC property to set the LUT used to
- * convert the framebuffer's colors to linear gamma.
- */
- struct drm_property *degamma_lut_property;
- /**
- * @degamma_lut_size_property: Optional CRTC property for the size of
- * the degamma LUT as supported by the driver (read-only).
- */
- struct drm_property *degamma_lut_size_property;
- /**
- * @ctm_property: Optional CRTC property to set the
- * matrix used to convert colors after the lookup in the
- * degamma LUT.
- */
- struct drm_property *ctm_property;
- /**
- * @gamma_lut_property: Optional CRTC property to set the LUT used to
- * convert the colors, after the CTM matrix, to the gamma space of the
- * connected screen.
- */
- struct drm_property *gamma_lut_property;
- /**
- * @gamma_lut_size_property: Optional CRTC property for the size of the
- * gamma LUT as supported by the driver (read-only).
- */
- struct drm_property *gamma_lut_size_property;
-
- /**
- * @suggested_x_property: Optional connector property with a hint for
- * the position of the output on the host's screen.
- */
- struct drm_property *suggested_x_property;
- /**
- * @suggested_y_property: Optional connector property with a hint for
- * the position of the output on the host's screen.
- */
- struct drm_property *suggested_y_property;
-
- /* dumb ioctl parameters */
- uint32_t preferred_depth, prefer_shadow;
-
- /**
- * @async_page_flip: Does this device support async flips on the primary
- * plane?
- */
- bool async_page_flip;
-
- /**
- * @allow_fb_modifiers:
- *
- * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call.
- */
- bool allow_fb_modifiers;
+struct drm_mode_set {
+ struct drm_framebuffer *fb;
+ struct drm_crtc *crtc;
+ struct drm_display_mode *mode;
- /* cursor size */
- uint32_t cursor_width, cursor_height;
+ uint32_t x;
+ uint32_t y;
- struct drm_mode_config_helper_funcs *helper_private;
+ struct drm_connector **connectors;
+ size_t num_connectors;
};
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
-extern __printf(6, 7)
+__printf(6, 7)
int drm_crtc_init_with_planes(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_plane *primary,
struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs,
const char *name, ...);
-extern void drm_crtc_cleanup(struct drm_crtc *crtc);
+void drm_crtc_cleanup(struct drm_crtc *crtc);
/**
* drm_crtc_index - find the index of a registered CRTC
@@ -1354,28 +819,17 @@ static inline unsigned int drm_crtc_index(const struct drm_crtc *crtc)
* Given a registered CRTC, return the mask bit of that CRTC for an
* encoder's possible_crtcs field.
*/
-static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc)
+static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc)
{
return 1 << drm_crtc_index(crtc);
}
-extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
- int *hdisplay, int *vdisplay);
-extern int drm_crtc_force_disable(struct drm_crtc *crtc);
-extern int drm_crtc_force_disable_all(struct drm_device *dev);
-
-extern void drm_mode_config_init(struct drm_device *dev);
-extern void drm_mode_config_reset(struct drm_device *dev);
-extern void drm_mode_config_cleanup(struct drm_device *dev);
-
-extern int drm_mode_set_config_internal(struct drm_mode_set *set);
+void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
+ int *hdisplay, int *vdisplay);
+int drm_crtc_force_disable(struct drm_crtc *crtc);
+int drm_crtc_force_disable_all(struct drm_device *dev);
-extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
- char topology[8]);
-extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
- char topology[8]);
-extern void drm_mode_put_tile_group(struct drm_device *dev,
- struct drm_tile_group *tg);
+int drm_mode_set_config_internal(struct drm_mode_set *set);
/* Helpers */
static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
diff --git a/include/drm/drm_debugfs_crc.h b/include/drm/drm_debugfs_crc.h
new file mode 100644
index 000000000000..7d63b1d4adb9
--- /dev/null
+++ b/include/drm/drm_debugfs_crc.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright © 2016 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __DRM_DEBUGFS_CRC_H__
+#define __DRM_DEBUGFS_CRC_H__
+
+#define DRM_MAX_CRC_NR 10
+
+/**
+ * struct drm_crtc_crc_entry - entry describing a frame's content
+ * @has_frame_counter: whether the source was able to provide a frame number
+ * @frame: number of the frame this CRC is about, if @has_frame_counter is true
+ * @crc: array of values that characterize the frame
+ */
+struct drm_crtc_crc_entry {
+ bool has_frame_counter;
+ uint32_t frame;
+ uint32_t crcs[DRM_MAX_CRC_NR];
+};
+
+#define DRM_CRC_ENTRIES_NR 128
+
+/**
+ * struct drm_crtc_crc - data supporting CRC capture on a given CRTC
+ * @lock: protects the fields in this struct
+ * @source: name of the currently configured source of CRCs
+ * @opened: whether userspace has opened the data file for reading
+ * @entries: array of entries, with size of %DRM_CRC_ENTRIES_NR
+ * @head: head of circular queue
+ * @tail: tail of circular queue
+ * @values_cnt: number of CRC values per entry, up to %DRM_MAX_CRC_NR
+ * @wq: workqueue used to synchronize reading and writing
+ */
+struct drm_crtc_crc {
+ spinlock_t lock;
+ const char *source;
+ bool opened;
+ struct drm_crtc_crc_entry *entries;
+ int head, tail;
+ size_t values_cnt;
+ wait_queue_head_t wq;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
+ uint32_t frame, uint32_t *crcs);
+#else
+static inline int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
+ uint32_t frame, uint32_t *crcs)
+{
+ return -EINVAL;
+}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* __DRM_DEBUGFS_CRC_H__ */
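Together with the new @set_crc_source hook in drm_crtc_funcs, a driver feeds one entry per frame into this machinery, typically from its vblank or frame-done interrupt. A hedged sketch; the foo_* helpers and the register layout are made up for illustration:

static irqreturn_t foo_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_crtc *crtc = foo_first_crtc(dev);	/* illustrative */
	uint32_t crcs[DRM_MAX_CRC_NR] = {};

	crcs[0] = foo_read_crc_register(crtc);		/* illustrative */

	/* no hardware frame counter here: has_frame = false, frame = 0 */
	drm_crtc_add_crc_entry(crtc, false, 0, crcs);

	return IRQ_HANDLED;
}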
diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/drm_dp_dual_mode_helper.h
index e8a9dfd0e055..4c42db81fcb4 100644
--- a/include/drm/drm_dp_dual_mode_helper.h
+++ b/include/drm/drm_dp_dual_mode_helper.h
@@ -40,6 +40,8 @@
#define DP_DUAL_MODE_REV_TYPE2 0x00
#define DP_DUAL_MODE_TYPE_MASK 0xf0
#define DP_DUAL_MODE_TYPE_TYPE2 0xa0
+/* This field is marked reserved in dual mode spec, used in LSPCON */
+#define DP_DUAL_MODE_TYPE_HAS_DPCD 0x08
#define DP_DUAL_MODE_IEEE_OUI 0x11 /* 11-13*/
#define DP_DUAL_IEEE_OUI_LEN 3
#define DP_DUAL_DEVICE_ID 0x14 /* 14-19 */
@@ -55,6 +57,11 @@
#define DP_DUAL_MODE_CEC_ENABLE 0x01
#define DP_DUAL_MODE_I2C_SPEED_CTRL 0x22
+/* LSPCON specific registers, defined by MCA */
+#define DP_DUAL_MODE_LSPCON_MODE_CHANGE 0x40
+#define DP_DUAL_MODE_LSPCON_CURRENT_MODE 0x41
+#define DP_DUAL_MODE_LSPCON_MODE_PCON 0x1
+
struct i2c_adapter;
ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
@@ -63,6 +70,20 @@ ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
u8 offset, const void *buffer, size_t size);
/**
+ * enum drm_lspcon_mode
+ * @DRM_LSPCON_MODE_INVALID: No LSPCON.
+ * @DRM_LSPCON_MODE_LS: Level shifter mode of LSPCON
+ * which drives DP++ to HDMI 1.4 conversion.
+ * @DRM_LSPCON_MODE_PCON: Protocol converter mode of LSPCON
+ * which drives DP++ to HDMI 2.0 active conversion.
+ */
+enum drm_lspcon_mode {
+ DRM_LSPCON_MODE_INVALID,
+ DRM_LSPCON_MODE_LS,
+ DRM_LSPCON_MODE_PCON,
+};
+
+/**
* enum drm_dp_dual_mode_type - Type of the DP dual mode adaptor
* @DRM_DP_DUAL_MODE_NONE: No DP dual mode adaptor
* @DRM_DP_DUAL_MODE_UNKNOWN: Could be either none or type 1 DVI adaptor
@@ -70,6 +91,7 @@ ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
* @DRM_DP_DUAL_MODE_TYPE1_HDMI: Type 1 HDMI adaptor
* @DRM_DP_DUAL_MODE_TYPE2_DVI: Type 2 DVI adaptor
* @DRM_DP_DUAL_MODE_TYPE2_HDMI: Type 2 HDMI adaptor
+ * @DRM_DP_DUAL_MODE_LSPCON: Level shifter / protocol converter
*/
enum drm_dp_dual_mode_type {
DRM_DP_DUAL_MODE_NONE,
@@ -78,6 +100,7 @@ enum drm_dp_dual_mode_type {
DRM_DP_DUAL_MODE_TYPE1_HDMI,
DRM_DP_DUAL_MODE_TYPE2_DVI,
DRM_DP_DUAL_MODE_TYPE2_HDMI,
+ DRM_DP_DUAL_MODE_LSPCON,
};
enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter);
@@ -89,4 +112,8 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter, bool enable);
const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type);
+int drm_lspcon_get_mode(struct i2c_adapter *adapter,
+ enum drm_lspcon_mode *current_mode);
+int drm_lspcon_set_mode(struct i2c_adapter *adapter,
+ enum drm_lspcon_mode reqd_mode);
#endif
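A hedged sketch of how an encoder driver might use the new LSPCON helpers to ensure the dongle is in protocol converter (PCON) mode before enabling HDMI 2.0 rates; the adapter is assumed to be the DDC i2c adapter of the port:

	enum drm_lspcon_mode mode;
	int ret;

	ret = drm_lspcon_get_mode(adapter, &mode);
	if (ret)
		return ret;

	if (mode != DRM_LSPCON_MODE_PCON)
		ret = drm_lspcon_set_mode(adapter, DRM_LSPCON_MODE_PCON);

	return ret;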
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 2a79882cb68e..55bbeb0ff594 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -690,6 +690,12 @@ drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED;
}
+static inline bool
+drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
+}
+
/*
* DisplayPort AUX channel
*/
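drm_dp_is_branch() is a small convenience wrapper around the downstream-port-present bit of the DPCD. A sketch of a typical use right after reading the receiver capabilities over AUX:

	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	bool is_branch;

	if (drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd)) < 0)
		return -EIO;

	/* branch devices (hubs, dongles) expose downstream facing ports */
	is_branch = drm_dp_is_branch(dpcd);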
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
new file mode 100644
index 000000000000..c4fc49583dc0
--- /dev/null
+++ b/include/drm/drm_drv.h
@@ -0,0 +1,435 @@
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009-2010, Code Aurora Forum.
+ * Copyright 2016 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_DRV_H_
+#define _DRM_DRV_H_
+
+#include <linux/list.h>
+#include <linux/irqreturn.h>
+
+struct drm_device;
+struct drm_file;
+struct drm_gem_object;
+struct drm_master;
+struct drm_minor;
+struct dma_buf_attachment;
+struct drm_display_mode;
+struct drm_mode_create_dumb;
+
+/* driver capabilities and requirements mask */
+#define DRIVER_USE_AGP 0x1
+#define DRIVER_LEGACY 0x2
+#define DRIVER_PCI_DMA 0x8
+#define DRIVER_SG 0x10
+#define DRIVER_HAVE_DMA 0x20
+#define DRIVER_HAVE_IRQ 0x40
+#define DRIVER_IRQ_SHARED 0x80
+#define DRIVER_GEM 0x1000
+#define DRIVER_MODESET 0x2000
+#define DRIVER_PRIME 0x4000
+#define DRIVER_RENDER 0x8000
+#define DRIVER_ATOMIC 0x10000
+#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
+
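These capability flags, carried over from drmP.h, are OR'ed into the driver_features field of the struct drm_driver defined below. A minimal sketch of a modern KMS driver's feature mask; the name/desc/date strings are purely illustrative:

static struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
			   DRIVER_ATOMIC | DRIVER_RENDER,
	.name		 = "foo",
	.desc		 = "Illustrative KMS driver",
	.date		 = "20161201",
	.major		 = 1,
	.minor		 = 0,
};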
+/**
+ * struct drm_driver - DRM driver structure
+ *
+ * This structure represents the common code for a family of cards. There will
+ * be one drm_device for each card present in this family. It contains lots of
+ * vfunc entries, and a pile of those probably should be moved to more
+ * appropriate places like &drm_mode_config_funcs or into a new operations
+ * structure for GEM drivers.
+ */
+struct drm_driver {
+ int (*load) (struct drm_device *, unsigned long flags);
+ int (*firstopen) (struct drm_device *);
+ int (*open) (struct drm_device *, struct drm_file *);
+ void (*preclose) (struct drm_device *, struct drm_file *file_priv);
+ void (*postclose) (struct drm_device *, struct drm_file *);
+ void (*lastclose) (struct drm_device *);
+ int (*unload) (struct drm_device *);
+ int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
+ int (*dma_quiescent) (struct drm_device *);
+ int (*context_dtor) (struct drm_device *dev, int context);
+ int (*set_busid)(struct drm_device *dev, struct drm_master *master);
+
+ /**
+ * @get_vblank_counter:
+ *
+ * Driver callback for fetching a raw hardware vblank counter for the
+ * CRTC specified with the pipe argument. If a device doesn't have a
+ * hardware counter, the driver can simply use the
+ * drm_vblank_no_hw_counter() function. The DRM core will account for
+ * missed vblank events while interrupts were disabled, based on system
+ * timestamps.
+ *
+ * Wraparound handling and loss of events due to modesetting is dealt
+ * with in the DRM core code, as long as drivers call
+ * drm_crtc_vblank_off() and drm_crtc_vblank_on() when disabling or
+ * enabling a CRTC.
+ *
+ * Returns:
+ *
+ * Raw vblank counter value.
+ */
+ u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe);
+
+ /**
+ * @enable_vblank:
+ *
+ * Enable vblank interrupts for the CRTC specified with the pipe
+ * argument.
+ *
+ * Returns:
+ *
+ * Zero on success, appropriate errno if the given CRTC's vblank
+ * interrupt cannot be enabled.
+ */
+ int (*enable_vblank) (struct drm_device *dev, unsigned int pipe);
+
+ /**
+ * @disable_vblank:
+ *
+ * Disable vblank interrupts for the CRTC specified with the pipe
+ * argument.
+ */
+ void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
+
+ /**
+ * @device_is_agp:
+ *
+ * Called by drm_device_is_agp(). Typically used to determine if a card
+ * is really attached to AGP or not.
+ *
+ * Returns:
+ *
+ * One of three values is returned depending on whether or not the
+ * card is absolutely not AGP (return of 0), absolutely is AGP
+ * (return of 1), or may or may not be AGP (return of 2).
+ */
+ int (*device_is_agp) (struct drm_device *dev);
+
+ /**
+ * @get_scanout_position:
+ *
+ * Called by vblank timestamping code.
+ *
+ * Returns the current display scanout position from a crtc, and an
+ * optional accurate ktime_get() timestamp of when position was
+ * measured. Note that this is a helper callback which is only used if a
+ * driver uses drm_calc_vbltimestamp_from_scanoutpos() for the
+ * @get_vblank_timestamp callback.
+ *
+ * Parameters:
+ *
+ * dev:
+ * DRM device.
+ * pipe:
+ * Id of the crtc to query.
+ * flags:
+ * Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ * vpos:
+ * Target location for current vertical scanout position.
+ * hpos:
+ * Target location for current horizontal scanout position.
+ * stime:
+ * Target location for timestamp taken immediately before
+ * scanout position query. Can be NULL to skip timestamp.
+ * etime:
+ * Target location for timestamp taken immediately after
+ * scanout position query. Can be NULL to skip timestamp.
+ * mode:
+ * Current display timings.
+ *
+ * Returns vpos as a positive number while in active scanout area.
+ * Returns vpos as a negative number inside vblank, counting the number
+ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+ * until start of active scanout / end of vblank."
+ *
+ * Returns:
+ *
+ * Flags, or'ed together as follows:
+ *
+ * DRM_SCANOUTPOS_VALID:
+ * Query successful.
+ * DRM_SCANOUTPOS_INVBL:
+ * Inside vblank.
+ * DRM_SCANOUTPOS_ACCURATE:
+ * Returned position is accurate. A lack of this flag means that the
+ * returned position may be offset by a constant but unknown small
+ * number of scanlines wrt. the real scanout position.
+ *
+ */
+ int (*get_scanout_position) (struct drm_device *dev, unsigned int pipe,
+ unsigned int flags, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+
+ /**
+ * @get_vblank_timestamp:
+ *
+ * Called by drm_get_last_vbltimestamp(). Should return a precise
+ * timestamp when the most recent VBLANK interval ended or will end.
+ *
+ * Specifically, the timestamp in @vblank_time should correspond as
+ * closely as possible to the time when the first video scanline of
+ * the video frame after the end of VBLANK will start scanning out,
+ * the time immediately after end of the VBLANK interval. If the
+ * @crtc is currently inside VBLANK, this will be a time in the future.
+ * If the @crtc is currently scanning out a frame, this will be the
+ * past start time of the current scanout. This is meant to adhere
+ * to the OpenML OML_sync_control extension specification.
+ *
+ * Parameters:
+ *
+ * dev:
+ * DRM device handle.
+ * pipe:
+ * crtc for which timestamp should be returned.
+ * max_error:
+ * Maximum allowable timestamp error in nanoseconds.
+ * Implementation should strive to provide timestamp
+ * with an error of at most max_error nanoseconds.
+ * On return, holds the true upper bound on the timestamp error.
+ * vblank_time:
+ * Target location for returned vblank timestamp.
+ * flags:
+ * 0 = Defaults, no special treatment needed.
+ * DRM_CALLED_FROM_VBLIRQ = Function is called from vblank
+ * irq handler. Some drivers need to apply some workarounds
+ * for gpu-specific vblank irq quirks if flag is set.
+ *
+ * Returns:
+ *
+ * Zero if timestamping isn't supported in the current display mode, or
+ * a negative number on failure. On success, a positive status code
+ * which describes how the vblank_time timestamp was computed.
+ */
+ int (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags);
+
+ /* these have to be filled in */
+
+ irqreturn_t(*irq_handler) (int irq, void *arg);
+ void (*irq_preinstall) (struct drm_device *dev);
+ int (*irq_postinstall) (struct drm_device *dev);
+ void (*irq_uninstall) (struct drm_device *dev);
+
+ /**
+ * @master_create:
+ *
+ * Called whenever a new master is created. Only used by vmwgfx.
+ */
+ int (*master_create)(struct drm_device *dev, struct drm_master *master);
+
+ /**
+ * @master_destroy:
+ *
+ * Called whenever a master is destroyed. Only used by vmwgfx.
+ */
+ void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
+
+ /**
+ * @master_set:
+ *
+ * Called whenever the minor master is set. Only used by vmwgfx.
+ */
+ int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
+ bool from_open);
+ /**
+ * @master_drop:
+ *
+ * Called whenever the minor master is dropped. Only used by vmwgfx.
+ */
+ void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv);
+
+ int (*debugfs_init)(struct drm_minor *minor);
+ void (*debugfs_cleanup)(struct drm_minor *minor);
+
+ /**
+ * @gem_free_object: destructor for drm_gem_objects
+ *
+ * This is deprecated and should not be used by new drivers. Use
+ * @gem_free_object_unlocked instead.
+ */
+ void (*gem_free_object) (struct drm_gem_object *obj);
+
+ /**
+ * @gem_free_object_unlocked: destructor for drm_gem_objects
+ *
+ * This is for drivers which are not encumbered with dev->struct_mutex
+ * legacy locking schemes. Use this hook instead of @gem_free_object.
+ */
+ void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
+
+ int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
+ void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
+
+ /**
+ * @gem_create_object: constructor for gem objects
+ *
+ * Hook for allocating the GEM object struct, for use by core
+ * helpers.
+ */
+ struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
+ size_t size);
+
+ /* prime: */
+ /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */
+ int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
+ uint32_t handle, uint32_t flags, int *prime_fd);
+ /* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */
+ int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
+ int prime_fd, uint32_t *handle);
+ /* export GEM -> dmabuf */
+ struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
+ /* import dmabuf -> GEM */
+ struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+ /* low-level interface used by drm_gem_prime_{import,export} */
+ int (*gem_prime_pin)(struct drm_gem_object *obj);
+ void (*gem_prime_unpin)(struct drm_gem_object *obj);
+ struct reservation_object * (*gem_prime_res_obj)(
+ struct drm_gem_object *obj);
+ struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
+ struct drm_gem_object *(*gem_prime_import_sg_table)(
+ struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+ void *(*gem_prime_vmap)(struct drm_gem_object *obj);
+ void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
+ int (*gem_prime_mmap)(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
+
+ /* vga arb irq handler */
+ void (*vgaarb_irq)(struct drm_device *dev, bool state);
+
+ /**
+ * @dumb_create:
+ *
+ * This creates a new dumb buffer in the driver's backing storage manager (GEM,
+ * TTM or something else entirely) and returns the resulting buffer handle. This
+ * handle can then be wrapped up into a framebuffer modeset object.
+ *
+ * Note that userspace is not allowed to use such objects for render
+ * acceleration - drivers must create their own private ioctls for such a use
+ * case.
+ *
+ * Width, height and depth are specified in the &drm_mode_create_dumb
+ * argument. The callback needs to fill the handle, pitch and size for
+ * the created buffer.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ *
+ * Zero on success, negative errno on failure.
+ */
+ int (*dumb_create)(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+ /**
+ * @dumb_map_offset:
+ *
+ * Allocate an offset in the drm device node's address space to be able to
+ * memory map a dumb buffer. GEM-based drivers must use
+ * drm_gem_create_mmap_offset() to implement this.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ *
+ * Zero on success, negative errno on failure.
+ */
+ int (*dumb_map_offset)(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
+ /**
+ * @dumb_destroy:
+ *
+ * This destroys the userspace handle for the given dumb backing storage buffer.
+ * Since buffer objects must be reference counted in the kernel a buffer object
+ * won't be immediately freed if a framebuffer modeset object still uses it.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ *
+ * Zero on success, negative errno on failure.
+ */
+ int (*dumb_destroy)(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
+
+ /* Driver private ops for this object */
+ const struct vm_operations_struct *gem_vm_ops;
+
+ int major;
+ int minor;
+ int patchlevel;
+ char *name;
+ char *desc;
+ char *date;
+
+ u32 driver_features;
+ int dev_priv_size;
+ const struct drm_ioctl_desc *ioctls;
+ int num_ioctls;
+ const struct file_operations *fops;
+
+ /* List of devices hanging off this driver with stealth attach. */
+ struct list_head legacy_dev_list;
+};
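+
+/*
+ * A minimal sketch of how a KMS driver might fill out this structure. The
+ * foo_*() functions and &foo_fops are hypothetical driver code; the
+ * drm_vblank_no_hw_counter() and drm_gem_cma_*() helpers are the generic
+ * fallbacks referenced in the hook documentation above, assuming a
+ * CMA-backed GEM driver (drm_gem_cma_helper.h)::
+ *
+ *	static struct drm_driver foo_driver = {
+ *		.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ *		.get_vblank_counter	= drm_vblank_no_hw_counter,
+ *		.enable_vblank		= foo_enable_vblank,
+ *		.disable_vblank		= foo_disable_vblank,
+ *		.gem_free_object_unlocked = drm_gem_cma_free_object,
+ *		.gem_vm_ops		= &drm_gem_cma_vm_ops,
+ *		.dumb_create		= drm_gem_cma_dumb_create,
+ *		.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
+ *		.dumb_destroy		= drm_gem_dumb_destroy,
+ *		.fops			= &foo_fops,
+ *		.name			= "foo",
+ *		.desc			= "Hypothetical example driver",
+ *		.date			= "20161201",
+ *		.major			= 1,
+ *		.minor			= 0,
+ *	};
+ */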
+
+extern __printf(6, 7)
+void drm_dev_printk(const struct device *dev, const char *level,
+ unsigned int category, const char *function_name,
+ const char *prefix, const char *format, ...);
+extern __printf(3, 4)
+void drm_printk(const char *level, unsigned int category,
+ const char *format, ...);
+extern unsigned int drm_debug;
+
+int drm_dev_init(struct drm_device *dev,
+ struct drm_driver *driver,
+ struct device *parent);
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+ struct device *parent);
+int drm_dev_register(struct drm_device *dev, unsigned long flags);
+void drm_dev_unregister(struct drm_device *dev);
+
+void drm_dev_ref(struct drm_device *dev);
+void drm_dev_unref(struct drm_device *dev);
+void drm_put_dev(struct drm_device *dev);
+void drm_unplug_dev(struct drm_device *dev);
+
+int drm_dev_set_unique(struct drm_device *dev, const char *name);
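+
+/*
+ * A minimal sketch of the typical registration flow using the functions
+ * above, run from a driver's probe function; &foo_driver and the parent
+ * struct device are placeholders for real driver code, and error
+ * unwinding is only sketched::
+ *
+ *	struct drm_device *drm;
+ *	int ret;
+ *
+ *	drm = drm_dev_alloc(&foo_driver, parent);
+ *	if (IS_ERR(drm))
+ *		return PTR_ERR(drm);
+ *
+ *	ret = drm_dev_register(drm, 0);
+ *	if (ret) {
+ *		drm_dev_unref(drm);
+ *		return ret;
+ *	}
+ *
+ * On teardown the driver calls drm_dev_unregister() followed by
+ * drm_dev_unref() to drop its final reference.
+ */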
+
+
+#endif
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index c3a7d440bc11..2705a66b770b 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -248,6 +248,7 @@ struct detailed_timing {
# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */
#define DRM_ELD_SPEAKER 7
+# define DRM_ELD_SPEAKER_MASK 0x7f
# define DRM_ELD_SPEAKER_RLRC (1 << 6)
# define DRM_ELD_SPEAKER_FLRC (1 << 5)
# define DRM_ELD_SPEAKER_RC (1 << 4)
@@ -330,7 +331,6 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
int drm_av_sync_delay(struct drm_connector *connector,
const struct drm_display_mode *mode);
-struct drm_connector *drm_select_eld(struct drm_encoder *encoder);
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
int drm_load_edid_firmware(struct drm_connector *connector);
@@ -415,6 +415,18 @@ static inline int drm_eld_size(const uint8_t *eld)
}
/**
+ * drm_eld_get_spk_alloc - Get speaker allocation
+ * @eld: pointer to an ELD memory structure
+ *
+ * The returned value is the speaker allocation mask. Callers should use the
+ * %DRM_ELD_SPEAKER field definitions to identify which speakers are present.
+ */
+static inline u8 drm_eld_get_spk_alloc(const uint8_t *eld)
+{
+ return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK;
+}
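+
+/*
+ * A small usage sketch, assuming @eld was obtained from the connector's
+ * ELD (e.g. via the audio component's @get_eld hook); checks for a
+ * rear-center speaker using the mask returned above::
+ *
+ *	u8 spk_alloc = drm_eld_get_spk_alloc(eld);
+ *
+ *	if (spk_alloc & DRM_ELD_SPEAKER_RC)
+ *		pr_debug("sink has a rear center speaker\n");
+ */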
+
+/**
* drm_eld_get_conn_type - Get device type hdmi/dp connected
* @eld: pointer to an ELD memory structure
*
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 387e33a4d6ee..c7438ff0d609 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -189,7 +189,7 @@ static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
}
/* FIXME: We have an include file mess still, drm_crtc.h needs untangling. */
-static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc);
+static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc);
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index f313211f8ed5..3b00f6480b83 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -12,6 +12,8 @@ struct drm_fb_helper;
struct drm_device;
struct drm_file;
struct drm_mode_fb_cmd2;
+struct drm_plane;
+struct drm_plane_state;
struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
unsigned int preferred_bpp, unsigned int num_crtc,
@@ -41,6 +43,9 @@ struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane);
+int drm_fb_cma_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
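+
+/*
+ * drm_fb_cma_prepare_fb() is meant to be plugged directly into the plane
+ * helpers' @prepare_fb hook. A minimal sketch, where
+ * foo_plane_atomic_check()/foo_plane_atomic_update() are hypothetical
+ * driver functions::
+ *
+ *	static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
+ *		.prepare_fb	= drm_fb_cma_prepare_fb,
+ *		.atomic_check	= foo_plane_atomic_check,
+ *		.atomic_update	= foo_plane_atomic_update,
+ *	};
+ */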
+
#ifdef CONFIG_DEBUG_FS
struct seq_file;
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index ed8edfef75b2..975deedd593e 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -228,7 +228,9 @@ struct drm_fb_helper {
.fb_set_par = drm_fb_helper_set_par, \
.fb_setcmap = drm_fb_helper_setcmap, \
.fb_blank = drm_fb_helper_blank, \
- .fb_pan_display = drm_fb_helper_pan_display
+ .fb_pan_display = drm_fb_helper_pan_display, \
+ .fb_debug_enter = drm_fb_helper_debug_enter, \
+ .fb_debug_leave = drm_fb_helper_debug_leave
#ifdef CONFIG_DRM_FBDEV_EMULATION
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 30c30fa87ee8..fcc08da850c8 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -25,14 +25,43 @@
#include <linux/types.h>
#include <uapi/drm/drm_fourcc.h>
+/**
+ * struct drm_format_info - information about a DRM format
+ * @format: 4CC format identifier (DRM_FORMAT_*)
+ * @depth: Color depth (number of bits per pixel excluding padding bits),
+ * valid for a subset of RGB formats only. This is a legacy field, do not
+ * use in new code and set to 0 for new formats.
+ * @num_planes: Number of color planes (1 to 3)
+ * @cpp: Number of bytes per pixel (per plane)
+ * @hsub: Horizontal chroma subsampling factor
+ * @vsub: Vertical chroma subsampling factor
+ */
+struct drm_format_info {
+ u32 format;
+ u8 depth;
+ u8 num_planes;
+ u8 cpp[3];
+ u8 hsub;
+ u8 vsub;
+};
+
+/**
+ * struct drm_format_name_buf - name of a DRM format
+ * @str: string buffer containing the format name
+ */
+struct drm_format_name_buf {
+ char str[32];
+};
+
+const struct drm_format_info *__drm_format_info(u32 format);
+const struct drm_format_info *drm_format_info(u32 format);
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
-void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp);
int drm_format_num_planes(uint32_t format);
int drm_format_plane_cpp(uint32_t format, int plane);
int drm_format_horz_chroma_subsampling(uint32_t format);
int drm_format_vert_chroma_subsampling(uint32_t format);
int drm_format_plane_width(int width, uint32_t format, int plane);
int drm_format_plane_height(int height, uint32_t format, int plane);
-char *drm_get_format_name(uint32_t format) __malloc;
+const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf);
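+
+/*
+ * A short sketch of the new lookup helpers, assuming a DRM_DEBUG_KMS()
+ * logging context; drm_format_info() returns NULL for unknown formats, and
+ * the name buffer is now provided by the caller::
+ *
+ *	struct drm_format_name_buf buf;
+ *	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB8888);
+ *
+ *	if (info)
+ *		DRM_DEBUG_KMS("%s: %d plane(s), cpp[0]=%d\n",
+ *			      drm_get_format_name(info->format, &buf),
+ *			      info->num_planes, info->cpp[0]);
+ */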
#endif /* __DRM_FOURCC_H__ */
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index f5ae1f436a4b..1ddfa2928802 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -149,12 +149,12 @@ struct drm_framebuffer {
*/
unsigned int offsets[4];
/**
- * @modifier: Data layout modifier, per buffer. This is used to describe
+ * @modifier: Data layout modifier. This is used to describe
* tiling, or also special layouts (like compression) of auxiliary
* buffers. For userspace created object this is copied from
* drm_mode_fb_cmd2.
*/
- uint64_t modifier[4];
+ uint64_t modifier;
/**
* @width: Logical width of the visible area of the framebuffer, in
* pixels.
@@ -251,6 +251,24 @@ static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb)
}
/**
+ * drm_framebuffer_assign - store a reference to the fb
+ * @p: location to store framebuffer
+ * @fb: new framebuffer (maybe NULL)
+ *
+ * This function takes a reference on the new framebuffer (if any), stores it
+ * at the given location and unreferences the framebuffer that was previously
+ * stored in that location.
+ */
+static inline void drm_framebuffer_assign(struct drm_framebuffer **p,
+ struct drm_framebuffer *fb)
+{
+ if (fb)
+ drm_framebuffer_reference(fb);
+ if (*p)
+ drm_framebuffer_unreference(*p);
+ *p = fb;
+}
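+
+/*
+ * Usage sketch, where @foo is a hypothetical structure with a
+ * struct drm_framebuffer *fb member. The first call takes a reference on
+ * new_fb and drops the reference on the previously stored framebuffer;
+ * passing NULL just drops the stored reference::
+ *
+ *	drm_framebuffer_assign(&foo->fb, new_fb);
+ *	drm_framebuffer_assign(&foo->fb, NULL);
+ */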
+
+/**
* drm_for_each_fb - iterate over all framebuffers
* @fb: the loop cursor
* @dev: the DRM device
diff --git a/include/drm/drm_irq.h b/include/drm/drm_irq.h
index 2401b14d301f..293d08caab60 100644
--- a/include/drm/drm_irq.h
+++ b/include/drm/drm_irq.h
@@ -130,42 +130,37 @@ struct drm_vblank_crtc {
bool enabled;
};
-extern int drm_irq_install(struct drm_device *dev, int irq);
-extern int drm_irq_uninstall(struct drm_device *dev);
+int drm_irq_install(struct drm_device *dev, int irq);
+int drm_irq_uninstall(struct drm_device *dev);
-extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
-extern int drm_wait_vblank(struct drm_device *dev, void *data,
- struct drm_file *filp);
-extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
-extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
- struct timeval *vblanktime);
-extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
- struct drm_pending_vblank_event *e);
-extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
- struct drm_pending_vblank_event *e);
-extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
-extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
-extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
-extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
-extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
-extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
-extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
-extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
-extern void drm_vblank_cleanup(struct drm_device *dev);
-extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
+int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
+u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
+u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
+ struct timeval *vblanktime);
+void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e);
+void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e);
+bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
+bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
+int drm_crtc_vblank_get(struct drm_crtc *crtc);
+void drm_crtc_vblank_put(struct drm_crtc *crtc);
+void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
+void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
+void drm_crtc_vblank_off(struct drm_crtc *crtc);
+void drm_crtc_vblank_reset(struct drm_crtc *crtc);
+void drm_crtc_vblank_on(struct drm_crtc *crtc);
+void drm_vblank_cleanup(struct drm_device *dev);
+u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
+u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
-extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
- unsigned int pipe, int *max_error,
- struct timeval *vblank_time,
- unsigned flags,
- const struct drm_display_mode *mode);
-extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
- const struct drm_display_mode *mode);
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+ unsigned int pipe, int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags,
+ const struct drm_display_mode *mode);
+void drm_calc_timestamping_constants(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
/**
* drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 205ddcf6d55d..0b8371795aeb 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -44,6 +44,9 @@
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif
+#ifdef CONFIG_DRM_DEBUG_MM
+#include <linux/stackdepot.h>
+#endif
enum drm_mm_search_flags {
DRM_MM_SEARCH_DEFAULT = 0,
@@ -74,6 +77,9 @@ struct drm_mm_node {
u64 size;
u64 __subtree_last;
struct drm_mm *mm;
+#ifdef CONFIG_DRM_DEBUG_MM
+ depot_stack_handle_t stack;
+#endif
};
struct drm_mm {
@@ -302,10 +308,26 @@ void drm_mm_takedown(struct drm_mm *mm);
bool drm_mm_clean(struct drm_mm *mm);
struct drm_mm_node *
-drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);
+__drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);
-struct drm_mm_node *
-drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last);
+/**
+ * drm_mm_for_each_node_in_range - iterator to walk over a range of
+ * allocated nodes
+ * @node__: drm_mm_node structure to assign to in each iteration step
+ * @mm__: drm_mm allocator to walk
+ * @start__: starting offset, the first node will overlap this
+ * @end__: ending offset, the last node will start before this (but may overlap)
+ *
+ * This iterator walks over all nodes in the range allocator that lie
+ * between @start and @end. It is implemented similarly to list_for_each(),
+ * but using the internal interval tree to accelerate the search for the
+ * starting node, and so is not safe against removal of elements. It assumes
+ * that @end is within (or is the upper limit of) the drm_mm allocator.
+ */
+#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__) \
+ for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
+ node__ && node__->start < (end__); \
+ node__ = list_next_entry(node__, node_list))
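+
+/*
+ * Usage sketch, assuming @mm is an initialized &drm_mm and DRM_DEBUG()
+ * logging; walks every allocated node overlapping the first 4 KiB of the
+ * managed range::
+ *
+ *	struct drm_mm_node *node;
+ *
+ *	drm_mm_for_each_node_in_range(node, &mm, 0, 4096)
+ *		DRM_DEBUG("node [%llx + %llx]\n", node->start, node->size);
+ */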
void drm_mm_init_scan(struct drm_mm *mm,
u64 size,
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
new file mode 100644
index 000000000000..bf9991b20611
--- /dev/null
+++ b/include/drm/drm_mode_config.h
@@ -0,0 +1,663 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_MODE_CONFIG_H__
+#define __DRM_MODE_CONFIG_H__
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/idr.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_modeset_lock.h>
+
+struct drm_file;
+struct drm_device;
+struct drm_atomic_state;
+struct drm_mode_fb_cmd2;
+
+/**
+ * struct drm_mode_config_funcs - basic driver provided mode setting functions
+ *
+ * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
+ * involve drivers.
+ */
+struct drm_mode_config_funcs {
+ /**
+ * @fb_create:
+ *
+ * Create a new framebuffer object. The core does basic checks on the
+ * requested metadata, but most of that is left to the driver. See
+ * struct &drm_mode_fb_cmd2 for details.
+ *
+ * If the parameters are deemed valid and the backing storage objects in
+ * the underlying memory manager all exist, then the driver allocates
+ * a new &drm_framebuffer structure, subclassed to contain
+ * driver-specific information (like the internal native buffer object
+ * references). It also needs to fill out all relevant metadata, which
+ * should be done by calling drm_helper_mode_fill_fb_struct().
+ *
+ * The initialization is finalized by calling drm_framebuffer_init(),
+ * which registers the framebuffer and makes it accessible to other
+ * threads.
+ *
+ * RETURNS:
+ *
+ * A new framebuffer with an initial reference count of 1 or a negative
+ * error code encoded with ERR_PTR().
+ */
+ struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
+ struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
+
+ /**
+ * @output_poll_changed:
+ *
+ * Callback used by helpers to inform the driver of output configuration
+ * changes.
+ *
+ * Drivers implementing fbdev emulation with the helpers can call
+ * drm_fb_helper_hotplug_event() from this hook to inform the fbdev
+ * helper of output changes.
+ *
+ * FIXME:
+ *
+ * Except that there's no vtable for device-level helper callbacks,
+ * there's no reason this is a core function.
+ */
+ void (*output_poll_changed)(struct drm_device *dev);
+
+ /**
+ * @atomic_check:
+ *
+ * This is the only hook to validate an atomic modeset update. This
+ * function must reject any modeset and state changes which the hardware
+ * or driver doesn't support. This includes but is of course not limited
+ * to:
+ *
+ * - Checking that the modes, framebuffers, scaling and placement
+ * requirements and so on are within the limits of the hardware.
+ *
+ * - Checking that any hidden shared resources are not oversubscribed.
+ * This can be shared PLLs, shared lanes, overall memory bandwidth,
+ * display fifo space (where shared between planes or maybe even
+ * CRTCs).
+ *
+ * - Checking that virtualized resources exported to userspace are not
+ * oversubscribed. For various reasons it can make sense to expose
+ * more planes, crtcs or encoders than are physically there. One
+ * example is dual-pipe operations (which generally should be hidden
+ * from userspace when lockstepped in hardware, exposed otherwise),
+ * where a plane might need 1 hardware plane (if it's just on one
+ * pipe), 2 hardware planes (when it spans both pipes) or maybe even
+ * share a hardware plane with a 2nd plane (if there's a compatible
+ * plane requested on the area handled by the other pipe).
+ *
+ * - Check that any transitional state is possible and that if
+ * requested, the update can indeed be done in the vblank period
+ * without temporarily disabling some functions.
+ *
+ * - Check any other constraints the driver or hardware might have.
+ *
+ * - This callback also needs to correctly fill out the &drm_crtc_state
+ * in this update to make sure that drm_atomic_crtc_needs_modeset()
+ * reflects the nature of the possible update and returns true if and
+ * only if the update cannot be applied without tearing within one
+ * vblank on that CRTC. The core uses that information to reject
+ * updates which require a full modeset (i.e. blanking the screen, or
+ * at least pausing updates for a substantial amount of time) if
+ * userspace has disallowed that in its request.
+ *
+ * - The driver also does not need to repeat basic input validation
+ * like that done for the corresponding legacy entry points. The core does
+ * that before calling this hook.
+ *
+ * See the documentation of @atomic_commit for an exhaustive list of
+ * error conditions which don't have to be checked at the
+ * ->atomic_check() stage.
+ *
+ * See the documentation for struct &drm_atomic_state for how exactly
+ * an atomic modeset update is described.
+ *
+ * Drivers using the atomic helpers can implement this hook using
+ * drm_atomic_helper_check(), or one of the exported sub-functions of
+ * it.
+ *
+ * RETURNS:
+ *
+ * 0 on success or one of the below negative error codes:
+ *
+ * - -EINVAL, if any of the above constraints are violated.
+ *
+ * - -EDEADLK, when returned from an attempt to acquire an additional
+ * &drm_modeset_lock through drm_modeset_lock().
+ *
+ * - -ENOMEM, if allocating additional state sub-structures failed due
+ * to lack of memory.
+ *
+ * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted.
+ * This can either be due to a pending signal, or because the driver
+ * needs to completely bail out to recover from an exceptional
+ * situation like a GPU hang. From a userspace point of view all errors
+ * are treated equally.
+ */
+ int (*atomic_check)(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+ /**
+ * @atomic_commit:
+ *
+ * This is the only hook to commit an atomic modeset update. The core
+ * guarantees that @atomic_check has been called successfully before
+ * calling this function, and that nothing has been changed in the
+ * interim.
+ *
+ * See the documentation for struct &drm_atomic_state for how exactly
+ * an atomic modeset update is described.
+ *
+ * Drivers using the atomic helpers can implement this hook using
+ * drm_atomic_helper_commit(), or one of the exported sub-functions of
+ * it.
+ *
+ * Nonblocking commits (as indicated with the nonblock parameter) must
+ * do any preparatory work which might result in an unsuccessful commit
+ * in the context of this callback. The only exceptions are hardware
+ * errors resulting in -EIO. But even in that case the driver must
+ * ensure that the display pipe is at least running, to avoid
+ * compositors crashing when pageflips don't work. Anything else,
+ * specifically committing the update to the hardware, should be done
+ * without blocking the caller. For updates which do not require a
+ * modeset this must be guaranteed.
+ *
+ * The driver must wait for any pending rendering to the new
+ * framebuffers to complete before executing the flip. It should also
+ * wait for any pending rendering from other drivers if the underlying
+ * buffer is a shared dma-buf. Nonblocking commits must not wait for
+ * rendering in the context of this callback.
+ *
+ * An application can request to be notified when the atomic commit has
+ * completed. These events are per-CRTC and can be distinguished by the
+ * CRTC index supplied in &drm_event to userspace.
+ *
+ * The drm core will supply a struct &drm_event in the event
+ * member of each CRTC's &drm_crtc_state structure. See the
+ * documentation for &drm_crtc_state for more details about the precise
+ * semantics of this event.
+ *
+ * NOTE:
+ *
+ * Drivers are not allowed to shut down any display pipe successfully
+ * enabled through an atomic commit on their own. Doing so can result in
+ * compositors crashing if a page flip is suddenly rejected because the
+ * pipe is off.
+ *
+ * RETURNS:
+ *
+ * 0 on success or one of the below negative error codes:
+ *
+ * - -EBUSY, if a nonblocking update is requested and there is
+ * an earlier update pending. Drivers are allowed to support a queue
+ * of outstanding updates, but currently no driver supports that.
+ * Note that drivers must wait for preceding updates to complete if a
+ * synchronous update is requested, they are not allowed to fail the
+ * commit in that case.
+ *
+ * - -ENOMEM, if the driver failed to allocate memory. Specifically
+ * this can happen when trying to pin framebuffers, which must only
+ * be done when committing the state.
+ *
+ * - -ENOSPC, as a refinement of the more generic -ENOMEM to indicate
+ * that the driver has run out of vram, iommu space or similar GPU
+ * address space needed for framebuffer.
+ *
+ * - -EIO, if the hardware completely died.
+ *
+ * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted.
+ * This can either be due to a pending signal, or because the driver
+ * needs to completely bail out to recover from an exceptional
+ * situation like a GPU hang. From a userspace point of view all errors are
+ * treated equally.
+ *
+ * This list is exhaustive. Specifically this hook is not allowed to
+ * return -EINVAL (any invalid requests should be caught in
+ * @atomic_check) or -EDEADLK (this function must not acquire
+ * additional modeset locks).
+ */
+ int (*atomic_commit)(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock);
+
+ /**
+ * @atomic_state_alloc:
+ *
+ * This optional hook can be used by drivers that want to subclass struct
+ * &drm_atomic_state to be able to track their own driver-private global
+ * state easily. If this hook is implemented, drivers must also
+ * implement @atomic_state_clear and @atomic_state_free.
+ *
+ * RETURNS:
+ *
+ * A new &drm_atomic_state on success or NULL on failure.
+ */
+ struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev);
+
+ /**
+ * @atomic_state_clear:
+ *
+ * This hook must clear any driver private state duplicated into the
+ * passed-in &drm_atomic_state. This hook is called when the caller
+ * encountered a &drm_modeset_lock deadlock and needs to drop all
+ * already acquired locks as part of the deadlock avoidance dance
+ * implemented in drm_modeset_lock_backoff().
+ *
+ * Any duplicated state must be invalidated since a concurrent atomic
+ * update might change it, and the drm atomic interfaces always apply
+ * updates as relative changes to the current state.
+ *
+ * Drivers that implement this must call drm_atomic_state_default_clear()
+ * to clear common state.
+ */
+ void (*atomic_state_clear)(struct drm_atomic_state *state);
+
+ /**
+ * @atomic_state_free:
+ *
+ * This hook needs to free driver private resources and the
+ * &drm_atomic_state itself. Note that the core first calls
+ * drm_atomic_state_clear() to avoid code duplication between the clear
+ * and free hooks.
+ *
+ * Drivers that implement this must call drm_atomic_state_default_free()
+ * to release common resources.
+ */
+ void (*atomic_state_free)(struct drm_atomic_state *state);
+};
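+
+/*
+ * A minimal sketch of a driver wiring the atomic helpers into these hooks;
+ * drm_fb_cma_create() is only valid for CMA-backed drivers and stands in
+ * for a driver-specific @fb_create implementation::
+ *
+ *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
+ *		.fb_create	= drm_fb_cma_create,
+ *		.atomic_check	= drm_atomic_helper_check,
+ *		.atomic_commit	= drm_atomic_helper_commit,
+ *	};
+ */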
+
+/**
+ * struct drm_mode_config - Mode configuration control structure
+ * @mutex: mutex protecting KMS related lists and structures
+ * @connection_mutex: ww mutex protecting connector state and routing
+ * @acquire_ctx: global implicit acquire context used by atomic drivers for
+ * legacy IOCTLs
+ * @fb_lock: mutex to protect fb state and lists
+ * @num_fb: number of fbs available
+ * @fb_list: list of framebuffers available
+ * @num_encoder: number of encoders on this device
+ * @encoder_list: list of encoder objects
+ * @num_overlay_plane: number of overlay planes on this device
+ * @num_total_plane: number of universal (i.e. with primary/cursor) planes on this device
+ * @plane_list: list of plane objects
+ * @num_crtc: number of CRTCs on this device
+ * @crtc_list: list of CRTC objects
+ * @property_list: list of property objects
+ * @min_width: minimum pixel width on this device
+ * @min_height: minimum pixel height on this device
+ * @max_width: maximum pixel width on this device
+ * @max_height: maximum pixel height on this device
+ * @funcs: core driver provided mode setting functions
+ * @fb_base: base address of the framebuffer
+ * @poll_enabled: track polling support for this device
+ * @poll_running: track polling status for this device
+ * @delayed_event: track delayed poll uevent deliver for this device
+ * @output_poll_work: delayed work for polling in process context
+ * @property_blob_list: list of all the blob property objects
+ * @blob_lock: mutex for blob property allocation and management
+ * @*_property: core property tracking
+ * @preferred_depth: preferred RGB pixel depth, used by fb helpers
+ * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
+ * @cursor_width: hint to userspace for max cursor width
+ * @cursor_height: hint to userspace for max cursor height
+ * @helper_private: mid-layer private data
+ *
+ * Core mode resource tracking structure. All CRTC, encoders, and connectors
+ * enumerated by the driver are added here, as are global properties. Some
+ * global restrictions are also here, e.g. dimension restrictions.
+ */
+struct drm_mode_config {
+ struct mutex mutex; /* protects configuration (mode lists etc.) */
+ struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */
+ struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
+
+ /**
+ * @idr_mutex:
+ *
+ * Mutex for KMS ID allocation and management. Protects both @crtc_idr
+ * and @tile_idr.
+ */
+ struct mutex idr_mutex;
+
+ /**
+ * @crtc_idr:
+ *
+ * Main KMS ID tracking object. Use this idr for all IDs, fb, crtc,
+ * connector, modes - just makes life easier to have only one.
+ */
+ struct idr crtc_idr;
+
+ /**
+ * @tile_idr:
+ *
+ * Use this idr for allocating new IDs for tiled sinks, like those used
+ * in some high-res DP MST screens.
+ */
+ struct idr tile_idr;
+
+ struct mutex fb_lock; /* protects global and per-file fb lists */
+ int num_fb;
+ struct list_head fb_list;
+
+ /**
+ * @num_connector: Number of connectors on this device.
+ */
+ int num_connector;
+ /**
+ * @connector_ida: ID allocator for connector indices.
+ */
+ struct ida connector_ida;
+ /**
+ * @connector_list: List of connector objects.
+ */
+ struct list_head connector_list;
+ int num_encoder;
+ struct list_head encoder_list;
+
+ /*
+ * Track # of overlay planes separately from # of total planes. By
+ * default we only advertise overlay planes to userspace; if userspace
+ * sets the "universal plane" capability bit, we'll go ahead and
+ * expose all planes.
+ */
+ int num_overlay_plane;
+ int num_total_plane;
+ struct list_head plane_list;
+
+ int num_crtc;
+ struct list_head crtc_list;
+
+ struct list_head property_list;
+
+ int min_width, min_height;
+ int max_width, max_height;
+ const struct drm_mode_config_funcs *funcs;
+ resource_size_t fb_base;
+
+ /* output poll support */
+ bool poll_enabled;
+ bool poll_running;
+ bool delayed_event;
+ struct delayed_work output_poll_work;
+
+ struct mutex blob_lock;
+
+ /* pointers to standard properties */
+ struct list_head property_blob_list;
+ /**
+ * @edid_property: Default connector property to hold the EDID of the
+ * currently connected sink, if any.
+ */
+ struct drm_property *edid_property;
+ /**
+ * @dpms_property: Default connector property to control the
+ * connector's DPMS state.
+ */
+ struct drm_property *dpms_property;
+ /**
+ * @path_property: Default connector property to hold the DP MST path
+ * for the port.
+ */
+ struct drm_property *path_property;
+ /**
+ * @tile_property: Default connector property to store the tile
+ * position of a tiled screen, for sinks which need to be driven with
+ * multiple CRTCs.
+ */
+ struct drm_property *tile_property;
+ /**
+ * @plane_type_property: Default plane property to differentiate
+ * CURSOR, PRIMARY and OVERLAY legacy uses of planes.
+ */
+ struct drm_property *plane_type_property;
+ /**
+ * @prop_src_x: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
+ struct drm_property *prop_src_x;
+ /**
+ * @prop_src_y: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
+ struct drm_property *prop_src_y;
+ /**
+ * @prop_src_w: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
+ struct drm_property *prop_src_w;
+ /**
+ * @prop_src_h: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
+ struct drm_property *prop_src_h;
+ /**
+ * @prop_crtc_x: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
+ struct drm_property *prop_crtc_x;
+ /**
+ * @prop_crtc_y: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
+ struct drm_property *prop_crtc_y;
+ /**
+ * @prop_crtc_w: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
+ struct drm_property *prop_crtc_w;
+ /**
+ * @prop_crtc_h: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
+ struct drm_property *prop_crtc_h;
+ /**
+ * @prop_fb_id: Default atomic plane property to specify the
+ * &drm_framebuffer.
+ */
+ struct drm_property *prop_fb_id;
+ /**
+ * @prop_in_fence_fd: Sync File fd representing the incoming fences
+ * for a Plane.
+ */
+ struct drm_property *prop_in_fence_fd;
+ /**
+ * @prop_out_fence_ptr: Sync File fd pointer representing the
+ * outgoing fences for a CRTC. Userspace should provide a pointer to a
+ * value of type s64, and then cast that pointer to u64.
+ */
+ struct drm_property *prop_out_fence_ptr;
+ /**
+ * @prop_crtc_id: Default atomic plane property to specify the
+ * &drm_crtc.
+ */
+ struct drm_property *prop_crtc_id;
+ /**
+ * @prop_active: Default atomic CRTC property to control the active
+ * state, which is the simplified implementation for DPMS in atomic
+ * drivers.
+ */
+ struct drm_property *prop_active;
+ /**
+ * @prop_mode_id: Default atomic CRTC property to set the mode for a
+ * CRTC. A 0 mode implies that the CRTC is entirely disabled - all
+ * connectors must be off and the active property must be set to false, too.
+ */
+ struct drm_property *prop_mode_id;
+
+ /**
+ * @dvi_i_subconnector_property: Optional DVI-I property to
+ * differentiate between analog or digital mode.
+ */
+ struct drm_property *dvi_i_subconnector_property;
+ /**
+ * @dvi_i_select_subconnector_property: Optional DVI-I property to
+ * select between analog or digital mode.
+ */
+ struct drm_property *dvi_i_select_subconnector_property;
+
+ /**
+ * @tv_subconnector_property: Optional TV property to differentiate
+ * between different TV connector types.
+ */
+ struct drm_property *tv_subconnector_property;
+ /**
+ * @tv_select_subconnector_property: Optional TV property to select
+ * between different TV connector types.
+ */
+ struct drm_property *tv_select_subconnector_property;
+ /**
+ * @tv_mode_property: Optional TV property to select
+ * the output TV mode.
+ */
+ struct drm_property *tv_mode_property;
+ /**
+ * @tv_left_margin_property: Optional TV property to set the left
+ * margin.
+ */
+ struct drm_property *tv_left_margin_property;
+ /**
+ * @tv_right_margin_property: Optional TV property to set the right
+ * margin.
+ */
+ struct drm_property *tv_right_margin_property;
+ /**
+ * @tv_top_margin_property: Optional TV property to set the top
+ * margin.
+ */
+ struct drm_property *tv_top_margin_property;
+ /**
+ * @tv_bottom_margin_property: Optional TV property to set the bottom
+ * margin.
+ */
+ struct drm_property *tv_bottom_margin_property;
+ /**
+ * @tv_brightness_property: Optional TV property to set the
+ * brightness.
+ */
+ struct drm_property *tv_brightness_property;
+ /**
+ * @tv_contrast_property: Optional TV property to set the
+ * contrast.
+ */
+ struct drm_property *tv_contrast_property;
+ /**
+ * @tv_flicker_reduction_property: Optional TV property to control the
+ * flicker reduction mode.
+ */
+ struct drm_property *tv_flicker_reduction_property;
+ /**
+ * @tv_overscan_property: Optional TV property to control the overscan
+ * setting.
+ */
+ struct drm_property *tv_overscan_property;
+ /**
+ * @tv_saturation_property: Optional TV property to set the
+ * saturation.
+ */
+ struct drm_property *tv_saturation_property;
+ /**
+ * @tv_hue_property: Optional TV property to set the hue.
+ */
+ struct drm_property *tv_hue_property;
+
+ /**
+ * @scaling_mode_property: Optional connector property to control the
+ * upscaling, mostly used for built-in panels.
+ */
+ struct drm_property *scaling_mode_property;
+ /**
+ * @aspect_ratio_property: Optional connector property to control the
+ * HDMI infoframe aspect ratio setting.
+ */
+ struct drm_property *aspect_ratio_property;
+ /**
+ * @degamma_lut_property: Optional CRTC property to set the LUT used to
+ * convert the framebuffer's colors to linear gamma.
+ */
+ struct drm_property *degamma_lut_property;
+ /**
+ * @degamma_lut_size_property: Optional CRTC property for the size of
+ * the degamma LUT as supported by the driver (read-only).
+ */
+ struct drm_property *degamma_lut_size_property;
+ /**
+ * @ctm_property: Optional CRTC property to set the
+ * matrix used to convert colors after the lookup in the
+ * degamma LUT.
+ */
+ struct drm_property *ctm_property;
+ /**
+ * @gamma_lut_property: Optional CRTC property to set the LUT used to
+ * convert the colors, after the CTM matrix, to the gamma space of the
+ * connected screen.
+ */
+ struct drm_property *gamma_lut_property;
+ /**
+ * @gamma_lut_size_property: Optional CRTC property for the size of the
+ * gamma LUT as supported by the driver (read-only).
+ */
+ struct drm_property *gamma_lut_size_property;
+
+ /**
+ * @suggested_x_property: Optional connector property with a hint for
+ * the position of the output on the host's screen.
+ */
+ struct drm_property *suggested_x_property;
+ /**
+ * @suggested_y_property: Optional connector property with a hint for
+ * the position of the output on the host's screen.
+ */
+ struct drm_property *suggested_y_property;
+
+ /* dumb ioctl parameters */
+ uint32_t preferred_depth, prefer_shadow;
+
+ /**
+ * @async_page_flip: Does this device support async flips on the primary
+ * plane?
+ */
+ bool async_page_flip;
+
+ /**
+ * @allow_fb_modifiers:
+ *
+ * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call.
+ */
+ bool allow_fb_modifiers;
+
+ /* cursor size */
+ uint32_t cursor_width, cursor_height;
+
+ struct drm_mode_config_helper_funcs *helper_private;
+};
+
+void drm_mode_config_init(struct drm_device *dev);
+void drm_mode_config_reset(struct drm_device *dev);
+void drm_mode_config_cleanup(struct drm_device *dev);
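+
+/*
+ * A typical initialization sketch, run during driver load after the
+ * &drm_device has been set up; the limits and &foo_mode_config_funcs are
+ * illustrative only, and drm_mode_config_cleanup() undoes this on
+ * unload::
+ *
+ *	drm_mode_config_init(drm);
+ *
+ *	drm->mode_config.min_width = 0;
+ *	drm->mode_config.min_height = 0;
+ *	drm->mode_config.max_width = 4096;
+ *	drm->mode_config.max_height = 4096;
+ *	drm->mode_config.funcs = &foo_mode_config_funcs;
+ */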
+
+#endif
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 10e449c86dbd..69c3974bf133 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -361,8 +361,8 @@ struct drm_crtc_helper_funcs {
*
* Note that the power state of the display pipe when this function is
* called depends upon the exact helpers and calling sequence the driver
- * has picked. See drm_atomic_commit_planes() for a discussion of the
- * tradeoffs and variants of plane commit helpers.
+ * has picked. See drm_atomic_helper_commit_planes() for a discussion of
+ * the tradeoffs and variants of plane commit helpers.
*
* This callback is used by the atomic modeset helpers and by the
* transitional plane helpers, but it is optional.
@@ -385,8 +385,8 @@ struct drm_crtc_helper_funcs {
*
* Note that the power state of the display pipe when this function is
* called depends upon the exact helpers and calling sequence the driver
- * has picked. See drm_atomic_commit_planes() for a discussion of the
- * tradeoffs and variants of plane commit helpers.
+ * has picked. See drm_atomic_helper_commit_planes() for a discussion of
+ * the tradeoffs and variants of plane commit helpers.
*
* This callback is used by the atomic modeset helpers and by the
* transitional plane helpers, but it is optional.
@@ -940,8 +940,8 @@ struct drm_plane_helper_funcs {
*
* Note that the power state of the display pipe when this function is
* called depends upon the exact helpers and calling sequence the driver
- * has picked. See drm_atomic_commit_planes() for a discussion of the
- * tradeoffs and variants of plane commit helpers.
+ * has picked. See drm_atomic_helper_commit_planes() for a discussion of
+ * the tradeoffs and variants of plane commit helpers.
*
* This callback is used by the atomic modeset helpers and by the
* transitional plane helpers, but it is optional.
@@ -963,8 +963,8 @@ struct drm_plane_helper_funcs {
*
* Note that the power state of the display pipe when this function is
* called depends upon the exact helpers and calling sequence the driver
- * has picked. See drm_atomic_commit_planes() for a discussion of the
- * tradeoffs and variants of plane commit helpers.
+ * has picked. See drm_atomic_helper_commit_planes() for a discussion of
+ * the tradeoffs and variants of plane commit helpers.
*
* This callback is used by the atomic modeset helpers and by the
* transitional plane helpers, but it is optional.
@@ -999,10 +999,14 @@ struct drm_mode_config_helper_funcs {
* to implement blocking and nonblocking commits easily. It is not used
* by the atomic helpers
*
- * This hook should first commit the given atomic state to the hardware.
- * But drivers can add more waiting calls at the start of their
- * implementation, e.g. to wait for driver-internal request for implicit
- * syncing, before starting to commit the update to the hardware.
+ * This function is called when the new atomic state has already been
+ * swapped into the various state pointers. The passed in state
+ * therefore contains copies of the old/previous state. This hook should
+ * commit the new state into hardware. Note that the helpers have
+ * already waited for preceding atomic commits and fences, but drivers
+ * can add more waiting calls at the start of their implementation, e.g.
+ * to wait for driver-internal request for implicit syncing, before
+ * starting to commit the update to the hardware.
*
* After the atomic update is committed to the hardware this hook needs
 * to call drm_atomic_helper_commit_hw_done(). Then wait for the update
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index c5576fbcb909..d918ce45ec2c 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -82,8 +82,6 @@ struct drm_modeset_lock {
struct list_head head;
};
-extern struct ww_class crtc_ww_class;
-
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
uint32_t flags);
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx);
@@ -91,15 +89,7 @@ void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx);
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx);
int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx);
-/**
- * drm_modeset_lock_init - initialize lock
- * @lock: lock to init
- */
-static inline void drm_modeset_lock_init(struct drm_modeset_lock *lock)
-{
- ww_mutex_init(&lock->mutex, &crtc_ww_class);
- INIT_LIST_HEAD(&lock->head);
-}
+void drm_modeset_lock_init(struct drm_modeset_lock *lock);
/**
* drm_modeset_lock_fini - cleanup lock
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index 3fd87b386ed7..26a64805cc15 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -4,6 +4,7 @@
#include <linux/of_graph.h>
struct component_master_ops;
+struct component_match;
struct device;
struct drm_device;
struct drm_encoder;
@@ -12,6 +13,10 @@ struct device_node;
#ifdef CONFIG_OF
extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port);
+extern void drm_of_component_match_add(struct device *master,
+ struct component_match **matchptr,
+ int (*compare)(struct device *, void *),
+ struct device_node *node);
extern int drm_of_component_probe(struct device *dev,
int (*compare_of)(struct device *, void *),
const struct component_master_ops *m_ops);
@@ -25,6 +30,14 @@ static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
return 0;
}
+static inline void
+drm_of_component_match_add(struct device *master,
+ struct component_match **matchptr,
+ int (*compare)(struct device *, void *),
+ struct device_node *node)
+{
+}
+
static inline int
drm_of_component_probe(struct device *dev,
int (*compare_of)(struct device *, void *),
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 8b4dc62470ff..db3bbdeb36d5 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -28,15 +28,11 @@
#include <drm/drm_mode_object.h>
struct drm_crtc;
+struct drm_printer;
/**
* struct drm_plane_state - mutable plane state
* @plane: backpointer to the plane
- * @crtc: currently bound CRTC, NULL if disabled
- * @fb: currently bound framebuffer
- * @fence: optional fence to wait for before scanning out @fb
- * @crtc_x: left position of visible portion of plane on crtc
- * @crtc_y: upper position of visible portion of plane on crtc
* @crtc_w: width of visible portion of plane on crtc
* @crtc_h: height of visible portion of plane on crtc
* @src_x: left position of visible portion of plane within
@@ -57,18 +53,51 @@ struct drm_crtc;
* it can be trusted.
* @src: clipped source coordinates of the plane (in 16.16)
* @dst: clipped destination coordinates of the plane
- * @visible: visibility of the plane
* @state: backpointer to global drm_atomic_state
*/
struct drm_plane_state {
struct drm_plane *plane;
- struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */
- struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */
- struct fence *fence;
+ /**
+ * @crtc:
+ *
+ * Currently bound CRTC, NULL if disabled. Do not write this directly,
+ * use drm_atomic_set_crtc_for_plane().
+ */
+ struct drm_crtc *crtc;
+
+ /**
+ * @fb:
+ *
+ * Currently bound framebuffer. Do not write this directly, use
+ * drm_atomic_set_fb_for_plane()
+ */
+ struct drm_framebuffer *fb;
+
+ /**
+ * @fence:
+ *
+ * Optional fence to wait for before scanning out @fb. Do not write this
+ * directly, use drm_atomic_set_fence_for_plane()
+ */
+ struct dma_fence *fence;
+
+ /**
+ * @crtc_x:
+ *
+ * Left position of the visible portion of the plane on the CRTC. The
+ * signed destination location allows the plane to be partially off
+ * screen.
+ */
+ int32_t crtc_x;
+
+ /**
+ * @crtc_y:
+ *
+ * Upper position of the visible portion of the plane on the CRTC. The
+ * signed destination location allows the plane to be partially off
+ * screen.
+ */
+ int32_t crtc_y;
- /* Signed dest location allows it to be partially off screen */
- int32_t crtc_x, crtc_y;
uint32_t crtc_w, crtc_h;
/* Source values are 16.16 fixed point */
@@ -85,15 +114,40 @@ struct drm_plane_state {
/* Clipped coordinates */
struct drm_rect src, dst;
- /*
- * Is the plane actually visible? Can be false even
- * if fb!=NULL and crtc!=NULL, due to clipping.
+ /**
+ * @visible:
+ *
+ * Visibility of the plane. This can be false even if fb!=NULL and
+ * crtc!=NULL, due to clipping.
*/
bool visible;
struct drm_atomic_state *state;
};
+static inline struct drm_rect
+drm_plane_state_src(const struct drm_plane_state *state)
+{
+ struct drm_rect src = {
+ .x1 = state->src_x,
+ .y1 = state->src_y,
+ .x2 = state->src_x + state->src_w,
+ .y2 = state->src_y + state->src_h,
+ };
+ return src;
+}
+
+static inline struct drm_rect
+drm_plane_state_dest(const struct drm_plane_state *state)
+{
+ struct drm_rect dest = {
+ .x1 = state->crtc_x,
+ .y1 = state->crtc_y,
+ .x2 = state->crtc_x + state->crtc_w,
+ .y2 = state->crtc_y + state->crtc_h,
+ };
+ return dest;
+}
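+
+/*
+ * Usage sketch, e.g. from a plane's atomic_check hook where @state is the
+ * &drm_plane_state being checked; the DRM_RECT_* printing helpers are
+ * assumed to come from drm_rect.h, and @src is in 16.16 fixed point while
+ * @dst is in whole pixels::
+ *
+ *	struct drm_rect src = drm_plane_state_src(state);
+ *	struct drm_rect dst = drm_plane_state_dest(state);
+ *
+ *	DRM_DEBUG_KMS("src " DRM_RECT_FP_FMT " -> dst " DRM_RECT_FMT "\n",
+ *		      DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst));
+ */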
/**
* struct drm_plane_funcs - driver plane control functions
@@ -323,6 +377,18 @@ struct drm_plane_funcs {
* before data structures are torndown.
*/
void (*early_unregister)(struct drm_plane *plane);
+
+ /**
+ * @atomic_print_state:
+ *
+ * If driver subclasses struct &drm_plane_state, it should implement
+ * this optional hook for printing additional driver specific state.
+ *
+ * Do not call this directly, use drm_atomic_plane_print_state()
+ * instead.
+ */
+ void (*atomic_print_state)(struct drm_printer *p,
+ const struct drm_plane_state *state);
};
/**
@@ -392,6 +458,7 @@ enum drm_plane_type {
* @type: type of plane (overlay, primary, cursor)
* @state: current atomic state for this plane
* @zpos_property: zpos property for this plane
+ * @rotation_property: rotation property for this plane
* @helper_private: mid-layer private data
*/
struct drm_plane {
@@ -438,6 +505,7 @@ struct drm_plane {
struct drm_plane_state *state;
struct drm_property *zpos_property;
+ struct drm_property *rotation_property;
};
#define obj_to_plane(x) container_of(x, struct drm_plane, base)
@@ -445,7 +513,7 @@ struct drm_plane {
extern __printf(8, 9)
int drm_universal_plane_init(struct drm_device *dev,
struct drm_plane *plane,
- unsigned long possible_crtcs,
+ uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats,
unsigned int format_count,
@@ -453,7 +521,7 @@ int drm_universal_plane_init(struct drm_device *dev,
const char *name, ...);
extern int drm_plane_init(struct drm_device *dev,
struct drm_plane *plane,
- unsigned long possible_crtcs,
+ uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
bool is_primary);
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
new file mode 100644
index 000000000000..1adf84aea622
--- /dev/null
+++ b/include/drm/drm_print.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef DRM_PRINT_H_
+#define DRM_PRINT_H_
+
+#include <linux/seq_file.h>
+#include <linux/device.h>
+
+/**
+ * DOC: print
+ *
+ * A simple wrapper for dev_printk(), seq_printf(), etc. Allows the
+ * same debug code to be used for both debugfs and printk logging.
+ *
+ * For example::
+ *
+ * void log_some_info(struct drm_printer *p)
+ * {
+ * drm_printf(p, "foo=%d\n", foo);
+ * drm_printf(p, "bar=%d\n", bar);
+ * }
+ *
+ * #ifdef CONFIG_DEBUG_FS
+ * void debugfs_show(struct seq_file *f)
+ * {
+ * struct drm_printer p = drm_seq_file_printer(f);
+ * log_some_info(&p);
+ * }
+ * #endif
+ *
+ * void some_other_function(...)
+ * {
+ * struct drm_printer p = drm_info_printer(drm->dev);
+ * log_some_info(&p);
+ * }
+ */
+
+/**
+ * struct drm_printer - drm output "stream"
+ * @printfn: actual output fxn
+ * @arg: output fxn specific data
+ *
+ * Do not use struct members directly. Use drm_seq_file_printer(),
+ * drm_info_printer(), etc. to initialize. And drm_printf() for output.
+ */
+struct drm_printer {
+ void (*printfn)(struct drm_printer *p, struct va_format *vaf);
+ void *arg;
+};
+
+void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf);
+void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf);
+
+void drm_printf(struct drm_printer *p, const char *f, ...);
+
+
+/**
+ * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file
+ * @f: the struct &seq_file to output to
+ *
+ * RETURNS:
+ * The &drm_printer object
+ */
+static inline struct drm_printer drm_seq_file_printer(struct seq_file *f)
+{
+ struct drm_printer p = {
+ .printfn = __drm_printfn_seq_file,
+ .arg = f,
+ };
+ return p;
+}
+
+/**
+ * drm_info_printer - construct a &drm_printer that outputs to dev_printk()
+ * @dev: the struct &device pointer
+ *
+ * RETURNS:
+ * The &drm_printer object
+ */
+static inline struct drm_printer drm_info_printer(struct device *dev)
+{
+ struct drm_printer p = {
+ .printfn = __drm_printfn_info,
+ .arg = dev,
+ };
+ return p;
+}
+
+#endif /* DRM_PRINT_H_ */
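
A hedged sketch of how the @atomic_print_state hook from &drm_plane_funcs above could be implemented on top of drm_printf(); struct foo_plane_state and its watermark field are invented for illustration::

    struct foo_plane_state {
            struct drm_plane_state base;
            int watermark;                  /* hypothetical driver state */
    };

    static void foo_plane_atomic_print_state(struct drm_printer *p,
                                             const struct drm_plane_state *state)
    {
            const struct foo_plane_state *fstate =
                    container_of(state, struct foo_plane_state, base);

            drm_printf(p, "\twatermark=%d\n", fstate->watermark);
    }

The hook would then be assigned as .atomic_print_state in the driver's &drm_plane_funcs, and the core invokes it through drm_atomic_plane_print_state() as documented above.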
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index b46fa0ef3005..545c6e0fea7d 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -64,7 +64,7 @@ struct i915_audio_component_ops {
* Called from the audio driver. After the audio driver sets the
* sample rate, it will call this function to set n/cts.
*/
- int (*sync_audio_rate)(struct device *, int port, int rate);
+ int (*sync_audio_rate)(struct device *, int port, int pipe, int rate);
/**
* @get_eld: fill the audio state and ELD bytes for the given port
*
@@ -77,7 +77,7 @@ struct i915_audio_component_ops {
* Note that the returned size may exceed @max_bytes; in that case only
* part of the ELD has been copied to the buffer.
*/
- int (*get_eld)(struct device *, int port, bool *enabled,
+ int (*get_eld)(struct device *, int port, int pipe, bool *enabled,
unsigned char *buf, int max_bytes);
};
@@ -97,7 +97,7 @@ struct i915_audio_component_audio_ops {
* status accordingly (even when the HDA controller is in power save
* mode).
*/
- void (*pin_eld_notify)(void *audio_ptr, int port);
+ void (*pin_eld_notify)(void *audio_ptr, int port, int pipe);
};
/**
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 9eb940d6755f..652e45be97c8 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -47,6 +47,8 @@ struct drm_mm_node;
struct ttm_placement;
+struct ttm_place;
+
/**
* struct ttm_bus_placement
*
@@ -209,7 +211,7 @@ struct ttm_buffer_object {
* Members protected by a bo reservation.
*/
- struct fence *moving;
+ struct dma_fence *moving;
struct drm_vma_offset_node vma_node;
@@ -396,6 +398,17 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
int resched);
/**
+ * ttm_bo_eviction_valuable
+ *
+ * @bo: The buffer object to evict
+ * @place: the placement we need to make room for
+ *
+ * Check if it is valuable to evict the BO to make room for the given placement.
+ */
+bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place);
+
+/**
* ttm_bo_synccpu_write_grab
*
* @bo: The buffer object:
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 4f0a92185995..cdbdb40eb5bd 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -303,7 +303,7 @@ struct ttm_mem_type_manager {
/*
* Protected by @move_lock.
*/
- struct fence *move;
+ struct dma_fence *move;
};
/**
@@ -371,9 +371,21 @@ struct ttm_bo_driver {
* submission as a consequence.
*/
- int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
- int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man);
+ int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
+ int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man);
+
+ /**
+ * struct ttm_bo_driver member eviction_valuable
+ *
+ * @bo: the buffer object to be evicted
+ * @place: placement we need room for
+ *
+ * Check with the driver if it is valuable to evict a BO to make room
+ * for a certain placement.
+ */
+ bool (*eviction_valuable)(struct ttm_buffer_object *bo,
+ const struct ttm_place *place);
/**
* struct ttm_bo_driver member evict_flags:
*
@@ -384,8 +396,9 @@ struct ttm_bo_driver {
* finished, they'll end up in bo->mem.flags
*/
- void(*evict_flags) (struct ttm_buffer_object *bo,
- struct ttm_placement *placement);
+ void (*evict_flags)(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement);
+
/**
* struct ttm_bo_driver member move:
*
@@ -399,10 +412,9 @@ struct ttm_bo_driver {
*
* Move a buffer between two memory regions.
*/
- int (*move) (struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem);
+ int (*move)(struct ttm_buffer_object *bo, bool evict,
+ bool interruptible, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
/**
* struct ttm_bo_driver_member verify_access
@@ -416,8 +428,8 @@ struct ttm_bo_driver {
* access for all buffer objects.
* This function should return 0 if access is granted, -EPERM otherwise.
*/
- int (*verify_access) (struct ttm_buffer_object *bo,
- struct file *filp);
+ int (*verify_access)(struct ttm_buffer_object *bo,
+ struct file *filp);
/* hook to notify driver about a driver move so it
* can do tiling things */
@@ -430,7 +442,7 @@ struct ttm_bo_driver {
/**
* notify the driver that we're about to swap out this bo
*/
- void (*swap_notify) (struct ttm_buffer_object *bo);
+ void (*swap_notify)(struct ttm_buffer_object *bo);
/**
* Driver callback on when mapping io memory (for bo_move_memcpy
@@ -438,8 +450,10 @@ struct ttm_bo_driver {
* the mapping is not used anymore. io_mem_reserve & io_mem_free
* are balanced.
*/
- int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
- void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+ int (*io_mem_reserve)(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
+ void (*io_mem_free)(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
/**
* Optional driver callback for when BO is removed from the LRU.
@@ -1025,7 +1039,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
*/
extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- struct fence *fence, bool evict,
+ struct dma_fence *fence, bool evict,
struct ttm_mem_reg *new_mem);
/**
@@ -1040,7 +1054,7 @@ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
* immediately or hang it on a temporary buffer object.
*/
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
- struct fence *fence, bool evict,
+ struct dma_fence *fence, bool evict,
struct ttm_mem_reg *new_mem);
/**
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index b620c317c772..47f35b8e6d09 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -114,6 +114,6 @@ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
struct list_head *list,
- struct fence *fence);
+ struct dma_fence *fence);
#endif
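
A hedged sketch of how a TTM driver might wire up the eviction_valuable hook introduced above; foo_ttm_bo_eviction_valuable and foo_bo_driver are made-up names, and the body simply defers to the exported ttm_bo_eviction_valuable() default::

    static bool foo_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
                                             const struct ttm_place *place)
    {
            /*
             * A driver could inspect bo->mem against *place here and skip
             * BOs whose eviction would not free useful space; this sketch
             * just keeps the core's default policy.
             */
            return ttm_bo_eviction_valuable(bo, place);
    }

    static struct ttm_bo_driver foo_bo_driver = {
            /* ...other mandatory hooks elided... */
            .eviction_valuable = foo_ttm_bo_eviction_valuable,
    };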
diff --git a/include/dt-bindings/clock/hi3516cv300-clock.h b/include/dt-bindings/clock/hi3516cv300-clock.h
new file mode 100644
index 000000000000..5ba51b8388fc
--- /dev/null
+++ b/include/dt-bindings/clock/hi3516cv300-clock.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DTS_HI3516CV300_CLOCK_H
+#define __DTS_HI3516CV300_CLOCK_H
+
+/* hi3516CV300 core CRG */
+#define HI3516CV300_APB_CLK 0
+#define HI3516CV300_UART0_CLK 1
+#define HI3516CV300_UART1_CLK 2
+#define HI3516CV300_UART2_CLK 3
+#define HI3516CV300_SPI0_CLK 4
+#define HI3516CV300_SPI1_CLK 5
+#define HI3516CV300_FMC_CLK 6
+#define HI3516CV300_MMC0_CLK 7
+#define HI3516CV300_MMC1_CLK 8
+#define HI3516CV300_MMC2_CLK 9
+#define HI3516CV300_MMC3_CLK 10
+#define HI3516CV300_ETH_CLK 11
+#define HI3516CV300_ETH_MACIF_CLK 12
+#define HI3516CV300_DMAC_CLK 13
+#define HI3516CV300_PWM_CLK 14
+#define HI3516CV300_USB2_BUS_CLK 15
+#define HI3516CV300_USB2_OHCI48M_CLK 16
+#define HI3516CV300_USB2_OHCI12M_CLK 17
+#define HI3516CV300_USB2_OTG_UTMI_CLK 18
+#define HI3516CV300_USB2_HST_PHY_CLK 19
+#define HI3516CV300_USB2_UTMI0_CLK 20
+#define HI3516CV300_USB2_PHY_CLK 21
+
+/* hi3516CV300 sysctrl CRG */
+#define HI3516CV300_WDT_CLK 1
+
+#endif /* __DTS_HI3516CV300_CLOCK_H */
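
These identifiers are shared between device tree sources and the clock driver. As a generic, hedged sketch (function names, register offsets and parent clocks below are placeholders, not taken from the real hi3516cv300 driver), a provider typically uses them to index a clk_hw_onecell_data table exposed via of_clk_add_hw_provider()::

    #include <dt-bindings/clock/hi3516cv300-clock.h>
    #include <linux/clk-provider.h>
    #include <linux/of.h>
    #include <linux/slab.h>

    static int foo_crg_register(struct device_node *np, void __iomem *base)
    {
            unsigned int nr = HI3516CV300_USB2_PHY_CLK + 1;
            struct clk_hw_onecell_data *data;

            data = kzalloc(sizeof(*data) + nr * sizeof(*data->hws), GFP_KERNEL);
            if (!data)
                    return -ENOMEM;
            data->num = nr;

            /* placeholder offset/bit: each binding ID gets one table slot */
            data->hws[HI3516CV300_UART0_CLK] =
                    clk_hw_register_gate(NULL, "uart0", "apb_clk", 0,
                                         base + 0x0, 0, 0, NULL);
            /* ...remaining HI3516CV300_*_CLK entries registered likewise... */

            return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, data);
    }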
diff --git a/include/dt-bindings/clock/histb-clock.h b/include/dt-bindings/clock/histb-clock.h
new file mode 100644
index 000000000000..181c0f070f7c
--- /dev/null
+++ b/include/dt-bindings/clock/histb-clock.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DTS_HISTB_CLOCK_H
+#define __DTS_HISTB_CLOCK_H
+
+/* clocks provided by core CRG */
+#define HISTB_OSC_CLK 0
+#define HISTB_APB_CLK 1
+#define HISTB_AHB_CLK 2
+#define HISTB_UART1_CLK 3
+#define HISTB_UART2_CLK 4
+#define HISTB_UART3_CLK 5
+#define HISTB_I2C0_CLK 6
+#define HISTB_I2C1_CLK 7
+#define HISTB_I2C2_CLK 8
+#define HISTB_I2C3_CLK 9
+#define HISTB_I2C4_CLK 10
+#define HISTB_I2C5_CLK 11
+#define HISTB_SPI0_CLK 12
+#define HISTB_SPI1_CLK 13
+#define HISTB_SPI2_CLK 14
+#define HISTB_SCI_CLK 15
+#define HISTB_FMC_CLK 16
+#define HISTB_MMC_BIU_CLK 17
+#define HISTB_MMC_CIU_CLK 18
+#define HISTB_MMC_DRV_CLK 19
+#define HISTB_MMC_SAMPLE_CLK 20
+#define HISTB_SDIO0_BIU_CLK 21
+#define HISTB_SDIO0_CIU_CLK 22
+#define HISTB_SDIO0_DRV_CLK 23
+#define HISTB_SDIO0_SAMPLE_CLK 24
+#define HISTB_PCIE_AUX_CLK 25
+#define HISTB_PCIE_PIPE_CLK 26
+#define HISTB_PCIE_SYS_CLK 27
+#define HISTB_PCIE_BUS_CLK 28
+#define HISTB_ETH0_MAC_CLK 29
+#define HISTB_ETH0_MACIF_CLK 30
+#define HISTB_ETH1_MAC_CLK 31
+#define HISTB_ETH1_MACIF_CLK 32
+#define HISTB_COMBPHY1_CLK 33
+
+
+/* clocks provided by mcu CRG */
+#define HISTB_MCE_CLK 1
+#define HISTB_IR_CLK 2
+#define HISTB_TIMER01_CLK 3
+#define HISTB_LEDC_CLK 4
+#define HISTB_UART0_CLK 5
+#define HISTB_LSADC_CLK 6
+
+#endif /* __DTS_HISTB_CLOCK_H */
diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h
index fd8aee8f64ae..ee9f1a508d2f 100644
--- a/include/dt-bindings/clock/imx6ul-clock.h
+++ b/include/dt-bindings/clock/imx6ul-clock.h
@@ -236,6 +236,19 @@
#define IMX6UL_CLK_PLL3_120M 223
#define IMX6UL_CLK_KPP 224
-#define IMX6UL_CLK_END 225
+/* For i.MX6ULL */
+#define IMX6ULL_CLK_ESAI_PRED 225
+#define IMX6ULL_CLK_ESAI_PODF 226
+#define IMX6ULL_CLK_ESAI_EXTAL 227
+#define IMX6ULL_CLK_ESAI_MEM 228
+#define IMX6ULL_CLK_ESAI_IPG 229
+#define IMX6ULL_CLK_DCP_CLK 230
+#define IMX6ULL_CLK_EPDC_PRE_SEL 231
+#define IMX6ULL_CLK_EPDC_SEL 232
+#define IMX6ULL_CLK_EPDC_PODF 233
+#define IMX6ULL_CLK_EPDC_ACLK 234
+#define IMX6ULL_CLK_EPDC_PIX 235
+#define IMX6ULL_CLK_ESAI_SEL 236
+#define IMX6UL_CLK_END 237
#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
diff --git a/include/dt-bindings/clock/oxsemi,ox810se.h b/include/dt-bindings/clock/oxsemi,ox810se.h
new file mode 100644
index 000000000000..d5facb5e83f1
--- /dev/null
+++ b/include/dt-bindings/clock/oxsemi,ox810se.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef DT_CLOCK_OXSEMI_OX810SE_H
+#define DT_CLOCK_OXSEMI_OX810SE_H
+
+#define CLK_810_LEON 0
+#define CLK_810_DMA_SGDMA 1
+#define CLK_810_CIPHER 2
+#define CLK_810_SATA 3
+#define CLK_810_AUDIO 4
+#define CLK_810_USBMPH 5
+#define CLK_810_ETHA 6
+#define CLK_810_PCIEA 7
+#define CLK_810_NAND 8
+
+#endif /* DT_CLOCK_OXSEMI_OX810SE_H */
diff --git a/include/dt-bindings/clock/oxsemi,ox820.h b/include/dt-bindings/clock/oxsemi,ox820.h
new file mode 100644
index 000000000000..f661ecc8d760
--- /dev/null
+++ b/include/dt-bindings/clock/oxsemi,ox820.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef DT_CLOCK_OXSEMI_OX820_H
+#define DT_CLOCK_OXSEMI_OX820_H
+
+/* PLLs */
+#define CLK_820_PLLA 0
+#define CLK_820_PLLB 1
+
+/* Gate Clocks */
+#define CLK_820_LEON 2
+#define CLK_820_DMA_SGDMA 3
+#define CLK_820_CIPHER 4
+#define CLK_820_SD 5
+#define CLK_820_SATA 6
+#define CLK_820_AUDIO 7
+#define CLK_820_USBMPH 8
+#define CLK_820_ETHA 9
+#define CLK_820_PCIEA 10
+#define CLK_820_NAND 11
+#define CLK_820_PCIEB 12
+#define CLK_820_ETHB 13
+#define CLK_820_REF600 14
+#define CLK_820_USBDEV 15
+
+#endif /* DT_CLOCK_OXSEMI_OX820_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8994.h b/include/dt-bindings/clock/qcom,gcc-msm8994.h
new file mode 100644
index 000000000000..8fa535be2ebc
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8994.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_8994_H
+#define _DT_BINDINGS_CLK_MSM_GCC_8994_H
+
+#define GPLL0_EARLY 0
+#define GPLL0 1
+#define GPLL4_EARLY 2
+#define GPLL4 3
+#define UFS_AXI_CLK_SRC 4
+#define USB30_MASTER_CLK_SRC 5
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 6
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 7
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 8
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 9
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 10
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 11
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 12
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 13
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 14
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 15
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 16
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 17
+#define BLSP1_UART1_APPS_CLK_SRC 18
+#define BLSP1_UART2_APPS_CLK_SRC 19
+#define BLSP1_UART3_APPS_CLK_SRC 20
+#define BLSP1_UART4_APPS_CLK_SRC 21
+#define BLSP1_UART5_APPS_CLK_SRC 22
+#define BLSP1_UART6_APPS_CLK_SRC 23
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 24
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 25
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 26
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 27
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 28
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 29
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC 30
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC 31
+#define BLSP2_QUP5_I2C_APPS_CLK_SRC 32
+#define BLSP2_QUP5_SPI_APPS_CLK_SRC 33
+#define BLSP2_QUP6_I2C_APPS_CLK_SRC 34
+#define BLSP2_QUP6_SPI_APPS_CLK_SRC 35
+#define BLSP2_UART1_APPS_CLK_SRC 36
+#define BLSP2_UART2_APPS_CLK_SRC 37
+#define BLSP2_UART3_APPS_CLK_SRC 38
+#define BLSP2_UART4_APPS_CLK_SRC 39
+#define BLSP2_UART5_APPS_CLK_SRC 40
+#define BLSP2_UART6_APPS_CLK_SRC 41
+#define GP1_CLK_SRC 42
+#define GP2_CLK_SRC 43
+#define GP3_CLK_SRC 44
+#define PCIE_0_AUX_CLK_SRC 45
+#define PCIE_0_PIPE_CLK_SRC 46
+#define PCIE_1_AUX_CLK_SRC 47
+#define PCIE_1_PIPE_CLK_SRC 48
+#define PDM2_CLK_SRC 49
+#define SDCC1_APPS_CLK_SRC 50
+#define SDCC2_APPS_CLK_SRC 51
+#define SDCC3_APPS_CLK_SRC 52
+#define SDCC4_APPS_CLK_SRC 53
+#define TSIF_REF_CLK_SRC 54
+#define USB30_MOCK_UTMI_CLK_SRC 55
+#define USB3_PHY_AUX_CLK_SRC 56
+#define USB_HS_SYSTEM_CLK_SRC 57
+#define GCC_BLSP1_AHB_CLK 58
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 59
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 60
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 61
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 62
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 63
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 64
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 65
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 66
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 67
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 68
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 69
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 70
+#define GCC_BLSP1_UART1_APPS_CLK 71
+#define GCC_BLSP1_UART2_APPS_CLK 72
+#define GCC_BLSP1_UART3_APPS_CLK 73
+#define GCC_BLSP1_UART4_APPS_CLK 74
+#define GCC_BLSP1_UART5_APPS_CLK 75
+#define GCC_BLSP1_UART6_APPS_CLK 76
+#define GCC_BLSP2_AHB_CLK 77
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 78
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 79
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 80
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 81
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 82
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 83
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK 84
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK 85
+#define GCC_BLSP2_QUP5_I2C_APPS_CLK 86
+#define GCC_BLSP2_QUP5_SPI_APPS_CLK 87
+#define GCC_BLSP2_QUP6_I2C_APPS_CLK 88
+#define GCC_BLSP2_QUP6_SPI_APPS_CLK 89
+#define GCC_BLSP2_UART1_APPS_CLK 90
+#define GCC_BLSP2_UART2_APPS_CLK 91
+#define GCC_BLSP2_UART3_APPS_CLK 92
+#define GCC_BLSP2_UART4_APPS_CLK 93
+#define GCC_BLSP2_UART5_APPS_CLK 94
+#define GCC_BLSP2_UART6_APPS_CLK 95
+#define GCC_GP1_CLK 96
+#define GCC_GP2_CLK 97
+#define GCC_GP3_CLK 98
+#define GCC_PCIE_0_AUX_CLK 99
+#define GCC_PCIE_0_PIPE_CLK 100
+#define GCC_PCIE_1_AUX_CLK 101
+#define GCC_PCIE_1_PIPE_CLK 102
+#define GCC_PDM2_CLK 103
+#define GCC_SDCC1_APPS_CLK 104
+#define GCC_SDCC2_APPS_CLK 105
+#define GCC_SDCC3_APPS_CLK 106
+#define GCC_SDCC4_APPS_CLK 107
+#define GCC_SYS_NOC_UFS_AXI_CLK 108
+#define GCC_SYS_NOC_USB3_AXI_CLK 109
+#define GCC_TSIF_REF_CLK 110
+#define GCC_UFS_AXI_CLK 111
+#define GCC_UFS_RX_CFG_CLK 112
+#define GCC_UFS_TX_CFG_CLK 113
+#define GCC_USB30_MASTER_CLK 114
+#define GCC_USB30_MOCK_UTMI_CLK 115
+#define GCC_USB3_PHY_AUX_CLK 116
+#define GCC_USB_HS_SYSTEM_CLK 117
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
new file mode 100644
index 000000000000..5924cdb71336
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2015 Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_RPMCC_H
+#define _DT_BINDINGS_CLK_MSM_RPMCC_H
+
+/* apq8064 */
+#define RPM_PXO_CLK 0
+#define RPM_PXO_A_CLK 1
+#define RPM_CXO_CLK 2
+#define RPM_CXO_A_CLK 3
+#define RPM_APPS_FABRIC_CLK 4
+#define RPM_APPS_FABRIC_A_CLK 5
+#define RPM_CFPB_CLK 6
+#define RPM_CFPB_A_CLK 7
+#define RPM_QDSS_CLK 8
+#define RPM_QDSS_A_CLK 9
+#define RPM_DAYTONA_FABRIC_CLK 10
+#define RPM_DAYTONA_FABRIC_A_CLK 11
+#define RPM_EBI1_CLK 12
+#define RPM_EBI1_A_CLK 13
+#define RPM_MM_FABRIC_CLK 14
+#define RPM_MM_FABRIC_A_CLK 15
+#define RPM_MMFPB_CLK 16
+#define RPM_MMFPB_A_CLK 17
+#define RPM_SYS_FABRIC_CLK 18
+#define RPM_SYS_FABRIC_A_CLK 19
+#define RPM_SFPB_CLK 20
+#define RPM_SFPB_A_CLK 21
+
+/* msm8916 */
+#define RPM_SMD_XO_CLK_SRC 0
+#define RPM_SMD_XO_A_CLK_SRC 1
+#define RPM_SMD_PCNOC_CLK 2
+#define RPM_SMD_PCNOC_A_CLK 3
+#define RPM_SMD_SNOC_CLK 4
+#define RPM_SMD_SNOC_A_CLK 5
+#define RPM_SMD_BIMC_CLK 6
+#define RPM_SMD_BIMC_A_CLK 7
+#define RPM_SMD_QDSS_CLK 8
+#define RPM_SMD_QDSS_A_CLK 9
+#define RPM_SMD_BB_CLK1 10
+#define RPM_SMD_BB_CLK1_A 11
+#define RPM_SMD_BB_CLK2 12
+#define RPM_SMD_BB_CLK2_A 13
+#define RPM_SMD_RF_CLK1 14
+#define RPM_SMD_RF_CLK1_A 15
+#define RPM_SMD_RF_CLK2 16
+#define RPM_SMD_RF_CLK2_A 17
+#define RPM_SMD_BB_CLK1_PIN 18
+#define RPM_SMD_BB_CLK1_A_PIN 19
+#define RPM_SMD_BB_CLK2_PIN 20
+#define RPM_SMD_BB_CLK2_A_PIN 21
+#define RPM_SMD_RF_CLK1_PIN 22
+#define RPM_SMD_RF_CLK1_A_PIN 23
+#define RPM_SMD_RF_CLK2_PIN 24
+#define RPM_SMD_RF_CLK2_A_PIN 25
+
+#endif
diff --git a/include/dt-bindings/clock/r7s72100-clock.h b/include/dt-bindings/clock/r7s72100-clock.h
index 3cd813896d08..29e01ed10e74 100644
--- a/include/dt-bindings/clock/r7s72100-clock.h
+++ b/include/dt-bindings/clock/r7s72100-clock.h
@@ -28,6 +28,9 @@
/* MSTP7 */
#define R7S72100_CLK_ETHER 4
+/* MSTP8 */
+#define R7S72100_CLK_MMCIF 4
+
/* MSTP9 */
#define R7S72100_CLK_I2C0 7
#define R7S72100_CLK_I2C1 6
@@ -41,4 +44,8 @@
#define R7S72100_CLK_SPI3 4
#define R7S72100_CLK_SPI4 3
+/* MSTP12 */
+#define R7S72100_CLK_SDHI0 3
+#define R7S72100_CLK_SDHI1 2
+
#endif /* __DT_BINDINGS_CLOCK_R7S72100_H__ */
diff --git a/include/dt-bindings/clock/r8a7743-cpg-mssr.h b/include/dt-bindings/clock/r8a7743-cpg-mssr.h
new file mode 100644
index 000000000000..e1d1f3c6a99e
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7743-cpg-mssr.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A7743_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A7743_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a7743 CPG Core Clocks */
+#define R8A7743_CLK_Z 0
+#define R8A7743_CLK_ZG 1
+#define R8A7743_CLK_ZTR 2
+#define R8A7743_CLK_ZTRD2 3
+#define R8A7743_CLK_ZT 4
+#define R8A7743_CLK_ZX 5
+#define R8A7743_CLK_ZS 6
+#define R8A7743_CLK_HP 7
+#define R8A7743_CLK_B 9
+#define R8A7743_CLK_LB 10
+#define R8A7743_CLK_P 11
+#define R8A7743_CLK_CL 12
+#define R8A7743_CLK_M2 13
+#define R8A7743_CLK_ZB3 15
+#define R8A7743_CLK_ZB3D2 16
+#define R8A7743_CLK_DDR 17
+#define R8A7743_CLK_SDH 18
+#define R8A7743_CLK_SD0 19
+#define R8A7743_CLK_SD2 20
+#define R8A7743_CLK_SD3 21
+#define R8A7743_CLK_MMC0 22
+#define R8A7743_CLK_MP 23
+#define R8A7743_CLK_QSPI 26
+#define R8A7743_CLK_CP 27
+#define R8A7743_CLK_RCAN 28
+#define R8A7743_CLK_R 29
+#define R8A7743_CLK_OSC 30
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7743_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a7745-cpg-mssr.h b/include/dt-bindings/clock/r8a7745-cpg-mssr.h
new file mode 100644
index 000000000000..56ad6f0c6760
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7745-cpg-mssr.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A7745_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A7745_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a7745 CPG Core Clocks */
+#define R8A7745_CLK_Z2 0
+#define R8A7745_CLK_ZG 1
+#define R8A7745_CLK_ZTR 2
+#define R8A7745_CLK_ZTRD2 3
+#define R8A7745_CLK_ZT 4
+#define R8A7745_CLK_ZX 5
+#define R8A7745_CLK_ZS 6
+#define R8A7745_CLK_HP 7
+#define R8A7745_CLK_B 9
+#define R8A7745_CLK_LB 10
+#define R8A7745_CLK_P 11
+#define R8A7745_CLK_CL 12
+#define R8A7745_CLK_CP 13
+#define R8A7745_CLK_M2 14
+#define R8A7745_CLK_ZB3 16
+#define R8A7745_CLK_ZB3D2 17
+#define R8A7745_CLK_DDR 18
+#define R8A7745_CLK_SDH 19
+#define R8A7745_CLK_SD0 20
+#define R8A7745_CLK_SD2 21
+#define R8A7745_CLK_SD3 22
+#define R8A7745_CLK_MMC0 23
+#define R8A7745_CLK_MP 24
+#define R8A7745_CLK_QSPI 25
+#define R8A7745_CLK_CPEX 26
+#define R8A7745_CLK_RCAN 27
+#define R8A7745_CLK_R 28
+#define R8A7745_CLK_OSC 29
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7745_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
index 9d02f5317c7c..88e64846cf37 100644
--- a/include/dt-bindings/clock/r8a7794-clock.h
+++ b/include/dt-bindings/clock/r8a7794-clock.h
@@ -20,8 +20,7 @@
#define R8A7794_CLK_QSPI 5
#define R8A7794_CLK_SDH 6
#define R8A7794_CLK_SD0 7
-#define R8A7794_CLK_Z 8
-#define R8A7794_CLK_RCAN 9
+#define R8A7794_CLK_RCAN 8
/* MSTP0 */
#define R8A7794_CLK_MSIOF0 0
diff --git a/include/dt-bindings/clock/rk1108-cru.h b/include/dt-bindings/clock/rk1108-cru.h
new file mode 100644
index 000000000000..9350a5527a36
--- /dev/null
+++ b/include/dt-bindings/clock/rk1108-cru.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK1108_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK1108_H
+
+/* pll id */
+#define PLL_APLL 0
+#define PLL_DPLL 1
+#define PLL_GPLL 2
+#define ARMCLK 3
+
+/* sclk gates (special clocks) */
+#define SCLK_SPI0 65
+#define SCLK_NANDC 67
+#define SCLK_SDMMC 68
+#define SCLK_SDIO 69
+#define SCLK_EMMC 71
+#define SCLK_UART0 72
+#define SCLK_UART1 73
+#define SCLK_UART2 74
+#define SCLK_I2S0 75
+#define SCLK_I2S1 76
+#define SCLK_I2S2 77
+#define SCLK_TIMER0 78
+#define SCLK_TIMER1 79
+#define SCLK_SFC 80
+#define SCLK_SDMMC_DRV 81
+#define SCLK_SDIO_DRV 82
+#define SCLK_EMMC_DRV 83
+#define SCLK_SDMMC_SAMPLE 84
+#define SCLK_SDIO_SAMPLE 85
+#define SCLK_EMMC_SAMPLE 86
+
+/* aclk gates */
+#define ACLK_DMAC 192
+#define ACLK_PRE 193
+#define ACLK_CORE 194
+#define ACLK_ENMCORE 195
+
+/* pclk gates */
+#define PCLK_GPIO1 256
+#define PCLK_GPIO2 257
+#define PCLK_GPIO3 258
+#define PCLK_GRF 259
+#define PCLK_I2C1 260
+#define PCLK_I2C2 261
+#define PCLK_I2C3 262
+#define PCLK_SPI 263
+#define PCLK_SFC 264
+#define PCLK_UART0 265
+#define PCLK_UART1 266
+#define PCLK_UART2 267
+#define PCLK_TSADC 268
+#define PCLK_PWM 269
+#define PCLK_TIMER 270
+#define PCLK_PERI 271
+
+/* hclk gates */
+#define HCLK_I2S0_8CH 320
+#define HCLK_I2S1_8CH 321
+#define HCLK_I2S2_2CH 322
+#define HCLK_NANDC 323
+#define HCLK_SDMMC 324
+#define HCLK_SDIO 325
+#define HCLK_EMMC 326
+#define HCLK_PERI 327
+#define HCLK_SFC 328
+
+#define CLK_NR_CLKS (HCLK_SFC + 1)
+
+/* reset id */
+#define SRST_CORE_PO_AD 0
+#define SRST_CORE_AD 1
+#define SRST_L2_AD 2
+#define SRST_CPU_NIU_AD 3
+#define SRST_CORE_PO 4
+#define SRST_CORE 5
+#define SRST_L2 6
+#define SRST_CORE_DBG 8
+#define PRST_DBG 9
+#define RST_DAP 10
+#define PRST_DBG_NIU 11
+#define ARST_STRC_SYS_AD 15
+
+#define SRST_DDRPHY_CLKDIV 16
+#define SRST_DDRPHY 17
+#define PRST_DDRPHY 18
+#define PRST_HDMIPHY 19
+#define PRST_VDACPHY 20
+#define PRST_VADCPHY 21
+#define PRST_MIPI_CSI_PHY 22
+#define PRST_MIPI_DSI_PHY 23
+#define PRST_ACODEC 24
+#define ARST_BUS_NIU 25
+#define PRST_TOP_NIU 26
+#define ARST_INTMEM 27
+#define HRST_ROM 28
+#define ARST_DMAC 29
+#define SRST_MSCH_NIU 30
+#define PRST_MSCH_NIU 31
+
+#define PRST_DDRUPCTL 32
+#define NRST_DDRUPCTL 33
+#define PRST_DDRMON 34
+#define HRST_I2S0_8CH 35
+#define MRST_I2S0_8CH 36
+#define HRST_I2S1_2CH 37
+#define MRST_IS21_2CH 38
+#define HRST_I2S2_2CH 39
+#define MRST_I2S2_2CH 40
+#define HRST_CRYPTO 41
+#define SRST_CRYPTO 42
+#define PRST_SPI 43
+#define SRST_SPI 44
+#define PRST_UART0 45
+#define PRST_UART1 46
+#define PRST_UART2 47
+
+#define SRST_UART0 48
+#define SRST_UART1 49
+#define SRST_UART2 50
+#define PRST_I2C1 51
+#define PRST_I2C2 52
+#define PRST_I2C3 53
+#define SRST_I2C1 54
+#define SRST_I2C2 55
+#define SRST_I2C3 56
+#define PRST_PWM1 58
+#define SRST_PWM1 60
+#define PRST_WDT 61
+#define PRST_GPIO1 62
+#define PRST_GPIO2 63
+
+#define PRST_GPIO3 64
+#define PRST_GRF 65
+#define PRST_EFUSE 66
+#define PRST_EFUSE512 67
+#define PRST_TIMER0 68
+#define SRST_TIMER0 69
+#define SRST_TIMER1 70
+#define PRST_TSADC 71
+#define SRST_TSADC 72
+#define PRST_SARADC 73
+#define SRST_SARADC 74
+#define HRST_SYSBUS 75
+#define PRST_USBGRF 76
+
+#define ARST_PERIPH_NIU 80
+#define HRST_PERIPH_NIU 81
+#define PRST_PERIPH_NIU 82
+#define HRST_PERIPH 83
+#define HRST_SDMMC 84
+#define HRST_SDIO 85
+#define HRST_EMMC 86
+#define HRST_NANDC 87
+#define NRST_NANDC 88
+#define HRST_SFC 89
+#define SRST_SFC 90
+#define ARST_GMAC 91
+#define HRST_OTG 92
+#define SRST_OTG 93
+#define SRST_OTG_ADP 94
+#define HRST_HOST0 95
+
+#define HRST_HOST0_AUX 96
+#define HRST_HOST0_ARB 97
+#define SRST_HOST0_EHCIPHY 98
+#define SRST_HOST0_UTMI 99
+#define SRST_USBPOR 100
+#define SRST_UTMI0 101
+#define SRST_UTMI1 102
+
+#define ARST_VIO0_NIU 102
+#define ARST_VIO1_NIU 103
+#define HRST_VIO_NIU 104
+#define PRST_VIO_NIU 105
+#define ARST_VOP 106
+#define HRST_VOP 107
+#define DRST_VOP 108
+#define ARST_IEP 109
+#define HRST_IEP 110
+#define ARST_RGA 111
+#define HRST_RGA 112
+#define SRST_RGA 113
+#define PRST_CVBS 114
+#define PRST_HDMI 115
+#define SRST_HDMI 116
+#define PRST_MIPI_DSI 117
+
+#define ARST_ISP_NIU 118
+#define HRST_ISP_NIU 119
+#define HRST_ISP 120
+#define SRST_ISP 121
+#define ARST_VIP0 122
+#define HRST_VIP0 123
+#define PRST_VIP0 124
+#define ARST_VIP1 125
+#define HRST_VIP1 126
+#define PRST_VIP1 127
+#define ARST_VIP2 128
+#define HRST_VIP2 129
+#define PRST_VIP2 120
+#define ARST_VIP3 121
+#define HRST_VIP3 122
+#define PRST_VIP4 123
+
+#define PRST_CIF1TO4 124
+#define SRST_CVBS_CLK 125
+#define HRST_CVBS 126
+
+#define ARST_VPU_NIU 140
+#define HRST_VPU_NIU 141
+#define ARST_VPU 142
+#define HRST_VPU 143
+#define ARST_RKVDEC_NIU 144
+#define HRST_RKVDEC_NIU 145
+#define ARST_RKVDEC 146
+#define HRST_RKVDEC 147
+#define SRST_RKVDEC_CABAC 148
+#define SRST_RKVDEC_CORE 149
+#define ARST_RKVENC_NIU 150
+#define HRST_RKVENC_NIU 151
+#define ARST_RKVENC 152
+#define HRST_RKVENC 153
+#define SRST_RKVENC_CORE 154
+
+#define SRST_DSP_CORE 156
+#define SRST_DSP_SYS 157
+#define SRST_DSP_GLOBAL 158
+#define SRST_DSP_OECM 159
+#define PRST_DSP_IOP_NIU 160
+#define ARST_DSP_EPP_NIU 161
+#define ARST_DSP_EDP_NIU 162
+#define PRST_DSP_DBG_NIU 163
+#define PRST_DSP_CFG_NIU 164
+#define PRST_DSP_GRF 165
+#define PRST_DSP_MAILBOX 166
+#define PRST_DSP_INTC 167
+#define PRST_DSP_PFM_MON 169
+#define SRST_DSP_PFM_MON 170
+#define ARST_DSP_EDAP_NIU 171
+
+#define SRST_PMU 172
+#define SRST_PMU_I2C0 173
+#define PRST_PMU_I2C0 174
+#define PRST_PMU_GPIO0 175
+#define PRST_PMU_INTMEM 176
+#define PRST_PMU_PWM0 177
+#define SRST_PMU_PWM0 178
+#define PRST_PMU_GRF 179
+#define SRST_PMU_NIU 180
+#define SRST_PMU_PVTM 181
+#define ARST_DSP_EDP_PERF 184
+#define ARST_DSP_EPP_PERF 185
+
+#endif /* _DT_BINDINGS_CLK_ROCKCHIP_RK1108_H */
diff --git a/include/dt-bindings/clock/rk3188-cru-common.h b/include/dt-bindings/clock/rk3188-cru-common.h
index 4f53e70f68ee..d141c1f0c778 100644
--- a/include/dt-bindings/clock/rk3188-cru-common.h
+++ b/include/dt-bindings/clock/rk3188-cru-common.h
@@ -72,6 +72,8 @@
#define ACLK_IPP 200
#define ACLK_RGA 201
#define ACLK_CIF0 202
+#define ACLK_CPU 203
+#define ACLK_PERI 204
/* pclk gates */
#define PCLK_GRF 320
@@ -104,6 +106,8 @@
#define PCLK_EFUSE 347
#define PCLK_TZPC 348
#define PCLK_TSADC 349
+#define PCLK_CPU 350
+#define PCLK_PERI 351
/* hclk gates */
#define HCLK_SDMMC 448
@@ -126,8 +130,10 @@
#define HCLK_IPP 465
#define HCLK_RGA 466
#define HCLK_NANDC0 467
+#define HCLK_CPU 468
+#define HCLK_PERI 469
-#define CLK_NR_CLKS (HCLK_NANDC0 + 1)
+#define CLK_NR_CLKS (HCLK_PERI + 1)
/* soft-reset indices */
#define SRST_MCORE 2
diff --git a/include/dt-bindings/clock/stih415-clks.h b/include/dt-bindings/clock/stih415-clks.h
deleted file mode 100644
index d80caa68aebd..000000000000
--- a/include/dt-bindings/clock/stih415-clks.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * This header provides constants clk index STMicroelectronics
- * STiH415 SoC.
- */
-#ifndef _CLK_STIH415
-#define _CLK_STIH415
-
-/* CLOCKGEN A0 */
-#define CLK_ICN_REG 0
-#define CLK_ETH1_PHY 4
-
-/* CLOCKGEN A1 */
-#define CLK_ICN_IF_2 0
-#define CLK_GMAC0_PHY 3
-
-#endif
diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h
new file mode 100644
index 000000000000..370c0a0473fc
--- /dev/null
+++ b/include/dt-bindings/clock/sun50i-a64-ccu.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN50I_A64_H_
+#define _DT_BINDINGS_CLK_SUN50I_A64_H_
+
+#define CLK_BUS_MIPI_DSI 28
+#define CLK_BUS_CE 29
+#define CLK_BUS_DMA 30
+#define CLK_BUS_MMC0 31
+#define CLK_BUS_MMC1 32
+#define CLK_BUS_MMC2 33
+#define CLK_BUS_NAND 34
+#define CLK_BUS_DRAM 35
+#define CLK_BUS_EMAC 36
+#define CLK_BUS_TS 37
+#define CLK_BUS_HSTIMER 38
+#define CLK_BUS_SPI0 39
+#define CLK_BUS_SPI1 40
+#define CLK_BUS_OTG 41
+#define CLK_BUS_EHCI0 42
+#define CLK_BUS_EHCI1 43
+#define CLK_BUS_OHCI0 44
+#define CLK_BUS_OHCI1 45
+#define CLK_BUS_VE 46
+#define CLK_BUS_TCON0 47
+#define CLK_BUS_TCON1 48
+#define CLK_BUS_DEINTERLACE 49
+#define CLK_BUS_CSI 50
+#define CLK_BUS_HDMI 51
+#define CLK_BUS_DE 52
+#define CLK_BUS_GPU 53
+#define CLK_BUS_MSGBOX 54
+#define CLK_BUS_SPINLOCK 55
+#define CLK_BUS_CODEC 56
+#define CLK_BUS_SPDIF 57
+#define CLK_BUS_PIO 58
+#define CLK_BUS_THS 59
+#define CLK_BUS_I2S0 60
+#define CLK_BUS_I2S1 61
+#define CLK_BUS_I2S2 62
+#define CLK_BUS_I2C0 63
+#define CLK_BUS_I2C1 64
+#define CLK_BUS_I2C2 65
+#define CLK_BUS_SCR 66
+#define CLK_BUS_UART0 67
+#define CLK_BUS_UART1 68
+#define CLK_BUS_UART2 69
+#define CLK_BUS_UART3 70
+#define CLK_BUS_UART4 71
+#define CLK_BUS_DBG 72
+#define CLK_THS 73
+#define CLK_NAND 74
+#define CLK_MMC0 75
+#define CLK_MMC1 76
+#define CLK_MMC2 77
+#define CLK_TS 78
+#define CLK_CE 79
+#define CLK_SPI0 80
+#define CLK_SPI1 81
+#define CLK_I2S0 82
+#define CLK_I2S1 83
+#define CLK_I2S2 84
+#define CLK_SPDIF 85
+#define CLK_USB_PHY0 86
+#define CLK_USB_PHY1 87
+#define CLK_USB_HSIC 88
+#define CLK_USB_HSIC_12M 89
+
+#define CLK_USB_OHCI0 91
+
+#define CLK_USB_OHCI1 93
+
+#define CLK_DRAM_VE 95
+#define CLK_DRAM_CSI 96
+#define CLK_DRAM_DEINTERLACE 97
+#define CLK_DRAM_TS 98
+#define CLK_DE 99
+#define CLK_TCON0 100
+#define CLK_TCON1 101
+#define CLK_DEINTERLACE 102
+#define CLK_CSI_MISC 103
+#define CLK_CSI_SCLK 104
+#define CLK_CSI_MCLK 105
+#define CLK_VE 106
+#define CLK_AC_DIG 107
+#define CLK_AC_DIG_4X 108
+#define CLK_AVS 109
+#define CLK_HDMI 110
+#define CLK_HDMI_DDC 111
+
+#define CLK_DSI_DPHY 113
+#define CLK_GPU 114
+
+#endif /* _DT_BINDINGS_CLK_SUN50I_A64_H_ */
diff --git a/include/dt-bindings/clock/tegra186-clock.h b/include/dt-bindings/clock/tegra186-clock.h
new file mode 100644
index 000000000000..f73d32098f99
--- /dev/null
+++ b/include/dt-bindings/clock/tegra186-clock.h
@@ -0,0 +1,940 @@
+/** @file */
+
+#ifndef _MACH_T186_CLK_T186_H
+#define _MACH_T186_CLK_T186_H
+
+/**
+ * @defgroup clock_ids Clock Identifiers
+ * @{
+ * @defgroup extern_input external input clocks
+ * @{
+ * @def TEGRA186_CLK_OSC
+ * @def TEGRA186_CLK_CLK_32K
+ * @def TEGRA186_CLK_DTV_INPUT
+ * @def TEGRA186_CLK_SOR0_PAD_CLKOUT
+ * @def TEGRA186_CLK_SOR1_PAD_CLKOUT
+ * @def TEGRA186_CLK_I2S1_SYNC_INPUT
+ * @def TEGRA186_CLK_I2S2_SYNC_INPUT
+ * @def TEGRA186_CLK_I2S3_SYNC_INPUT
+ * @def TEGRA186_CLK_I2S4_SYNC_INPUT
+ * @def TEGRA186_CLK_I2S5_SYNC_INPUT
+ * @def TEGRA186_CLK_I2S6_SYNC_INPUT
+ * @def TEGRA186_CLK_SPDIFIN_SYNC_INPUT
+ * @}
+ *
+ * @defgroup extern_output external output clocks
+ * @{
+ * @def TEGRA186_CLK_EXTPERIPH1
+ * @def TEGRA186_CLK_EXTPERIPH2
+ * @def TEGRA186_CLK_EXTPERIPH3
+ * @def TEGRA186_CLK_EXTPERIPH4
+ * @}
+ *
+ * @defgroup display_clks display related clocks
+ * @{
+ * @def TEGRA186_CLK_CEC
+ * @def TEGRA186_CLK_DSIC
+ * @def TEGRA186_CLK_DSIC_LP
+ * @def TEGRA186_CLK_DSID
+ * @def TEGRA186_CLK_DSID_LP
+ * @def TEGRA186_CLK_DPAUX1
+ * @def TEGRA186_CLK_DPAUX
+ * @def TEGRA186_CLK_HDA2HDMICODEC
+ * @def TEGRA186_CLK_NVDISPLAY_DISP
+ * @def TEGRA186_CLK_NVDISPLAY_DSC
+ * @def TEGRA186_CLK_NVDISPLAY_P0
+ * @def TEGRA186_CLK_NVDISPLAY_P1
+ * @def TEGRA186_CLK_NVDISPLAY_P2
+ * @def TEGRA186_CLK_NVDISPLAYHUB
+ * @def TEGRA186_CLK_SOR_SAFE
+ * @def TEGRA186_CLK_SOR0
+ * @def TEGRA186_CLK_SOR0_OUT
+ * @def TEGRA186_CLK_SOR1
+ * @def TEGRA186_CLK_SOR1_OUT
+ * @def TEGRA186_CLK_DSI
+ * @def TEGRA186_CLK_MIPI_CAL
+ * @def TEGRA186_CLK_DSIA_LP
+ * @def TEGRA186_CLK_DSIB
+ * @def TEGRA186_CLK_DSIB_LP
+ * @}
+ *
+ * @defgroup camera_clks camera related clocks
+ * @{
+ * @def TEGRA186_CLK_NVCSI
+ * @def TEGRA186_CLK_NVCSILP
+ * @def TEGRA186_CLK_VI
+ * @}
+ *
+ * @defgroup audio_clks audio related clocks
+ * @{
+ * @def TEGRA186_CLK_ACLK
+ * @def TEGRA186_CLK_ADSP
+ * @def TEGRA186_CLK_ADSPNEON
+ * @def TEGRA186_CLK_AHUB
+ * @def TEGRA186_CLK_APE
+ * @def TEGRA186_CLK_APB2APE
+ * @def TEGRA186_CLK_AUD_MCLK
+ * @def TEGRA186_CLK_DMIC1
+ * @def TEGRA186_CLK_DMIC2
+ * @def TEGRA186_CLK_DMIC3
+ * @def TEGRA186_CLK_DMIC4
+ * @def TEGRA186_CLK_DSPK1
+ * @def TEGRA186_CLK_DSPK2
+ * @def TEGRA186_CLK_HDA
+ * @def TEGRA186_CLK_HDA2CODEC_2X
+ * @def TEGRA186_CLK_I2S1
+ * @def TEGRA186_CLK_I2S2
+ * @def TEGRA186_CLK_I2S3
+ * @def TEGRA186_CLK_I2S4
+ * @def TEGRA186_CLK_I2S5
+ * @def TEGRA186_CLK_I2S6
+ * @def TEGRA186_CLK_MAUD
+ * @def TEGRA186_CLK_PLL_A_OUT0
+ * @def TEGRA186_CLK_SPDIF_DOUBLER
+ * @def TEGRA186_CLK_SPDIF_IN
+ * @def TEGRA186_CLK_SPDIF_OUT
+ * @def TEGRA186_CLK_SYNC_DMIC1
+ * @def TEGRA186_CLK_SYNC_DMIC2
+ * @def TEGRA186_CLK_SYNC_DMIC3
+ * @def TEGRA186_CLK_SYNC_DMIC4
+ * @def TEGRA186_CLK_SYNC_DMIC5
+ * @def TEGRA186_CLK_SYNC_DSPK1
+ * @def TEGRA186_CLK_SYNC_DSPK2
+ * @def TEGRA186_CLK_SYNC_I2S1
+ * @def TEGRA186_CLK_SYNC_I2S2
+ * @def TEGRA186_CLK_SYNC_I2S3
+ * @def TEGRA186_CLK_SYNC_I2S4
+ * @def TEGRA186_CLK_SYNC_I2S5
+ * @def TEGRA186_CLK_SYNC_I2S6
+ * @def TEGRA186_CLK_SYNC_SPDIF
+ * @}
+ *
+ * @defgroup uart_clks UART clocks
+ * @{
+ * @def TEGRA186_CLK_AON_UART_FST_MIPI_CAL
+ * @def TEGRA186_CLK_UARTA
+ * @def TEGRA186_CLK_UARTB
+ * @def TEGRA186_CLK_UARTC
+ * @def TEGRA186_CLK_UARTD
+ * @def TEGRA186_CLK_UARTE
+ * @def TEGRA186_CLK_UARTF
+ * @def TEGRA186_CLK_UARTG
+ * @def TEGRA186_CLK_UART_FST_MIPI_CAL
+ * @}
+ *
+ * @defgroup i2c_clks I2C clocks
+ * @{
+ * @def TEGRA186_CLK_AON_I2C_SLOW
+ * @def TEGRA186_CLK_I2C1
+ * @def TEGRA186_CLK_I2C2
+ * @def TEGRA186_CLK_I2C3
+ * @def TEGRA186_CLK_I2C4
+ * @def TEGRA186_CLK_I2C5
+ * @def TEGRA186_CLK_I2C6
+ * @def TEGRA186_CLK_I2C8
+ * @def TEGRA186_CLK_I2C9
+ * @def TEGRA186_CLK_I2C1
+ * @def TEGRA186_CLK_I2C12
+ * @def TEGRA186_CLK_I2C13
+ * @def TEGRA186_CLK_I2C14
+ * @def TEGRA186_CLK_I2C_SLOW
+ * @def TEGRA186_CLK_VI_I2C
+ * @}
+ *
+ * @defgroup spi_clks SPI clocks
+ * @{
+ * @def TEGRA186_CLK_SPI1
+ * @def TEGRA186_CLK_SPI2
+ * @def TEGRA186_CLK_SPI3
+ * @def TEGRA186_CLK_SPI4
+ * @}
+ *
+ * @defgroup storage storage related clocks
+ * @{
+ * @def TEGRA186_CLK_SATA
+ * @def TEGRA186_CLK_SATA_OOB
+ * @def TEGRA186_CLK_SATA_IOBIST
+ * @def TEGRA186_CLK_SDMMC_LEGACY_TM
+ * @def TEGRA186_CLK_SDMMC1
+ * @def TEGRA186_CLK_SDMMC2
+ * @def TEGRA186_CLK_SDMMC3
+ * @def TEGRA186_CLK_SDMMC4
+ * @def TEGRA186_CLK_QSPI
+ * @def TEGRA186_CLK_QSPI_OUT
+ * @def TEGRA186_CLK_UFSDEV_REF
+ * @def TEGRA186_CLK_UFSHC
+ * @}
+ *
+ * @defgroup pwm_clks PWM clocks
+ * @{
+ * @def TEGRA186_CLK_PWM1
+ * @def TEGRA186_CLK_PWM2
+ * @def TEGRA186_CLK_PWM3
+ * @def TEGRA186_CLK_PWM4
+ * @def TEGRA186_CLK_PWM5
+ * @def TEGRA186_CLK_PWM6
+ * @def TEGRA186_CLK_PWM7
+ * @def TEGRA186_CLK_PWM8
+ * @}
+ *
+ * @defgroup plls PLLs and related clocks
+ * @{
+ * @def TEGRA186_CLK_PLLREFE_OUT_GATED
+ * @def TEGRA186_CLK_PLLREFE_OUT1
+ * @def TEGRA186_CLK_PLLD_OUT1
+ * @def TEGRA186_CLK_PLLP_OUT0
+ * @def TEGRA186_CLK_PLLP_OUT5
+ * @def TEGRA186_CLK_PLLA
+ * @def TEGRA186_CLK_PLLE_PWRSEQ
+ * @def TEGRA186_CLK_PLLA_OUT1
+ * @def TEGRA186_CLK_PLLREFE_REF
+ * @def TEGRA186_CLK_UPHY_PLL0_PWRSEQ
+ * @def TEGRA186_CLK_UPHY_PLL1_PWRSEQ
+ * @def TEGRA186_CLK_PLLREFE_PLLE_PASSTHROUGH
+ * @def TEGRA186_CLK_PLLREFE_PEX
+ * @def TEGRA186_CLK_PLLREFE_IDDQ
+ * @def TEGRA186_CLK_PLLC_OUT_AON
+ * @def TEGRA186_CLK_PLLC_OUT_ISP
+ * @def TEGRA186_CLK_PLLC_OUT_VE
+ * @def TEGRA186_CLK_PLLC4_OUT
+ * @def TEGRA186_CLK_PLLREFE_OUT
+ * @def TEGRA186_CLK_PLLREFE_PLL_REF
+ * @def TEGRA186_CLK_PLLE
+ * @def TEGRA186_CLK_PLLC
+ * @def TEGRA186_CLK_PLLP
+ * @def TEGRA186_CLK_PLLD
+ * @def TEGRA186_CLK_PLLD2
+ * @def TEGRA186_CLK_PLLREFE_VCO
+ * @def TEGRA186_CLK_PLLC2
+ * @def TEGRA186_CLK_PLLC3
+ * @def TEGRA186_CLK_PLLDP
+ * @def TEGRA186_CLK_PLLC4_VCO
+ * @def TEGRA186_CLK_PLLA1
+ * @def TEGRA186_CLK_PLLNVCSI
+ * @def TEGRA186_CLK_PLLDISPHUB
+ * @def TEGRA186_CLK_PLLD3
+ * @def TEGRA186_CLK_PLLBPMPCAM
+ * @def TEGRA186_CLK_PLLAON
+ * @def TEGRA186_CLK_PLLU
+ * @def TEGRA186_CLK_PLLC4_VCO_DIV2
+ * @def TEGRA186_CLK_PLL_REF
+ * @def TEGRA186_CLK_PLLREFE_OUT1_DIV5
+ * @def TEGRA186_CLK_UTMIP_PLL_PWRSEQ
+ * @def TEGRA186_CLK_PLL_U_48M
+ * @def TEGRA186_CLK_PLL_U_480M
+ * @def TEGRA186_CLK_PLLC4_OUT0
+ * @def TEGRA186_CLK_PLLC4_OUT1
+ * @def TEGRA186_CLK_PLLC4_OUT2
+ * @def TEGRA186_CLK_PLLC4_OUT_MUX
+ * @def TEGRA186_CLK_DFLLDISP_DIV
+ * @def TEGRA186_CLK_PLLDISPHUB_DIV
+ * @def TEGRA186_CLK_PLLP_DIV8
+ * @}
+ *
+ * @defgroup nafll_clks NAFLL clock sources
+ * @{
+ * @def TEGRA186_CLK_NAFLL_AXI_CBB
+ * @def TEGRA186_CLK_NAFLL_BCPU
+ * @def TEGRA186_CLK_NAFLL_BPMP
+ * @def TEGRA186_CLK_NAFLL_DISP
+ * @def TEGRA186_CLK_NAFLL_GPU
+ * @def TEGRA186_CLK_NAFLL_ISP
+ * @def TEGRA186_CLK_NAFLL_MCPU
+ * @def TEGRA186_CLK_NAFLL_NVDEC
+ * @def TEGRA186_CLK_NAFLL_NVENC
+ * @def TEGRA186_CLK_NAFLL_NVJPG
+ * @def TEGRA186_CLK_NAFLL_SCE
+ * @def TEGRA186_CLK_NAFLL_SE
+ * @def TEGRA186_CLK_NAFLL_TSEC
+ * @def TEGRA186_CLK_NAFLL_TSECB
+ * @def TEGRA186_CLK_NAFLL_VI
+ * @def TEGRA186_CLK_NAFLL_VIC
+ * @}
+ *
+ * @defgroup mphy MPHY related clocks
+ * @{
+ * @def TEGRA186_CLK_MPHY_L0_RX_SYMB
+ * @def TEGRA186_CLK_MPHY_L0_RX_LS_BIT
+ * @def TEGRA186_CLK_MPHY_L0_TX_SYMB
+ * @def TEGRA186_CLK_MPHY_L0_TX_LS_3XBIT
+ * @def TEGRA186_CLK_MPHY_L0_RX_ANA
+ * @def TEGRA186_CLK_MPHY_L1_RX_ANA
+ * @def TEGRA186_CLK_MPHY_IOBIST
+ * @def TEGRA186_CLK_MPHY_TX_1MHZ_REF
+ * @def TEGRA186_CLK_MPHY_CORE_PLL_FIXED
+ * @}
+ *
+ * @defgroup eavb EAVB related clocks
+ * @{
+ * @def TEGRA186_CLK_EQOS_AXI
+ * @def TEGRA186_CLK_EQOS_PTP_REF
+ * @def TEGRA186_CLK_EQOS_RX
+ * @def TEGRA186_CLK_EQOS_RX_INPUT
+ * @def TEGRA186_CLK_EQOS_TX
+ * @}
+ *
+ * @defgroup usb USB related clocks
+ * @{
+ * @def TEGRA186_CLK_PEX_USB_PAD0_MGMT
+ * @def TEGRA186_CLK_PEX_USB_PAD1_MGMT
+ * @def TEGRA186_CLK_HSIC_TRK
+ * @def TEGRA186_CLK_USB2_TRK
+ * @def TEGRA186_CLK_USB2_HSIC_TRK
+ * @def TEGRA186_CLK_XUSB_CORE_SS
+ * @def TEGRA186_CLK_XUSB_CORE_DEV
+ * @def TEGRA186_CLK_XUSB_FALCON
+ * @def TEGRA186_CLK_XUSB_FS
+ * @def TEGRA186_CLK_XUSB
+ * @def TEGRA186_CLK_XUSB_DEV
+ * @def TEGRA186_CLK_XUSB_HOST
+ * @def TEGRA186_CLK_XUSB_SS
+ * @}
+ *
+ * @defgroup bigblock compute block related clocks
+ * @{
+ * @def TEGRA186_CLK_GPCCLK
+ * @def TEGRA186_CLK_GPC2CLK
+ * @def TEGRA186_CLK_GPU
+ * @def TEGRA186_CLK_HOST1X
+ * @def TEGRA186_CLK_ISP
+ * @def TEGRA186_CLK_NVDEC
+ * @def TEGRA186_CLK_NVENC
+ * @def TEGRA186_CLK_NVJPG
+ * @def TEGRA186_CLK_SE
+ * @def TEGRA186_CLK_TSEC
+ * @def TEGRA186_CLK_TSECB
+ * @def TEGRA186_CLK_VIC
+ * @}
+ *
+ * @defgroup can CAN bus related clocks
+ * @{
+ * @def TEGRA186_CLK_CAN1
+ * @def TEGRA186_CLK_CAN1_HOST
+ * @def TEGRA186_CLK_CAN2
+ * @def TEGRA186_CLK_CAN2_HOST
+ * @}
+ *
+ * @defgroup system basic system clocks
+ * @{
+ * @def TEGRA186_CLK_ACTMON
+ * @def TEGRA186_CLK_AON_APB
+ * @def TEGRA186_CLK_AON_CPU_NIC
+ * @def TEGRA186_CLK_AON_NIC
+ * @def TEGRA186_CLK_AXI_CBB
+ * @def TEGRA186_CLK_BPMP_APB
+ * @def TEGRA186_CLK_BPMP_CPU_NIC
+ * @def TEGRA186_CLK_BPMP_NIC_RATE
+ * @def TEGRA186_CLK_CLK_M
+ * @def TEGRA186_CLK_EMC
+ * @def TEGRA186_CLK_MSS_ENCRYPT
+ * @def TEGRA186_CLK_SCE_APB
+ * @def TEGRA186_CLK_SCE_CPU_NIC
+ * @def TEGRA186_CLK_SCE_NIC
+ * @def TEGRA186_CLK_TSC
+ * @}
+ *
+ * @defgroup pcie_clks PCIe related clocks
+ * @{
+ * @def TEGRA186_CLK_AFI
+ * @def TEGRA186_CLK_PCIE
+ * @def TEGRA186_CLK_PCIE2_IOBIST
+ * @def TEGRA186_CLK_PCIERX0
+ * @def TEGRA186_CLK_PCIERX1
+ * @def TEGRA186_CLK_PCIERX2
+ * @def TEGRA186_CLK_PCIERX3
+ * @def TEGRA186_CLK_PCIERX4
+ * @}
+ */
+
+/** @brief output of gate CLK_ENB_FUSE */
+#define TEGRA186_CLK_FUSE 0
+/**
+ * @brief It's not what you think
+ * @details output of gate CLK_ENB_GPU. This output connects to the GPU
+ * pwrclk. @warning This is almost certainly not the clock you think
+ * it is. If you're looking for the clock of the graphics engine, see
+ * TEGRA186_CLK_GPCCLK.
+ */
+#define TEGRA186_CLK_GPU 1
+/** @brief output of gate CLK_ENB_PCIE */
+#define TEGRA186_CLK_PCIE 3
+/** @brief output of the divider IPFS_CLK_DIVISOR */
+#define TEGRA186_CLK_AFI 4
+/** @brief output of gate CLK_ENB_PCIE2_IOBIST */
+#define TEGRA186_CLK_PCIE2_IOBIST 5
+/** @brief output of gate CLK_ENB_PCIERX0*/
+#define TEGRA186_CLK_PCIERX0 6
+/** @brief output of gate CLK_ENB_PCIERX1*/
+#define TEGRA186_CLK_PCIERX1 7
+/** @brief output of gate CLK_ENB_PCIERX2*/
+#define TEGRA186_CLK_PCIERX2 8
+/** @brief output of gate CLK_ENB_PCIERX3*/
+#define TEGRA186_CLK_PCIERX3 9
+/** @brief output of gate CLK_ENB_PCIERX4*/
+#define TEGRA186_CLK_PCIERX4 10
+/** @brief output branch of PLL_C for ISP, controlled by gate CLK_ENB_PLLC_OUT_ISP */
+#define TEGRA186_CLK_PLLC_OUT_ISP 11
+/** @brief output branch of PLL_C for VI, controlled by gate CLK_ENB_PLLC_OUT_VE */
+#define TEGRA186_CLK_PLLC_OUT_VE 12
+/** @brief output branch of PLL_C for AON domain, controlled by gate CLK_ENB_PLLC_OUT_AON */
+#define TEGRA186_CLK_PLLC_OUT_AON 13
+/** @brief output of gate CLK_ENB_SOR_SAFE */
+#define TEGRA186_CLK_SOR_SAFE 39
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S2 */
+#define TEGRA186_CLK_I2S2 42
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S3 */
+#define TEGRA186_CLK_I2S3 43
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPDF_IN */
+#define TEGRA186_CLK_SPDIF_IN 44
+/** @brief output of gate CLK_ENB_SPDIF_DOUBLER */
+#define TEGRA186_CLK_SPDIF_DOUBLER 45
+/** @clkdesc{spi_clks, out, mux, CLK_RST_CONTROLLER_CLK_SOURCE_SPI3} */
+#define TEGRA186_CLK_SPI3 46
+/** @clkdesc{i2c_clks, out, mux, CLK_RST_CONTROLLER_CLK_SOURCE_I2C1} */
+#define TEGRA186_CLK_I2C1 47
+/** @clkdesc{i2c_clks, out, mux, CLK_RST_CONTROLLER_CLK_SOURCE_I2C5} */
+#define TEGRA186_CLK_I2C5 48
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI1 */
+#define TEGRA186_CLK_SPI1 49
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_ISP */
+#define TEGRA186_CLK_ISP 50
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VI */
+#define TEGRA186_CLK_VI 51
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC1 */
+#define TEGRA186_CLK_SDMMC1 52
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC2 */
+#define TEGRA186_CLK_SDMMC2 53
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC4 */
+#define TEGRA186_CLK_SDMMC4 54
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTA */
+#define TEGRA186_CLK_UARTA 55
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTB */
+#define TEGRA186_CLK_UARTB 56
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HOST1X */
+#define TEGRA186_CLK_HOST1X 57
+/**
+ * @brief controls the EMC clock frequency.
+ * @details Doing a clk_set_rate on this clock will select the
+ * appropriate clock source, program the source rate and execute a
+ * specific sequence to switch to the new clock source for both memory
+ * controllers. This can be used to control the balance between memory
+ * throughput and memory controller power.
+ */
+#define TEGRA186_CLK_EMC 58
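
As the comment above describes, a consumer only needs the common clock API to retune the EMC; a minimal sketch, assuming the consumer's DT node references TEGRA186_CLK_EMC under the connection name "emc"::

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int foo_set_emc_rate(struct device *dev, unsigned long rate)
    {
            struct clk *emc = devm_clk_get(dev, "emc");

            if (IS_ERR(emc))
                    return PTR_ERR(emc);

            /* the clock driver picks the source and reprograms both MCs */
            return clk_set_rate(emc, rate);
    }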
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH4 */
+#define TEGRA186_CLK_EXTPERIPH4 73
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI4 */
+#define TEGRA186_CLK_SPI4 74
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C3 */
+#define TEGRA186_CLK_I2C3 75
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC3 */
+#define TEGRA186_CLK_SDMMC3 76
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTD */
+#define TEGRA186_CLK_UARTD 77
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S1 */
+#define TEGRA186_CLK_I2S1 79
+/** output of gate CLK_ENB_DTV */
+#define TEGRA186_CLK_DTV 80
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TSEC */
+#define TEGRA186_CLK_TSEC 81
+/** @brief output of gate CLK_ENB_DP2 */
+#define TEGRA186_CLK_DP2 82
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S4 */
+#define TEGRA186_CLK_I2S4 84
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S5 */
+#define TEGRA186_CLK_I2S5 85
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C4 */
+#define TEGRA186_CLK_I2C4 86
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AHUB */
+#define TEGRA186_CLK_AHUB 87
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HDA2CODEC_2X */
+#define TEGRA186_CLK_HDA2CODEC_2X 88
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH1 */
+#define TEGRA186_CLK_EXTPERIPH1 89
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH2 */
+#define TEGRA186_CLK_EXTPERIPH2 90
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH3 */
+#define TEGRA186_CLK_EXTPERIPH3 91
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C_SLOW */
+#define TEGRA186_CLK_I2C_SLOW 92
+/** @brief output of the SOR1_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR1 */
+#define TEGRA186_CLK_SOR1 93
+/** @brief output of gate CLK_ENB_CEC */
+#define TEGRA186_CLK_CEC 94
+/** @brief output of gate CLK_ENB_DPAUX1 */
+#define TEGRA186_CLK_DPAUX1 95
+/** @brief output of gate CLK_ENB_DPAUX */
+#define TEGRA186_CLK_DPAUX 96
+/** @brief output of the SOR0_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR0 */
+#define TEGRA186_CLK_SOR0 97
+/** @brief output of gate CLK_ENB_HDA2HDMICODEC */
+#define TEGRA186_CLK_HDA2HDMICODEC 98
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SATA */
+#define TEGRA186_CLK_SATA 99
+/** @brief output of gate CLK_ENB_SATA_OOB */
+#define TEGRA186_CLK_SATA_OOB 100
+/** @brief output of gate CLK_ENB_SATA_IOBIST */
+#define TEGRA186_CLK_SATA_IOBIST 101
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HDA */
+#define TEGRA186_CLK_HDA 102
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SE */
+#define TEGRA186_CLK_SE 103
+/** @brief output of gate CLK_ENB_APB2APE */
+#define TEGRA186_CLK_APB2APE 104
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_APE */
+#define TEGRA186_CLK_APE 105
+/** @brief output of gate CLK_ENB_IQC1 */
+#define TEGRA186_CLK_IQC1 106
+/** @brief output of gate CLK_ENB_IQC2 */
+#define TEGRA186_CLK_IQC2 107
+/** divide by 2 version of TEGRA186_CLK_PLLREFE_VCO */
+#define TEGRA186_CLK_PLLREFE_OUT 108
+/** @brief output of gate CLK_ENB_PLLREFE_PLL_REF */
+#define TEGRA186_CLK_PLLREFE_PLL_REF 109
+/** @brief output of gate CLK_ENB_PLLC4_OUT */
+#define TEGRA186_CLK_PLLC4_OUT 110
+/** @brief output of mux xusb_core_clk_switch on page 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB 111
+/** controls xusb_dev_ce signal on page 66 and 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_DEV 112
+/** controls xusb_host_ce signal on page 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_HOST 113
+/** controls xusb_ss_ce signal on page 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_SS 114
+/** @brief output of gate CLK_ENB_DSI */
+#define TEGRA186_CLK_DSI 115
+/** @brief output of gate CLK_ENB_MIPI_CAL */
+#define TEGRA186_CLK_MIPI_CAL 116
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSIA_LP */
+#define TEGRA186_CLK_DSIA_LP 117
+/** @brief output of gate CLK_ENB_DSIB */
+#define TEGRA186_CLK_DSIB 118
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSIB_LP */
+#define TEGRA186_CLK_DSIB_LP 119
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC1 */
+#define TEGRA186_CLK_DMIC1 122
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC2 */
+#define TEGRA186_CLK_DMIC2 123
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AUD_MCLK */
+#define TEGRA186_CLK_AUD_MCLK 124
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C6 */
+#define TEGRA186_CLK_I2C6 125
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UART_FST_MIPI_CAL */
+#define TEGRA186_CLK_UART_FST_MIPI_CAL 126
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VIC */
+#define TEGRA186_CLK_VIC 127
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC_LEGACY_TM */
+#define TEGRA186_CLK_SDMMC_LEGACY_TM 128
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDEC */
+#define TEGRA186_CLK_NVDEC 129
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVJPG */
+#define TEGRA186_CLK_NVJPG 130
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVENC */
+#define TEGRA186_CLK_NVENC 131
+/** @brief output of the QSPI_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI */
+#define TEGRA186_CLK_QSPI 132
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VI_I2C */
+#define TEGRA186_CLK_VI_I2C 133
+/** @brief output of gate CLK_ENB_HSIC_TRK */
+#define TEGRA186_CLK_HSIC_TRK 134
+/** @brief output of gate CLK_ENB_USB2_TRK */
+#define TEGRA186_CLK_USB2_TRK 135
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_MAUD */
+#define TEGRA186_CLK_MAUD 136
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TSECB */
+#define TEGRA186_CLK_TSECB 137
+/** @brief output of gate CLK_ENB_ADSP */
+#define TEGRA186_CLK_ADSP 138
+/** @brief output of gate CLK_ENB_ADSPNEON */
+#define TEGRA186_CLK_ADSPNEON 139
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_L0_RX_LS_SYMB */
+#define TEGRA186_CLK_MPHY_L0_RX_SYMB 140
+/** @brief output of gate CLK_ENB_MPHY_L0_RX_LS_BIT */
+#define TEGRA186_CLK_MPHY_L0_RX_LS_BIT 141
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_L0_TX_LS_SYMB */
+#define TEGRA186_CLK_MPHY_L0_TX_SYMB 142
+/** @brief output of gate CLK_ENB_MPHY_L0_TX_LS_3XBIT */
+#define TEGRA186_CLK_MPHY_L0_TX_LS_3XBIT 143
+/** @brief output of gate CLK_ENB_MPHY_L0_RX_ANA */
+#define TEGRA186_CLK_MPHY_L0_RX_ANA 144
+/** @brief output of gate CLK_ENB_MPHY_L1_RX_ANA */
+#define TEGRA186_CLK_MPHY_L1_RX_ANA 145
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_IOBIST */
+#define TEGRA186_CLK_MPHY_IOBIST 146
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_TX_1MHZ_REF */
+#define TEGRA186_CLK_MPHY_TX_1MHZ_REF 147
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_CORE_PLL_FIXED */
+#define TEGRA186_CLK_MPHY_CORE_PLL_FIXED 148
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AXI_CBB */
+#define TEGRA186_CLK_AXI_CBB 149
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC3 */
+#define TEGRA186_CLK_DMIC3 150
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC4 */
+#define TEGRA186_CLK_DMIC4 151
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK1 */
+#define TEGRA186_CLK_DSPK1 152
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK2 */
+#define TEGRA186_CLK_DSPK2 153
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S6 */
+#define TEGRA186_CLK_I2S6 154
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_P0 */
+#define TEGRA186_CLK_NVDISPLAY_P0 155
+/** @brief output of the NVDISPLAY_DISP_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_DISP */
+#define TEGRA186_CLK_NVDISPLAY_DISP 156
+/** @brief output of gate CLK_ENB_NVDISPLAY_DSC */
+#define TEGRA186_CLK_NVDISPLAY_DSC 157
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAYHUB */
+#define TEGRA186_CLK_NVDISPLAYHUB 158
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_P1 */
+#define TEGRA186_CLK_NVDISPLAY_P1 159
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_P2 */
+#define TEGRA186_CLK_NVDISPLAY_P2 160
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TACH */
+#define TEGRA186_CLK_TACH 166
+/** @brief output of gate CLK_ENB_EQOS */
+#define TEGRA186_CLK_EQOS_AXI 167
+/** @brief output of gate CLK_ENB_EQOS_RX */
+#define TEGRA186_CLK_EQOS_RX 168
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UFSHC_CG_SYS */
+#define TEGRA186_CLK_UFSHC 178
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UFSDEV_REF */
+#define TEGRA186_CLK_UFSDEV_REF 179
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVCSI */
+#define TEGRA186_CLK_NVCSI 180
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVCSILP */
+#define TEGRA186_CLK_NVCSILP 181
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C7 */
+#define TEGRA186_CLK_I2C7 182
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C9 */
+#define TEGRA186_CLK_I2C9 183
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C12 */
+#define TEGRA186_CLK_I2C12 184
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C13 */
+#define TEGRA186_CLK_I2C13 185
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C14 */
+#define TEGRA186_CLK_I2C14 186
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM1 */
+#define TEGRA186_CLK_PWM1 187
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM2 */
+#define TEGRA186_CLK_PWM2 188
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM3 */
+#define TEGRA186_CLK_PWM3 189
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM5 */
+#define TEGRA186_CLK_PWM5 190
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM6 */
+#define TEGRA186_CLK_PWM6 191
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM7 */
+#define TEGRA186_CLK_PWM7 192
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM8 */
+#define TEGRA186_CLK_PWM8 193
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTE */
+#define TEGRA186_CLK_UARTE 194
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTF */
+#define TEGRA186_CLK_UARTF 195
+/** @deprecated */
+#define TEGRA186_CLK_DBGAPB 196
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_BPMP_CPU_NIC */
+#define TEGRA186_CLK_BPMP_CPU_NIC 197
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_BPMP_APB */
+#define TEGRA186_CLK_BPMP_APB 199
+/** @brief output of mux controlled by TEGRA186_CLK_SOC_ACTMON */
+#define TEGRA186_CLK_ACTMON 201
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_CPU_NIC */
+#define TEGRA186_CLK_AON_CPU_NIC 208
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_CAN1 */
+#define TEGRA186_CLK_CAN1 210
+/** @brief output of gate CLK_ENB_CAN1_HOST */
+#define TEGRA186_CLK_CAN1_HOST 211
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_CAN2 */
+#define TEGRA186_CLK_CAN2 212
+/** @brief output of gate CLK_ENB_CAN2_HOST */
+#define TEGRA186_CLK_CAN2_HOST 213
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_APB */
+#define TEGRA186_CLK_AON_APB 214
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTC */
+#define TEGRA186_CLK_UARTC 215
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTG */
+#define TEGRA186_CLK_UARTG 216
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_UART_FST_MIPI_CAL */
+#define TEGRA186_CLK_AON_UART_FST_MIPI_CAL 217
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C2 */
+#define TEGRA186_CLK_I2C2 218
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C8 */
+#define TEGRA186_CLK_I2C8 219
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C10 */
+#define TEGRA186_CLK_I2C10 220
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_I2C_SLOW */
+#define TEGRA186_CLK_AON_I2C_SLOW 221
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI2 */
+#define TEGRA186_CLK_SPI2 222
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC5 */
+#define TEGRA186_CLK_DMIC5 223
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_TOUCH */
+#define TEGRA186_CLK_AON_TOUCH 224
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM4 */
+#define TEGRA186_CLK_PWM4 225
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TSC. This clock object is read only and is used for all timers in the system. */
+#define TEGRA186_CLK_TSC 226
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_MSS_ENCRYPT */
+#define TEGRA186_CLK_MSS_ENCRYPT 227
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SCE_CPU_NIC */
+#define TEGRA186_CLK_SCE_CPU_NIC 228
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SCE_APB */
+#define TEGRA186_CLK_SCE_APB 230
+/** @brief output of gate CLK_ENB_DSIC */
+#define TEGRA186_CLK_DSIC 231
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSIC_LP */
+#define TEGRA186_CLK_DSIC_LP 232
+/** @brief output of gate CLK_ENB_DSID */
+#define TEGRA186_CLK_DSID 233
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSID_LP */
+#define TEGRA186_CLK_DSID_LP 234
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_PEX_SATA_USB_RX_BYP */
+#define TEGRA186_CLK_PEX_SATA_USB_RX_BYP 236
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_OUT */
+#define TEGRA186_CLK_SPDIF_OUT 238
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_PTP_REF_CLK_0 */
+#define TEGRA186_CLK_EQOS_PTP_REF 239
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_TX_CLK */
+#define TEGRA186_CLK_EQOS_TX 240
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_USB2_HSIC_TRK */
+#define TEGRA186_CLK_USB2_HSIC_TRK 241
+/** @brief output of mux xusb_ss_clk_switch on page 66 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_CORE_SS 242
+/** @brief output of mux xusb_core_dev_clk_switch on page 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_CORE_DEV 243
+/** @brief output of mux xusb_core_falcon_clk_switch on page 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_FALCON 244
+/** @brief output of mux xusb_fs_clk_switch on page 66 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_FS 245
+/** @brief output of the divider CLK_RST_CONTROLLER_PLLA_OUT */
+#define TEGRA186_CLK_PLL_A_OUT0 246
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S1 */
+#define TEGRA186_CLK_SYNC_I2S1 247
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S2 */
+#define TEGRA186_CLK_SYNC_I2S2 248
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S3 */
+#define TEGRA186_CLK_SYNC_I2S3 249
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S4 */
+#define TEGRA186_CLK_SYNC_I2S4 250
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S5 */
+#define TEGRA186_CLK_SYNC_I2S5 251
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S6 */
+#define TEGRA186_CLK_SYNC_I2S6 252
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DSPK1 */
+#define TEGRA186_CLK_SYNC_DSPK1 253
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DSPK2 */
+#define TEGRA186_CLK_SYNC_DSPK2 254
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC1 */
+#define TEGRA186_CLK_SYNC_DMIC1 255
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC2 */
+#define TEGRA186_CLK_SYNC_DMIC2 256
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC3 */
+#define TEGRA186_CLK_SYNC_DMIC3 257
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC4 */
+#define TEGRA186_CLK_SYNC_DMIC4 259
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_SPDIF */
+#define TEGRA186_CLK_SYNC_SPDIF 260
+/** @brief output of gate CLK_ENB_PLLREFE_OUT */
+#define TEGRA186_CLK_PLLREFE_OUT_GATED 261
+/** @brief output of the divider PLLREFE_DIVP in CLK_RST_CONTROLLER_PLLREFE_BASE. PLLREFE has 2 outputs:
+ * * VCO/pdiv defined by this clock object
+ * * VCO/2 defined by TEGRA186_CLK_PLLREFE_OUT
+ */
+#define TEGRA186_CLK_PLLREFE_OUT1 262
+#define TEGRA186_CLK_PLLD_OUT1 267
+/** @brief output of the divider PLLP_DIVP in CLK_RST_CONTROLLER_PLLP_BASE */
+#define TEGRA186_CLK_PLLP_OUT0 269
+/** @brief output of the divider CLK_RST_CONTROLLER_PLLP_OUTC */
+#define TEGRA186_CLK_PLLP_OUT5 270
+/** PLL controlled by CLK_RST_CONTROLLER_PLLA_BASE for use by audio clocks */
+#define TEGRA186_CLK_PLLA 271
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_ACLK_BURST_POLICY divided by the divider controlled by ACLK_CLK_DIVISOR in CLK_RST_CONTROLLER_SUPER_ACLK_DIVIDER */
+#define TEGRA186_CLK_ACLK 273
+/** fixed 48MHz clock divided down from TEGRA186_CLK_PLL_U */
+#define TEGRA186_CLK_PLL_U_48M 274
+/** fixed 480MHz clock divided down from TEGRA186_CLK_PLL_U */
+#define TEGRA186_CLK_PLL_U_480M 275
+/** @brief output of the divider PLLC4_DIVP in CLK_RST_CONTROLLER_PLLC4_BASE. Output frequency is TEGRA186_CLK_PLLC4_VCO/PLLC4_DIVP */
+#define TEGRA186_CLK_PLLC4_OUT0 276
+/** fixed /3 divider. Output frequency of this clock is TEGRA186_CLK_PLLC4_VCO/3 */
+#define TEGRA186_CLK_PLLC4_OUT1 277
+/** fixed /5 divider. Output frequency of this clock is TEGRA186_CLK_PLLC4_VCO/5 */
+#define TEGRA186_CLK_PLLC4_OUT2 278
+/** @brief output of mux controlled by PLLC4_CLK_SEL in CLK_RST_CONTROLLER_PLLC4_MISC1 */
+#define TEGRA186_CLK_PLLC4_OUT_MUX 279
+/** @brief output of divider NVDISPLAY_DISP_CLK_DIVISOR in CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_DISP when DFLLDISP_DIV is selected in NVDISPLAY_DISP_CLK_SRC */
+#define TEGRA186_CLK_DFLLDISP_DIV 284
+/** @brief output of divider NVDISPLAY_DISP_CLK_DIVISOR in CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_DISP when PLLDISPHUB_DIV is selected in NVDISPLAY_DISP_CLK_SRC */
+#define TEGRA186_CLK_PLLDISPHUB_DIV 285
+/** fixed /8 divider which is used as the input for TEGRA186_CLK_SOR_SAFE */
+#define TEGRA186_CLK_PLLP_DIV8 286
+/** @brief output of divider CLK_RST_CONTROLLER_BPMP_NIC_RATE */
+#define TEGRA186_CLK_BPMP_NIC 287
+/** @brief output of the divider CLK_RST_CONTROLLER_PLLA1_OUT1 */
+#define TEGRA186_CLK_PLL_A_OUT1 288
+/** @deprecated */
+#define TEGRA186_CLK_GPC2CLK 289
+/** A fake clock which must be enabled during KFUSE read operations to ensure adequate VDD_CORE voltage. */
+#define TEGRA186_CLK_KFUSE 293
+/**
+ * @brief controls the PLLE hardware sequencer.
+ * @details This clock only has enable and disable methods. When the
+ * PLLE hw sequencer is enabled, PLLE will be enabled or disabled by
+ * hw based on the control signals from the PCIe, SATA and XUSB
+ * clocks. When the PLLE hw sequencer is disabled, the state of PLLE
+ * is controlled by sw using clk_enable/clk_disable on
+ * TEGRA186_CLK_PLLE.
+ */
+#define TEGRA186_CLK_PLLE_PWRSEQ 294
+/** fixed 60MHz clock divided down from TEGRA186_CLK_PLL_U */
+#define TEGRA186_CLK_PLLREFE_REF 295
+/** @brief output of mux controlled by SOR0_CLK_SEL0 and SOR0_CLK_SEL1 in CLK_RST_CONTROLLER_CLK_SOURCE_SOR0 */
+#define TEGRA186_CLK_SOR0_OUT 296
+/** @brief output of mux controlled by SOR1_CLK_SEL0 and SOR1_CLK_SEL1 in CLK_RST_CONTROLLER_CLK_SOURCE_SOR1 */
+#define TEGRA186_CLK_SOR1_OUT 297
+/** @brief fixed /5 divider. Output frequency of this clock is TEGRA186_CLK_PLLREFE_OUT1/5. Used as input for TEGRA186_CLK_EQOS_AXI */
+#define TEGRA186_CLK_PLLREFE_OUT1_DIV5 298
+/** @brief controls the UTMIP_PLL (aka PLLU) hardware sequencer */
+#define TEGRA186_CLK_UTMIP_PLL_PWRSEQ 301
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL0_MGMT */
+#define TEGRA186_CLK_PEX_USB_PAD0_MGMT 302
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL1_MGMT */
+#define TEGRA186_CLK_PEX_USB_PAD1_MGMT 303
+/** @brief controls the UPHY_PLL0 hardware sequencer */
+#define TEGRA186_CLK_UPHY_PLL0_PWRSEQ 304
+/** @brief controls the UPHY_PLL1 hardware sequencer */
+#define TEGRA186_CLK_UPHY_PLL1_PWRSEQ 305
+/** @brief control for PLLREFE_IDDQ in CLK_RST_CONTROLLER_PLLREFE_MISC so the bypass output can be used even when the PLL is disabled */
+#define TEGRA186_CLK_PLLREFE_PLLE_PASSTHROUGH 306
+/** @brief output of the mux controlled by PLLREFE_SEL_CLKIN_PEX in CLK_RST_CONTROLLER_PLLREFE_MISC */
+#define TEGRA186_CLK_PLLREFE_PEX 307
+/** @brief control for PLLREFE_IDDQ in CLK_RST_CONTROLLER_PLLREFE_MISC to turn on the PLL when enabled */
+#define TEGRA186_CLK_PLLREFE_IDDQ 308
+/** @brief output of the divider QSPI_CLK_DIV2_SEL in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI */
+#define TEGRA186_CLK_QSPI_OUT 309
+/**
+ * @brief GPC2CLK-div-2
+ * @details fixed /2 divider. Output frequency is
+ * TEGRA186_CLK_GPC2CLK/2. The frequency of this clock is the
+ * frequency at which the GPU graphics engine runs.
+ */
+#define TEGRA186_CLK_GPCCLK 310
+/** @brief output of divider CLK_RST_CONTROLLER_AON_NIC_RATE */
+#define TEGRA186_CLK_AON_NIC 450
+/** @brief output of divider CLK_RST_CONTROLLER_SCE_NIC_RATE */
+#define TEGRA186_CLK_SCE_NIC 451
+/** Fixed 100MHz PLL for PCIe, SATA and superspeed USB */
+#define TEGRA186_CLK_PLLE 512
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC_BASE */
+#define TEGRA186_CLK_PLLC 513
+/** Fixed 408MHz PLL for use by peripheral clocks */
+#define TEGRA186_CLK_PLLP 516
+/** @deprecated */
+#define TEGRA186_CLK_PLL_P TEGRA186_CLK_PLLP
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLD_BASE for use by DSI */
+#define TEGRA186_CLK_PLLD 518
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLD2_BASE for use by HDMI or DP */
+#define TEGRA186_CLK_PLLD2 519
+/**
+ * @brief PLL controlled by CLK_RST_CONTROLLER_PLLREFE_BASE.
+ * @details Note that this clock only controls the VCO output, before
+ * the post-divider. See TEGRA186_CLK_PLLREFE_OUT1 for more
+ * information.
+ */
+#define TEGRA186_CLK_PLLREFE_VCO 520
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC2_BASE */
+#define TEGRA186_CLK_PLLC2 521
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC3_BASE */
+#define TEGRA186_CLK_PLLC3 522
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLDP_BASE for use as the DP link clock */
+#define TEGRA186_CLK_PLLDP 523
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC4_BASE */
+#define TEGRA186_CLK_PLLC4_VCO 524
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLA1_BASE for use by audio clocks */
+#define TEGRA186_CLK_PLLA1 525
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLNVCSI_BASE */
+#define TEGRA186_CLK_PLLNVCSI 526
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLDISPHUB_BASE */
+#define TEGRA186_CLK_PLLDISPHUB 527
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLD3_BASE for use by HDMI or DP */
+#define TEGRA186_CLK_PLLD3 528
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLBPMPCAM_BASE */
+#define TEGRA186_CLK_PLLBPMPCAM 531
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLAON_BASE for use by IP blocks in the AON domain */
+#define TEGRA186_CLK_PLLAON 532
+/** Fixed frequency 960MHz PLL for USB and EAVB */
+#define TEGRA186_CLK_PLLU 533
+/** fixed /2 divider. Output frequency is TEGRA186_CLK_PLLC4_VCO/2 */
+#define TEGRA186_CLK_PLLC4_VCO_DIV2 535
+/** @brief NAFLL clock source for AXI_CBB */
+#define TEGRA186_CLK_NAFLL_AXI_CBB 564
+/** @brief NAFLL clock source for BPMP */
+#define TEGRA186_CLK_NAFLL_BPMP 565
+/** @brief NAFLL clock source for ISP */
+#define TEGRA186_CLK_NAFLL_ISP 566
+/** @brief NAFLL clock source for NVDEC */
+#define TEGRA186_CLK_NAFLL_NVDEC 567
+/** @brief NAFLL clock source for NVENC */
+#define TEGRA186_CLK_NAFLL_NVENC 568
+/** @brief NAFLL clock source for NVJPG */
+#define TEGRA186_CLK_NAFLL_NVJPG 569
+/** @brief NAFLL clock source for SCE */
+#define TEGRA186_CLK_NAFLL_SCE 570
+/** @brief NAFLL clock source for SE */
+#define TEGRA186_CLK_NAFLL_SE 571
+/** @brief NAFLL clock source for TSEC */
+#define TEGRA186_CLK_NAFLL_TSEC 572
+/** @brief NAFLL clock source for TSECB */
+#define TEGRA186_CLK_NAFLL_TSECB 573
+/** @brief NAFLL clock source for VI */
+#define TEGRA186_CLK_NAFLL_VI 574
+/** @brief NAFLL clock source for VIC */
+#define TEGRA186_CLK_NAFLL_VIC 575
+/** @brief NAFLL clock source for DISP */
+#define TEGRA186_CLK_NAFLL_DISP 576
+/** @brief NAFLL clock source for GPU */
+#define TEGRA186_CLK_NAFLL_GPU 577
+/** @brief NAFLL clock source for M-CPU cluster */
+#define TEGRA186_CLK_NAFLL_MCPU 578
+/** @brief NAFLL clock source for B-CPU cluster */
+#define TEGRA186_CLK_NAFLL_BCPU 579
+/** @brief input from Tegra's CLK_32K_IN pad */
+#define TEGRA186_CLK_CLK_32K 608
+/** @brief output of divider CLK_RST_CONTROLLER_CLK_M_DIVIDE */
+#define TEGRA186_CLK_CLK_M 609
+/** @brief output of divider PLL_REF_DIV in CLK_RST_CONTROLLER_OSC_CTRL */
+#define TEGRA186_CLK_PLL_REF 610
+/** @brief input from Tegra's XTAL_IN */
+#define TEGRA186_CLK_OSC 612
+/** @brief clock recovered from EAVB input */
+#define TEGRA186_CLK_EQOS_RX_INPUT 613
+/** @brief clock recovered from DTV input */
+#define TEGRA186_CLK_DTV_INPUT 614
+/** @brief SOR0 brick output which feeds into SOR0_CLK_SEL mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR0 */
+#define TEGRA186_CLK_SOR0_PAD_CLKOUT 615
+/** @brief SOR1 brick output which feeds into SOR1_CLK_SEL mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR1 */
+#define TEGRA186_CLK_SOR1_PAD_CLKOUT 616
+/** @brief clock recovered from I2S1 input */
+#define TEGRA186_CLK_I2S1_SYNC_INPUT 617
+/** @brief clock recovered from I2S2 input */
+#define TEGRA186_CLK_I2S2_SYNC_INPUT 618
+/** @brief clock recovered from I2S3 input */
+#define TEGRA186_CLK_I2S3_SYNC_INPUT 619
+/** @brief clock recovered from I2S4 input */
+#define TEGRA186_CLK_I2S4_SYNC_INPUT 620
+/** @brief clock recovered from I2S5 input */
+#define TEGRA186_CLK_I2S5_SYNC_INPUT 621
+/** @brief clock recovered from I2S6 input */
+#define TEGRA186_CLK_I2S6_SYNC_INPUT 622
+/** @brief clock recovered from SPDIFIN input */
+#define TEGRA186_CLK_SPDIFIN_SYNC_INPUT 623
+
+/**
+ * @brief subject to change
+ * @details maximum clock identifier value plus one.
+ */
+#define TEGRA186_CLK_CLK_MAX 624
+
+/** @} */
+
+#endif
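
A consumer node in a .dts file passes these identifiers to the clock provider; on Tegra186 the clocks are exposed by the BPMP firmware node. A minimal sketch follows (the &bpmp label and the node shown are assumptions, not part of this header):

    uart {
            /* one clock cell: the TEGRA186_CLK_* identifier */
            clocks = <&bpmp TEGRA186_CLK_UARTD>;
    };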
diff --git a/include/dt-bindings/gpio/meson-gxl-gpio.h b/include/dt-bindings/gpio/meson-gxl-gpio.h
new file mode 100644
index 000000000000..684d0d7add1c
--- /dev/null
+++ b/include/dt-bindings/gpio/meson-gxl-gpio.h
@@ -0,0 +1,131 @@
+/*
+ * GPIO definitions for Amlogic Meson GXL SoCs
+ *
+ * Copyright (C) 2016 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DT_BINDINGS_MESON_GXL_GPIO_H
+#define _DT_BINDINGS_MESON_GXL_GPIO_H
+
+#define GPIOAO_0 0
+#define GPIOAO_1 1
+#define GPIOAO_2 2
+#define GPIOAO_3 3
+#define GPIOAO_4 4
+#define GPIOAO_5 5
+#define GPIOAO_6 6
+#define GPIOAO_7 7
+#define GPIOAO_8 8
+#define GPIOAO_9 9
+
+#define GPIOZ_0 0
+#define GPIOZ_1 1
+#define GPIOZ_2 2
+#define GPIOZ_3 3
+#define GPIOZ_4 4
+#define GPIOZ_5 5
+#define GPIOZ_6 6
+#define GPIOZ_7 7
+#define GPIOZ_8 8
+#define GPIOZ_9 9
+#define GPIOZ_10 10
+#define GPIOZ_11 11
+#define GPIOZ_12 12
+#define GPIOZ_13 13
+#define GPIOZ_14 14
+#define GPIOZ_15 15
+#define GPIOH_0 16
+#define GPIOH_1 17
+#define GPIOH_2 18
+#define GPIOH_3 19
+#define GPIOH_4 20
+#define GPIOH_5 21
+#define GPIOH_6 22
+#define GPIOH_7 23
+#define GPIOH_8 24
+#define GPIOH_9 25
+#define BOOT_0 26
+#define BOOT_1 27
+#define BOOT_2 28
+#define BOOT_3 29
+#define BOOT_4 30
+#define BOOT_5 31
+#define BOOT_6 32
+#define BOOT_7 33
+#define BOOT_8 34
+#define BOOT_9 35
+#define BOOT_10 36
+#define BOOT_11 37
+#define BOOT_12 38
+#define BOOT_13 39
+#define BOOT_14 40
+#define BOOT_15 41
+#define CARD_0 42
+#define CARD_1 43
+#define CARD_2 44
+#define CARD_3 45
+#define CARD_4 46
+#define CARD_5 47
+#define CARD_6 48
+#define GPIODV_0 49
+#define GPIODV_1 50
+#define GPIODV_2 51
+#define GPIODV_3 52
+#define GPIODV_4 53
+#define GPIODV_5 54
+#define GPIODV_6 55
+#define GPIODV_7 56
+#define GPIODV_8 57
+#define GPIODV_9 58
+#define GPIODV_10 59
+#define GPIODV_11 60
+#define GPIODV_12 61
+#define GPIODV_13 62
+#define GPIODV_14 63
+#define GPIODV_15 64
+#define GPIODV_16 65
+#define GPIODV_17 66
+#define GPIODV_18 67
+#define GPIODV_19 68
+#define GPIODV_20 69
+#define GPIODV_21 70
+#define GPIODV_22 71
+#define GPIODV_23 72
+#define GPIODV_24 73
+#define GPIODV_25 74
+#define GPIODV_26 75
+#define GPIODV_27 76
+#define GPIODV_28 77
+#define GPIODV_29 78
+#define GPIOX_0 79
+#define GPIOX_1 80
+#define GPIOX_2 81
+#define GPIOX_3 82
+#define GPIOX_4 83
+#define GPIOX_5 84
+#define GPIOX_6 85
+#define GPIOX_7 86
+#define GPIOX_8 87
+#define GPIOX_9 88
+#define GPIOX_10 89
+#define GPIOX_11 90
+#define GPIOX_12 91
+#define GPIOX_13 92
+#define GPIOX_14 93
+#define GPIOX_15 94
+#define GPIOX_16 95
+#define GPIOX_17 96
+#define GPIOX_18 97
+#define GPIOCLK_0 98
+#define GPIOCLK_1 99
+#define GPIO_TEST_N 100
+
+#endif
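
As a sketch, a board .dts hands one of these pin numbers to the matching GPIO controller; the &gpio label is an assumption and GPIO_ACTIVE_HIGH comes from dt-bindings/gpio/gpio.h:

    leds {
            compatible = "gpio-leds";
            led-blue {
                    gpios = <&gpio GPIOX_9 GPIO_ACTIVE_HIGH>;
            };
    };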
diff --git a/include/dt-bindings/mailbox/tegra186-hsp.h b/include/dt-bindings/mailbox/tegra186-hsp.h
new file mode 100644
index 000000000000..f5d66e5f5f10
--- /dev/null
+++ b/include/dt-bindings/mailbox/tegra186-hsp.h
@@ -0,0 +1,24 @@
+/*
+ * This header provides constants for binding nvidia,tegra186-hsp.
+ */
+
+#ifndef _DT_BINDINGS_MAILBOX_TEGRA186_HSP_H
+#define _DT_BINDINGS_MAILBOX_TEGRA186_HSP_H
+
+/*
+ * These define the type of mailbox that is to be used (doorbell, shared
+ * mailbox, shared semaphore or arbitrated semaphore).
+ */
+#define TEGRA_HSP_MBOX_TYPE_DB 0x0
+#define TEGRA_HSP_MBOX_TYPE_SM 0x1
+#define TEGRA_HSP_MBOX_TYPE_SS 0x2
+#define TEGRA_HSP_MBOX_TYPE_AS 0x3
+
+/*
+ * These defines represent the bit associated with the given master ID in the
+ * doorbell registers.
+ */
+#define TEGRA_HSP_DB_MASTER_CCPLEX 17
+#define TEGRA_HSP_DB_MASTER_BPMP 19
+
+#endif
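
A usage sketch: a client that wants the doorbell rung by the BPMP combines a mailbox type with a master ID in its mboxes specifier (the &hsp_top0 label is an assumption):

    bpmp {
            mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_DB TEGRA_HSP_DB_MASTER_BPMP>;
    };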
diff --git a/include/dt-bindings/net/mscc-phy-vsc8531.h b/include/dt-bindings/net/mscc-phy-vsc8531.h
deleted file mode 100644
index 2383dd20ff43..000000000000
--- a/include/dt-bindings/net/mscc-phy-vsc8531.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Device Tree constants for Microsemi VSC8531 PHY
- *
- * Author: Nagaraju Lakkaraju
- *
- * License: Dual MIT/GPL
- * Copyright (c) 2016 Microsemi Corporation
- */
-
-#ifndef _DT_BINDINGS_MSCC_VSC8531_H
-#define _DT_BINDINGS_MSCC_VSC8531_H
-
-/* MAC interface Edge rate control VDDMAC in milli Volts */
-#define MSCC_VDDMAC_3300 3300
-#define MSCC_VDDMAC_2500 2500
-#define MSCC_VDDMAC_1800 1800
-#define MSCC_VDDMAC_1500 1500
-#define MSCC_VDDMAC_MAX 4
-#define MSCC_SLOWDOWN_MAX 8
-
-#endif
diff --git a/include/dt-bindings/pinctrl/at91.h b/include/dt-bindings/pinctrl/at91.h
index bbca3d038900..2732d6c0fb39 100644
--- a/include/dt-bindings/pinctrl/at91.h
+++ b/include/dt-bindings/pinctrl/at91.h
@@ -15,6 +15,8 @@
#define AT91_PINCTRL_DEGLITCH (1 << 2)
#define AT91_PINCTRL_PULL_DOWN (1 << 3)
#define AT91_PINCTRL_DIS_SCHMIT (1 << 4)
+#define AT91_PINCTRL_OUTPUT (1 << 7)
+#define AT91_PINCTRL_OUTPUT_VAL(x) ((x & 0x1) << 8)
#define AT91_PINCTRL_DEBOUNCE (1 << 16)
#define AT91_PINCTRL_DEBOUNCE_VAL(x) (x << 17)
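
A sketch of how the new flags combine in an atmel,pins entry to force a pin to output and preset its level; the bank/pin numbers and the AT91_PIOC/AT91_PERIPH_GPIO constants from earlier in this header are illustrative:

    pinctrl_out_high: out-high {
            atmel,pins = <AT91_PIOC 4 AT91_PERIPH_GPIO
                          (AT91_PINCTRL_OUTPUT | AT91_PINCTRL_OUTPUT_VAL(1))>;
    };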
diff --git a/include/dt-bindings/pinctrl/bcm2835.h b/include/dt-bindings/pinctrl/bcm2835.h
index 6f0bc37af39c..e4e4fdf5d38f 100644
--- a/include/dt-bindings/pinctrl/bcm2835.h
+++ b/include/dt-bindings/pinctrl/bcm2835.h
@@ -24,4 +24,9 @@
#define BCM2835_FSEL_ALT2 6
#define BCM2835_FSEL_ALT3 7
+/* brcm,pull property */
+#define BCM2835_PUD_OFF 0
+#define BCM2835_PUD_DOWN 1
+#define BCM2835_PUD_UP 2
+
#endif /* __DT_BINDINGS_PINCTRL_BCM2835_H__ */
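
A sketch of a pin configuration node using the new pull constants alongside the existing function selectors (BCM2835_FSEL_ALT0 is defined earlier in this header; the pin numbers are illustrative):

    i2c0_pins: i2c0 {
            brcm,pins = <0 1>;
            brcm,function = <BCM2835_FSEL_ALT0>;
            brcm,pull = <BCM2835_PUD_UP>;
    };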
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
index aafa76cb569d..d33f17c8a515 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
@@ -89,6 +89,10 @@
#define PMA8084_GPIO_S4 2
#define PMA8084_GPIO_L6 3
+#define PM8994_GPIO_VPH 0
+#define PM8994_GPIO_S4 2
+#define PM8994_GPIO_L12 3
+
/* To be used with "function" */
#define PMIC_GPIO_FUNC_NORMAL "normal"
#define PMIC_GPIO_FUNC_PAIRED "paired"
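
A sketch of a PM8994 GPIO pin configuration selecting the S4 supply as its power source; the pin name and the other properties are illustrative:

    volume-keys {
            pins = "gpio5";
            function = PMIC_GPIO_FUNC_NORMAL;
            power-source = <PM8994_GPIO_S4>;
            bias-pull-up;
    };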
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
index a15c1704d0ec..2e360d8f7801 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
@@ -65,6 +65,12 @@
#define PMA8084_MPP_S4 2
#define PMA8084_MPP_L6 3
+#define PM8994_MPP_VPH 0
+/* Only supported for MPP_05-MPP_08 */
+#define PM8994_MPP_L19 1
+#define PM8994_MPP_S4 2
+#define PM8994_MPP_L12 3
+
/*
* Analog Input - Set the source for analog input.
* To be used with "qcom,amux-route" property
diff --git a/include/dt-bindings/pinctrl/rockchip.h b/include/dt-bindings/pinctrl/rockchip.h
index 743e66a95e13..aaec8baaa354 100644
--- a/include/dt-bindings/pinctrl/rockchip.h
+++ b/include/dt-bindings/pinctrl/rockchip.h
@@ -25,6 +25,39 @@
#define RK_GPIO4 4
#define RK_GPIO6 6
+#define RK_PA0 0
+#define RK_PA1 1
+#define RK_PA2 2
+#define RK_PA3 3
+#define RK_PA4 4
+#define RK_PA5 5
+#define RK_PA6 6
+#define RK_PA7 7
+#define RK_PB0 8
+#define RK_PB1 9
+#define RK_PB2 10
+#define RK_PB3 11
+#define RK_PB4 12
+#define RK_PB5 13
+#define RK_PB6 14
+#define RK_PB7 15
+#define RK_PC0 16
+#define RK_PC1 17
+#define RK_PC2 18
+#define RK_PC3 19
+#define RK_PC4 20
+#define RK_PC5 21
+#define RK_PC6 22
+#define RK_PC7 23
+#define RK_PD0 24
+#define RK_PD1 25
+#define RK_PD2 26
+#define RK_PD3 27
+#define RK_PD4 28
+#define RK_PD5 29
+#define RK_PD6 30
+#define RK_PD7 31
+
#define RK_FUNC_GPIO 0
#define RK_FUNC_1 1
#define RK_FUNC_2 2
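
A sketch showing the new RK_Pxy names in place of raw pin numbers in a rockchip,pins entry (the &pcfg_pull_none config node is an assumption):

    uart2_xfer: uart2-xfer {
            rockchip,pins = <1 RK_PB1 RK_FUNC_1 &pcfg_pull_none>,
                            <1 RK_PB2 RK_FUNC_1 &pcfg_pull_none>;
    };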
diff --git a/include/dt-bindings/power/mt2701-power.h b/include/dt-bindings/power/mt2701-power.h
new file mode 100644
index 000000000000..64cc826d642c
--- /dev/null
+++ b/include/dt-bindings/power/mt2701-power.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2015 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT2701_POWER_H
+#define _DT_BINDINGS_POWER_MT2701_POWER_H
+
+#define MT2701_POWER_DOMAIN_CONN 0
+#define MT2701_POWER_DOMAIN_DISP 1
+#define MT2701_POWER_DOMAIN_MFG 2
+#define MT2701_POWER_DOMAIN_VDEC 3
+#define MT2701_POWER_DOMAIN_ISP 4
+#define MT2701_POWER_DOMAIN_BDP 5
+#define MT2701_POWER_DOMAIN_ETH 6
+#define MT2701_POWER_DOMAIN_HIF 7
+#define MT2701_POWER_DOMAIN_IFR_MSC 8
+
+#endif /* _DT_BINDINGS_POWER_MT2701_POWER_H */
diff --git a/include/dt-bindings/power/r8a7743-sysc.h b/include/dt-bindings/power/r8a7743-sysc.h
new file mode 100644
index 000000000000..61cfbb2907ea
--- /dev/null
+++ b/include/dt-bindings/power/r8a7743-sysc.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7743_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7743_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7743_PD_CA15_CPU0 0
+#define R8A7743_PD_CA15_CPU1 1
+#define R8A7743_PD_CA15_SCU 12
+#define R8A7743_PD_SGX 20
+
+/* Always-on power area */
+#define R8A7743_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7743_SYSC_H__ */
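
A sketch of a consumer in the always-on area referencing the SYSC power-domain provider (the &sysc label is an assumption):

    serial {
            power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
    };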
diff --git a/include/dt-bindings/power/r8a7745-sysc.h b/include/dt-bindings/power/r8a7745-sysc.h
new file mode 100644
index 000000000000..1844c1171c04
--- /dev/null
+++ b/include/dt-bindings/power/r8a7745-sysc.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7745_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7745_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7745_PD_CA7_CPU0 5
+#define R8A7745_PD_CA7_CPU1 6
+#define R8A7745_PD_SGX 20
+#define R8A7745_PD_CA7_SCU 21
+
+/* Always-on power area */
+#define R8A7745_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7745_SYSC_H__ */
diff --git a/include/dt-bindings/power/tegra186-powergate.h b/include/dt-bindings/power/tegra186-powergate.h
new file mode 100644
index 000000000000..388d6e228dc8
--- /dev/null
+++ b/include/dt-bindings/power/tegra186-powergate.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DT_BINDINGS_POWER_TEGRA186_POWERGATE_H
+#define _DT_BINDINGS_POWER_TEGRA186_POWERGATE_H
+
+#define TEGRA186_POWER_DOMAIN_AUD 0
+#define TEGRA186_POWER_DOMAIN_DFD 1
+#define TEGRA186_POWER_DOMAIN_DISP 2
+#define TEGRA186_POWER_DOMAIN_DISPB 3
+#define TEGRA186_POWER_DOMAIN_DISPC 4
+#define TEGRA186_POWER_DOMAIN_ISPA 5
+#define TEGRA186_POWER_DOMAIN_NVDEC 6
+#define TEGRA186_POWER_DOMAIN_NVJPG 7
+#define TEGRA186_POWER_DOMAIN_MPE 8
+#define TEGRA186_POWER_DOMAIN_PCX 9
+#define TEGRA186_POWER_DOMAIN_SAX 10
+#define TEGRA186_POWER_DOMAIN_VE 11
+#define TEGRA186_POWER_DOMAIN_VIC 12
+#define TEGRA186_POWER_DOMAIN_XUSBA 13
+#define TEGRA186_POWER_DOMAIN_XUSBB 14
+#define TEGRA186_POWER_DOMAIN_XUSBC 15
+#define TEGRA186_POWER_DOMAIN_GPU 43
+#define TEGRA186_POWER_DOMAIN_MAX 44
+
+#endif
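
On Tegra186 these power domains are managed by the BPMP, so a device node names it as the provider; a sketch (the &bpmp label and node name are assumptions):

    usb-host {
            power-domains = <&bpmp TEGRA186_POWER_DOMAIN_XUSBC>;
    };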
diff --git a/include/dt-bindings/reset/oxsemi,ox810se.h b/include/dt-bindings/reset/oxsemi,ox810se.h
new file mode 100644
index 000000000000..960c26e4504a
--- /dev/null
+++ b/include/dt-bindings/reset/oxsemi,ox810se.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef DT_RESET_OXSEMI_OX810SE_H
+#define DT_RESET_OXSEMI_OX810SE_H
+
+#define RESET_ARM 0
+#define RESET_COPRO 1
+/* Reserved 2 */
+/* Reserved 3 */
+#define RESET_USBHS 4
+#define RESET_USBHSPHY 5
+#define RESET_MAC 6
+#define RESET_PCI 7
+#define RESET_DMA 8
+#define RESET_DPE 9
+#define RESET_DDR 10
+#define RESET_SATA 11
+#define RESET_SATA_LINK 12
+#define RESET_SATA_PHY 13
+/* Reserved 14 */
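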
+#define RESET_NAND 15
+#define RESET_GPIO 16
+#define RESET_UART1 17
+#define RESET_UART2 18
+#define RESET_MISC 19
+#define RESET_I2S 20
+#define RESET_AHB_MON 21
+#define RESET_UART3 22
+#define RESET_UART4 23
+#define RESET_SGDMA 24
+/* Reserved 25 */
+/* Reserved 26 */
+/* Reserved 27 */
+/* Reserved 28 */
+/* Reserved 29 */
+/* Reserved 30 */
+#define RESET_BUS 31
+
+#endif /* DT_RESET_OXSEMI_OX810SE_H */
diff --git a/include/dt-bindings/reset/oxsemi,ox820.h b/include/dt-bindings/reset/oxsemi,ox820.h
new file mode 100644
index 000000000000..cc6797bf01d8
--- /dev/null
+++ b/include/dt-bindings/reset/oxsemi,ox820.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef DT_RESET_OXSEMI_OX820_H
+#define DT_RESET_OXSEMI_OX820_H
+
+#define RESET_SCU 0
+#define RESET_LEON 1
+#define RESET_ARM0 2
+#define RESET_ARM1 3
+#define RESET_USBHS 4
+#define RESET_USBPHYA 5
+#define RESET_MAC 6
+#define RESET_PCIEA 7
+#define RESET_SGDMA 8
+#define RESET_CIPHER 9
+#define RESET_DDR 10
+#define RESET_SATA 11
+#define RESET_SATA_LINK 12
+#define RESET_SATA_PHY 13
+#define RESET_PCIEPHY 14
+#define RESET_NAND 15
+#define RESET_GPIO 16
+#define RESET_UART1 17
+#define RESET_UART2 18
+#define RESET_MISC 19
+#define RESET_I2S 20
+#define RESET_SD 21
+#define RESET_MAC_2 22
+#define RESET_PCIEB 23
+#define RESET_VIDEO 24
+#define RESET_DDR_PHY 25
+#define RESET_USBPHYB 26
+#define RESET_USBDEV 27
+/* Reserved 28 */
+#define RESET_ARMDBG 29
+#define RESET_PLLA 30
+#define RESET_PLLB 31
+
+#endif /* DT_RESET_OXSEMI_OX820_H */
diff --git a/include/dt-bindings/reset/sun50i-a64-ccu.h b/include/dt-bindings/reset/sun50i-a64-ccu.h
new file mode 100644
index 000000000000..db60b29ddb11
--- /dev/null
+++ b/include/dt-bindings/reset/sun50i-a64-ccu.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN50I_A64_H_
+#define _DT_BINDINGS_RST_SUN50I_A64_H_
+
+#define RST_USB_PHY0 0
+#define RST_USB_PHY1 1
+#define RST_USB_HSIC 2
+#define RST_DRAM 3
+#define RST_MBUS 4
+#define RST_BUS_MIPI_DSI 5
+#define RST_BUS_CE 6
+#define RST_BUS_DMA 7
+#define RST_BUS_MMC0 8
+#define RST_BUS_MMC1 9
+#define RST_BUS_MMC2 10
+#define RST_BUS_NAND 11
+#define RST_BUS_DRAM 12
+#define RST_BUS_EMAC 13
+#define RST_BUS_TS 14
+#define RST_BUS_HSTIMER 15
+#define RST_BUS_SPI0 16
+#define RST_BUS_SPI1 17
+#define RST_BUS_OTG 18
+#define RST_BUS_EHCI0 19
+#define RST_BUS_EHCI1 20
+#define RST_BUS_OHCI0 21
+#define RST_BUS_OHCI1 22
+#define RST_BUS_VE 23
+#define RST_BUS_TCON0 24
+#define RST_BUS_TCON1 25
+#define RST_BUS_DEINTERLACE 26
+#define RST_BUS_CSI 27
+#define RST_BUS_HDMI0 28
+#define RST_BUS_HDMI1 29
+#define RST_BUS_DE 30
+#define RST_BUS_GPU 31
+#define RST_BUS_MSGBOX 32
+#define RST_BUS_SPINLOCK 33
+#define RST_BUS_DBG 34
+#define RST_BUS_LVDS 35
+#define RST_BUS_CODEC 36
+#define RST_BUS_SPDIF 37
+#define RST_BUS_THS 38
+#define RST_BUS_I2S0 39
+#define RST_BUS_I2S1 40
+#define RST_BUS_I2S2 41
+#define RST_BUS_I2C0 42
+#define RST_BUS_I2C1 43
+#define RST_BUS_I2C2 44
+#define RST_BUS_SCR 45
+#define RST_BUS_UART0 46
+#define RST_BUS_UART1 47
+#define RST_BUS_UART2 48
+#define RST_BUS_UART3 49
+#define RST_BUS_UART4 50
+
+#endif /* _DT_BINDINGS_RST_SUN50I_A64_H_ */
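
A sketch of a peripheral taking its module reset from the A64 CCU (the &ccu label is an assumption):

    serial {
            resets = <&ccu RST_BUS_UART0>;
    };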
diff --git a/include/dt-bindings/reset/tegra186-reset.h b/include/dt-bindings/reset/tegra186-reset.h
new file mode 100644
index 000000000000..8a184e357955
--- /dev/null
+++ b/include/dt-bindings/reset/tegra186-reset.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ABI_MACH_T186_RESET_T186_H_
+#define _ABI_MACH_T186_RESET_T186_H_
+
+
+#define TEGRA186_RESET_ACTMON 0
+#define TEGRA186_RESET_AFI 1
+#define TEGRA186_RESET_CEC 2
+#define TEGRA186_RESET_CSITE 3
+#define TEGRA186_RESET_DP2 4
+#define TEGRA186_RESET_DPAUX 5
+#define TEGRA186_RESET_DSI 6
+#define TEGRA186_RESET_DSIB 7
+#define TEGRA186_RESET_DTV 8
+#define TEGRA186_RESET_DVFS 9
+#define TEGRA186_RESET_ENTROPY 10
+#define TEGRA186_RESET_EXTPERIPH1 11
+#define TEGRA186_RESET_EXTPERIPH2 12
+#define TEGRA186_RESET_EXTPERIPH3 13
+#define TEGRA186_RESET_GPU 14
+#define TEGRA186_RESET_HDA 15
+#define TEGRA186_RESET_HDA2CODEC_2X 16
+#define TEGRA186_RESET_HDA2HDMICODEC 17
+#define TEGRA186_RESET_HOST1X 18
+#define TEGRA186_RESET_I2C1 19
+#define TEGRA186_RESET_I2C2 20
+#define TEGRA186_RESET_I2C3 21
+#define TEGRA186_RESET_I2C4 22
+#define TEGRA186_RESET_I2C5 23
+#define TEGRA186_RESET_I2C6 24
+#define TEGRA186_RESET_ISP 25
+#define TEGRA186_RESET_KFUSE 26
+#define TEGRA186_RESET_LA 27
+#define TEGRA186_RESET_MIPI_CAL 28
+#define TEGRA186_RESET_PCIE 29
+#define TEGRA186_RESET_PCIEXCLK 30
+#define TEGRA186_RESET_SATA 31
+#define TEGRA186_RESET_SATACOLD 32
+#define TEGRA186_RESET_SDMMC1 33
+#define TEGRA186_RESET_SDMMC2 34
+#define TEGRA186_RESET_SDMMC3 35
+#define TEGRA186_RESET_SDMMC4 36
+#define TEGRA186_RESET_SE 37
+#define TEGRA186_RESET_SOC_THERM 38
+#define TEGRA186_RESET_SOR0 39
+#define TEGRA186_RESET_SPI1 40
+#define TEGRA186_RESET_SPI2 41
+#define TEGRA186_RESET_SPI3 42
+#define TEGRA186_RESET_SPI4 43
+#define TEGRA186_RESET_TMR 44
+#define TEGRA186_RESET_TRIG_SYS 45
+#define TEGRA186_RESET_TSEC 46
+#define TEGRA186_RESET_UARTA 47
+#define TEGRA186_RESET_UARTB 48
+#define TEGRA186_RESET_UARTC 49
+#define TEGRA186_RESET_UARTD 50
+#define TEGRA186_RESET_VI 51
+#define TEGRA186_RESET_VIC 52
+#define TEGRA186_RESET_XUSB_DEV 53
+#define TEGRA186_RESET_XUSB_HOST 54
+#define TEGRA186_RESET_XUSB_PADCTL 55
+#define TEGRA186_RESET_XUSB_SS 56
+#define TEGRA186_RESET_AON_APB 57
+#define TEGRA186_RESET_AXI_CBB 58
+#define TEGRA186_RESET_BPMP_APB 59
+#define TEGRA186_RESET_CAN1 60
+#define TEGRA186_RESET_CAN2 61
+#define TEGRA186_RESET_DMIC5 62
+#define TEGRA186_RESET_DSIC 63
+#define TEGRA186_RESET_DSID 64
+#define TEGRA186_RESET_EMC_EMC 65
+#define TEGRA186_RESET_EMC_MEM 66
+#define TEGRA186_RESET_EMCSB_EMC 67
+#define TEGRA186_RESET_EMCSB_MEM 68
+#define TEGRA186_RESET_EQOS 69
+#define TEGRA186_RESET_GPCDMA 70
+#define TEGRA186_RESET_GPIO_CTL0 71
+#define TEGRA186_RESET_GPIO_CTL1 72
+#define TEGRA186_RESET_GPIO_CTL2 73
+#define TEGRA186_RESET_GPIO_CTL3 74
+#define TEGRA186_RESET_GPIO_CTL4 75
+#define TEGRA186_RESET_GPIO_CTL5 76
+#define TEGRA186_RESET_I2C10 77
+#define TEGRA186_RESET_I2C12 78
+#define TEGRA186_RESET_I2C13 79
+#define TEGRA186_RESET_I2C14 80
+#define TEGRA186_RESET_I2C7 81
+#define TEGRA186_RESET_I2C8 82
+#define TEGRA186_RESET_I2C9 83
+#define TEGRA186_RESET_JTAG2AXI 84
+#define TEGRA186_RESET_MPHY_IOBIST 85
+#define TEGRA186_RESET_MPHY_L0_RX 86
+#define TEGRA186_RESET_MPHY_L0_TX 87
+#define TEGRA186_RESET_NVCSI 88
+#define TEGRA186_RESET_NVDISPLAY0_HEAD0 89
+#define TEGRA186_RESET_NVDISPLAY0_HEAD1 90
+#define TEGRA186_RESET_NVDISPLAY0_HEAD2 91
+#define TEGRA186_RESET_NVDISPLAY0_MISC 92
+#define TEGRA186_RESET_NVDISPLAY0_WGRP0 93
+#define TEGRA186_RESET_NVDISPLAY0_WGRP1 94
+#define TEGRA186_RESET_NVDISPLAY0_WGRP2 95
+#define TEGRA186_RESET_NVDISPLAY0_WGRP3 96
+#define TEGRA186_RESET_NVDISPLAY0_WGRP4 97
+#define TEGRA186_RESET_NVDISPLAY0_WGRP5 98
+#define TEGRA186_RESET_PWM1 99
+#define TEGRA186_RESET_PWM2 100
+#define TEGRA186_RESET_PWM3 101
+#define TEGRA186_RESET_PWM4 102
+#define TEGRA186_RESET_PWM5 103
+#define TEGRA186_RESET_PWM6 104
+#define TEGRA186_RESET_PWM7 105
+#define TEGRA186_RESET_PWM8 106
+#define TEGRA186_RESET_SCE_APB 107
+#define TEGRA186_RESET_SOR1 108
+#define TEGRA186_RESET_TACH 109
+#define TEGRA186_RESET_TSC 110
+#define TEGRA186_RESET_UARTF 111
+#define TEGRA186_RESET_UARTG 112
+#define TEGRA186_RESET_UFSHC 113
+#define TEGRA186_RESET_UFSHC_AXI_M 114
+#define TEGRA186_RESET_UPHY 115
+#define TEGRA186_RESET_ADSP 116
+#define TEGRA186_RESET_ADSPDBG 117
+#define TEGRA186_RESET_ADSPINTF 118
+#define TEGRA186_RESET_ADSPNEON 119
+#define TEGRA186_RESET_ADSPPERIPH 120
+#define TEGRA186_RESET_ADSPSCU 121
+#define TEGRA186_RESET_ADSPWDT 122
+#define TEGRA186_RESET_APE 123
+#define TEGRA186_RESET_DPAUX1 124
+#define TEGRA186_RESET_NVDEC 125
+#define TEGRA186_RESET_NVENC 126
+#define TEGRA186_RESET_NVJPG 127
+#define TEGRA186_RESET_PEX_USB_UPHY 128
+#define TEGRA186_RESET_QSPI 129
+#define TEGRA186_RESET_TSECB 130
+#define TEGRA186_RESET_VI_I2C 131
+#define TEGRA186_RESET_UARTE 132
+#define TEGRA186_RESET_TOP_GTE 133
+#define TEGRA186_RESET_SHSP 134
+#define TEGRA186_RESET_PEX_USB_UPHY_L5 135
+#define TEGRA186_RESET_PEX_USB_UPHY_L4 136
+#define TEGRA186_RESET_PEX_USB_UPHY_L3 137
+#define TEGRA186_RESET_PEX_USB_UPHY_L2 138
+#define TEGRA186_RESET_PEX_USB_UPHY_L1 139
+#define TEGRA186_RESET_PEX_USB_UPHY_L0 140
+#define TEGRA186_RESET_PEX_USB_UPHY_PLL1 141
+#define TEGRA186_RESET_PEX_USB_UPHY_PLL0 142
+#define TEGRA186_RESET_TSCTNVI 143
+#define TEGRA186_RESET_EXTPERIPH4 144
+#define TEGRA186_RESET_DSIPADCTL 145
+#define TEGRA186_RESET_AUD_MCLK 146
+#define TEGRA186_RESET_MPHY_CLK_CTL 147
+#define TEGRA186_RESET_MPHY_L1_RX 148
+#define TEGRA186_RESET_MPHY_L1_TX 149
+#define TEGRA186_RESET_UFSHC_LP 150
+#define TEGRA186_RESET_BPMP_NIC 151
+#define TEGRA186_RESET_BPMP_NSYSPORESET 152
+#define TEGRA186_RESET_BPMP_NRESET 153
+#define TEGRA186_RESET_BPMP_DBGRESETN 154
+#define TEGRA186_RESET_BPMP_PRESETDBGN 155
+#define TEGRA186_RESET_BPMP_PM 156
+#define TEGRA186_RESET_BPMP_CVC 157
+#define TEGRA186_RESET_BPMP_DMA 158
+#define TEGRA186_RESET_BPMP_HSP 159
+#define TEGRA186_RESET_TSCTNBPMP 160
+#define TEGRA186_RESET_BPMP_TKE 161
+#define TEGRA186_RESET_BPMP_GTE 162
+#define TEGRA186_RESET_BPMP_PM_ACTMON 163
+#define TEGRA186_RESET_AON_NIC 164
+#define TEGRA186_RESET_AON_NSYSPORESET 165
+#define TEGRA186_RESET_AON_NRESET 166
+#define TEGRA186_RESET_AON_DBGRESETN 167
+#define TEGRA186_RESET_AON_PRESETDBGN 168
+#define TEGRA186_RESET_AON_ACTMON 169
+#define TEGRA186_RESET_AOPM 170
+#define TEGRA186_RESET_AOVC 171
+#define TEGRA186_RESET_AON_DMA 172
+#define TEGRA186_RESET_AON_GPIO 173
+#define TEGRA186_RESET_AON_HSP 174
+#define TEGRA186_RESET_TSCTNAON 175
+#define TEGRA186_RESET_AON_TKE 176
+#define TEGRA186_RESET_AON_GTE 177
+#define TEGRA186_RESET_SCE_NIC 178
+#define TEGRA186_RESET_SCE_NSYSPORESET 179
+#define TEGRA186_RESET_SCE_NRESET 180
+#define TEGRA186_RESET_SCE_DBGRESETN 181
+#define TEGRA186_RESET_SCE_PRESETDBGN 182
+#define TEGRA186_RESET_SCE_ACTMON 183
+#define TEGRA186_RESET_SCE_PM 184
+#define TEGRA186_RESET_SCE_DMA 185
+#define TEGRA186_RESET_SCE_HSP 186
+#define TEGRA186_RESET_TSCTNSCE 187
+#define TEGRA186_RESET_SCE_TKE 188
+#define TEGRA186_RESET_SCE_GTE 189
+#define TEGRA186_RESET_SCE_CFG 190
+#define TEGRA186_RESET_ADSP_ALL 191
+/** @brief controls the power up/down sequence of UFSHC PSW partition. Controls LP_PWR_READY, LP_ISOL_EN, and LP_RESET_N signals */
+#define TEGRA186_RESET_UFSHC_LP_SEQ 192
+#define TEGRA186_RESET_SIZE 193
+
+#endif
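
As with the clock and power-domain identifiers above, these reset indices are passed to the BPMP node; a sketch (the &bpmp label and the reset-names value are assumptions):

    i2c {
            resets = <&bpmp TEGRA186_RESET_I2C1>;
            reset-names = "i2c";
    };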
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index dda39d8fa189..b717ed9d2b75 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -25,13 +25,13 @@
struct arch_timer_kvm {
/* Virtual offset */
- cycle_t cntvoff;
+ u64 cntvoff;
};
struct arch_timer_cpu {
/* Registers: control register, timer value */
u32 cntv_ctl; /* Saved/restored */
- cycle_t cntv_cval; /* Saved/restored */
+ u64 cntv_cval; /* Saved/restored */
/*
* Anything that is not used directly from assembly code goes
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 61a3d90f32b3..5b36974ed60a 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -56,6 +56,27 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
acpi_fwnode_handle(adev) : NULL)
#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
+static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
+{
+ struct fwnode_handle *fwnode;
+
+ fwnode = kzalloc(sizeof(struct fwnode_handle), GFP_KERNEL);
+ if (!fwnode)
+ return NULL;
+
+ fwnode->type = FWNODE_ACPI_STATIC;
+
+ return fwnode;
+}
+
+static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode)
+{
+ if (WARN_ON(!fwnode || fwnode->type != FWNODE_ACPI_STATIC))
+ return;
+
+ kfree(fwnode);
+}
+
/**
* ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
* the PCI-defined class-code information
@@ -220,10 +241,6 @@ int __init acpi_table_parse_entries(char *id, unsigned long table_size,
int entry_id,
acpi_tbl_entry_handler handler,
unsigned int max_entries);
-int __init acpi_table_parse_entries(char *id, unsigned long table_size,
- int entry_id,
- acpi_tbl_entry_handler handler,
- unsigned int max_entries);
int __init acpi_table_parse_entries_array(char *id, unsigned long table_size,
struct acpi_subtable_proc *proc, int proc_num,
unsigned int max_entries);
@@ -420,6 +437,8 @@ static inline int acpi_dev_filter_resource_type_cb(struct acpi_resource *ares,
return acpi_dev_filter_resource_type(ares, (unsigned long)arg);
}
+struct acpi_device *acpi_resource_consumer(struct resource *res);
+
int acpi_check_resource_conflict(const struct resource *res);
int acpi_check_region(resource_size_t start, resource_size_t n,
@@ -469,6 +488,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_CPCV2_SUPPORT 0x00000040
#define OSC_SB_PCLPI_SUPPORT 0x00000080
#define OSC_SB_OSLPI_SUPPORT 0x00000100
+#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000
extern bool osc_sb_apei_support_acked;
extern bool osc_pc_lpi_support_confirmed;
@@ -744,6 +764,11 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
return DEV_DMA_NOT_SUPPORTED;
}
+static inline void acpi_dma_configure(struct device *dev,
+ enum dev_dma_attr attr) { }
+
+static inline void acpi_dma_deconfigure(struct device *dev) { }
+
#define ACPI_PTR(_ptr) (NULL)
static inline void acpi_device_set_enumerated(struct acpi_device *adev)
@@ -764,6 +789,11 @@ static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb)
return -EINVAL;
}
+static inline struct acpi_device *acpi_resource_consumer(struct resource *res)
+{
+ return NULL;
+}
+
#endif /* !CONFIG_ACPI */
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 0e32dac8fd03..77e08099e554 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -23,20 +23,36 @@
#include <linux/fwnode.h>
#include <linux/irqdomain.h>
+#define IORT_IRQ_MASK(irq) (irq & 0xffffffffULL)
+#define IORT_IRQ_TRIGGER_MASK(irq) ((irq >> 32) & 0xffffffffULL)
+
int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node);
void iort_deregister_domain_token(int trans_id);
struct fwnode_handle *iort_find_domain_token(int trans_id);
#ifdef CONFIG_ACPI_IORT
void acpi_iort_init(void);
+bool iort_node_match(u8 type);
u32 iort_msi_map_rid(struct device *dev, u32 req_id);
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
+/* IOMMU interface */
+void iort_set_dma_mask(struct device *dev);
+const struct iommu_ops *iort_iommu_configure(struct device *dev);
#else
static inline void acpi_iort_init(void) { }
+static inline bool iort_node_match(u8 type) { return false; }
static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
{ return req_id; }
static inline struct irq_domain *iort_get_device_domain(struct device *dev,
u32 req_id)
{ return NULL; }
+/* IOMMU interface */
+static inline void iort_set_dma_mask(struct device *dev) { }
+static inline
+const struct iommu_ops *iort_iommu_configure(struct device *dev)
+{ return NULL; }
#endif
+#define IORT_ACPI_DECLARE(name, table_id, fn) \
+ ACPI_DECLARE_PROBE_ENTRY(iort, name, table_id, 0, NULL, 0, fn)
+
#endif /* __ACPI_IORT_H__ */
diff --git a/include/linux/ahci-remap.h b/include/linux/ahci-remap.h
new file mode 100644
index 000000000000..62be3a40239d
--- /dev/null
+++ b/include/linux/ahci-remap.h
@@ -0,0 +1,28 @@
+#ifndef _LINUX_AHCI_REMAP_H
+#define _LINUX_AHCI_REMAP_H
+
+#include <linux/sizes.h>
+
+#define AHCI_VSCAP 0xa4
+#define AHCI_REMAP_CAP 0x800
+
+/* device class code */
+#define AHCI_REMAP_N_DCC 0x880
+
+/* remap-device base relative to ahci-bar */
+#define AHCI_REMAP_N_OFFSET SZ_16K
+#define AHCI_REMAP_N_SIZE SZ_16K
+
+#define AHCI_MAX_REMAP 3
+
+static inline unsigned int ahci_remap_dcc(int i)
+{
+ return AHCI_REMAP_N_DCC + i * 0x80;
+}
+
+static inline unsigned int ahci_remap_base(int i)
+{
+ return AHCI_REMAP_N_OFFSET + i * AHCI_REMAP_N_SIZE;
+}
+
+#endif /* _LINUX_AHCI_REMAP_H */
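A minimal sketch of how the two helpers above are meant to combine; the dump function, its ahci_bar argument and the pr_info output are illustrative assumptions, not part of this header:

static void example_dump_remapped_devices(void __iomem *ahci_bar)
{
        int i;

        for (i = 0; i < AHCI_MAX_REMAP; i++) {
                /* remapped device i sits 16K + i * 16K past the AHCI BAR */
                void __iomem *base = ahci_bar + ahci_remap_base(i);

                pr_info("remap %d: class-code reg 0x%x, base %p\n",
                        i, ahci_remap_dcc(i), base);
        }
}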
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 9eb42dbc5582..fdd0a343f455 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -14,14 +14,9 @@ typedef int (kiocb_cancel_fn)(struct kiocb *);
/* prototypes */
#ifdef CONFIG_AIO
extern void exit_aio(struct mm_struct *mm);
-extern long do_io_submit(aio_context_t ctx_id, long nr,
- struct iocb __user *__user *iocbpp, bool compat);
void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
#else
static inline void exit_aio(struct mm_struct *mm) { }
-static inline long do_io_submit(aio_context_t ctx_id, long nr,
- struct iocb __user * __user *iocbpp,
- bool compat) { return 0; }
static inline void kiocb_set_cancel_fn(struct kiocb *req,
kiocb_cancel_fn *cancel) { }
#endif /* CONFIG_AIO */
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index 9d8031257a90..c70aac13244a 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -10,7 +10,12 @@ enum alarmtimer_type {
ALARM_REALTIME,
ALARM_BOOTTIME,
+ /* Supported types end here */
ALARM_NUMTYPE,
+
+ /* Used for tracing information. No usable types. */
+ ALARM_REALTIME_FREEZER,
+ ALARM_BOOTTIME_FREEZER,
};
enum alarmtimer_restart {
diff --git a/include/linux/amba/pl061.h b/include/linux/amba/pl061.h
deleted file mode 100644
index fb83c0453489..000000000000
--- a/include/linux/amba/pl061.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#include <linux/types.h>
-
-/* platform data for the PL061 GPIO driver */
-
-struct pl061_platform_data {
- /* number of the first GPIO */
- unsigned gpio_base;
-
- /* number of the first IRQ.
- * If the IRQ functionality is not desired this must be set to 0.
- */
- unsigned irq_base;
-
- u8 directions; /* startup directions, 1: out, 0: in */
- u8 values; /* startup values */
-};
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 27e9ec8778eb..5308eae9ce35 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -84,6 +84,8 @@ struct pl08x_channel_data {
* running any DMA transfer and multiplexing can be recycled
* @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2
* @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2
+ * @slave_map: DMA slave matching table
+ * @slave_map_len: number of elements in @slave_map
*/
struct pl08x_platform_data {
struct pl08x_channel_data *slave_channels;
@@ -93,6 +95,8 @@ struct pl08x_platform_data {
void (*put_xfer_signal)(const struct pl08x_channel_data *, int);
u8 lli_buses;
u8 mem_buses;
+ const struct dma_slave_map *slave_map;
+ int slave_map_len;
};
#ifdef CONFIG_AMBA_PL08X
diff --git a/include/linux/ata.h b/include/linux/ata.h
index fdb180367ba1..af6859b3a93d 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -348,6 +348,7 @@ enum {
ATA_LOG_DEVSLP_DETO = 0x01,
ATA_LOG_DEVSLP_VALID = 0x07,
ATA_LOG_DEVSLP_VALID_MASK = 0x80,
+ ATA_LOG_NCQ_PRIO_OFFSET = 0x09,
/* NCQ send and receive log */
ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET = 0x00,
@@ -940,6 +941,11 @@ static inline bool ata_id_has_ncq_non_data(const u16 *id)
return id[ATA_ID_SATA_CAPABILITY_2] & BIT(5);
}
+static inline bool ata_id_has_ncq_prio(const u16 *id)
+{
+ return id[ATA_ID_SATA_CAPABILITY] & BIT(12);
+}
+
static inline bool ata_id_has_trim(const u16 *id)
{
if (ata_id_major_version(id) >= 7 &&
diff --git a/include/linux/atmel-ssc.h b/include/linux/atmel-ssc.h
index 7c0f6549898b..fdb545101ede 100644
--- a/include/linux/atmel-ssc.h
+++ b/include/linux/atmel-ssc.h
@@ -20,6 +20,7 @@ struct ssc_device {
int user;
int irq;
bool clk_from_rk_pin;
+ bool sound_dai;
};
struct ssc_device * __must_check ssc_request(unsigned int ssc_num);
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 9d4443f93db6..f51fca8d0b6f 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -147,7 +147,7 @@ extern void audit_log_d_path(struct audit_buffer *ab,
extern void audit_log_key(struct audit_buffer *ab,
char *key);
extern void audit_log_link_denied(const char *operation,
- struct path *link);
+ const struct path *link);
extern void audit_log_lost(const char *message);
#ifdef CONFIG_SECURITY
extern void audit_log_secctx(struct audit_buffer *ab, u32 secid);
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index c357f27d5483..e850e76acaaf 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -116,6 +116,8 @@ struct bdi_writeback {
struct list_head work_list;
struct delayed_work dwork; /* work item used for writeback */
+ unsigned long dirty_sleep; /* last wait */
+
struct list_head bdi_node; /* anchored at bdi->wb_list */
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -136,12 +138,13 @@ struct bdi_writeback {
struct backing_dev_info {
struct list_head bdi_list;
unsigned long ra_pages; /* max readahead in PAGE_SIZE units */
- unsigned int capabilities; /* Device capabilities */
+ unsigned long io_pages; /* max allowed IO size */
congested_fn *congested_fn; /* Function pointer if device is md/dm */
void *congested_data; /* Pointer to aux data for congested func */
char *name;
+ unsigned int capabilities; /* Device capabilities */
unsigned int min_ratio;
unsigned int max_ratio, max_prop_frac;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 97cb48f03dc7..7cf8a6c70a3f 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -63,6 +63,12 @@
#define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
/*
+ * Return the data direction, READ or WRITE.
+ */
+#define bio_data_dir(bio) \
+ (op_is_write(bio_op(bio)) ? WRITE : READ)
+
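A tiny usage sketch of the new bio_data_dir(), which builds on op_is_write(); the accounting function and its counters are invented for illustration:

static void example_account_bio(struct bio *bio, unsigned long *reads,
                                unsigned long *writes)
{
        /* bio_data_dir() collapses the opcode to READ or WRITE */
        if (bio_data_dir(bio) == WRITE)
                (*writes)++;
        else
                (*reads)++;
}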
+/*
* Check whether this bio carries any data or not. A NULL bio is allowed.
*/
static inline bool bio_has_data(struct bio *bio)
@@ -70,7 +76,8 @@ static inline bool bio_has_data(struct bio *bio)
if (bio &&
bio->bi_iter.bi_size &&
bio_op(bio) != REQ_OP_DISCARD &&
- bio_op(bio) != REQ_OP_SECURE_ERASE)
+ bio_op(bio) != REQ_OP_SECURE_ERASE &&
+ bio_op(bio) != REQ_OP_WRITE_ZEROES)
return true;
return false;
@@ -80,18 +87,8 @@ static inline bool bio_no_advance_iter(struct bio *bio)
{
return bio_op(bio) == REQ_OP_DISCARD ||
bio_op(bio) == REQ_OP_SECURE_ERASE ||
- bio_op(bio) == REQ_OP_WRITE_SAME;
-}
-
-static inline bool bio_is_rw(struct bio *bio)
-{
- if (!bio_has_data(bio))
- return false;
-
- if (bio_no_advance_iter(bio))
- return false;
-
- return true;
+ bio_op(bio) == REQ_OP_WRITE_SAME ||
+ bio_op(bio) == REQ_OP_WRITE_ZEROES;
}
static inline bool bio_mergeable(struct bio *bio)
@@ -193,18 +190,20 @@ static inline unsigned bio_segments(struct bio *bio)
struct bvec_iter iter;
/*
- * We special case discard/write same, because they interpret bi_size
- * differently:
+ * We special case discard/write same/write zeroes, because they
+ * interpret bi_size differently:
*/
- if (bio_op(bio) == REQ_OP_DISCARD)
- return 1;
-
- if (bio_op(bio) == REQ_OP_SECURE_ERASE)
- return 1;
-
- if (bio_op(bio) == REQ_OP_WRITE_SAME)
+ switch (bio_op(bio)) {
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
+ case REQ_OP_WRITE_ZEROES:
+ return 0;
+ case REQ_OP_WRITE_SAME:
return 1;
+ default:
+ break;
+ }
bio_for_each_segment(bv, bio, iter)
segs++;
@@ -409,6 +408,8 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
}
+extern blk_qc_t submit_bio(struct bio *);
+
extern void bio_endio(struct bio *);
static inline void bio_io_error(struct bio *bio)
@@ -423,13 +424,15 @@ extern int bio_phys_segments(struct request_queue *, struct bio *);
extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);
-extern void bio_init(struct bio *);
+extern void bio_init(struct bio *bio, struct bio_vec *table,
+ unsigned short max_vecs);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);
extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
const struct iov_iter *, gfp_t);
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 3bf5d33800ab..01b62e7bac74 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -581,15 +581,14 @@ static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
/**
* blkg_rwstat_add - add a value to a blkg_rwstat
* @rwstat: target blkg_rwstat
- * @op: REQ_OP
- * @op_flags: rq_flag_bits
+ * @op: REQ_OP and flags
* @val: value to add
*
* Add @val to @rwstat. The counters are chosen according to @rw. The
* caller is responsible for synchronizing calls to this function.
*/
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
- int op, int op_flags, uint64_t val)
+ unsigned int op, uint64_t val)
{
struct percpu_counter *cnt;
@@ -600,7 +599,7 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
- if (op_flags & REQ_SYNC)
+ if (op_is_sync(op))
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
else
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
@@ -705,9 +704,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
if (!throtl) {
blkg = blkg ?: q->root_blkg;
- blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_opf,
+ blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
bio->bi_iter.bi_size);
- blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_opf, 1);
+ blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
}
rcu_read_unlock();
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 535ab2e13d2e..4a2ab5d99ff7 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -3,6 +3,7 @@
#include <linux/blkdev.h>
#include <linux/sbitmap.h>
+#include <linux/srcu.h>
struct blk_mq_tags;
struct blk_flush_queue;
@@ -35,6 +36,8 @@ struct blk_mq_hw_ctx {
struct blk_mq_tags *tags;
+ struct srcu_struct queue_rq_srcu;
+
unsigned long queued;
unsigned long run;
#define BLK_MQ_MAX_DISPATCH_ORDER 7
@@ -215,18 +218,20 @@ void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
void __blk_mq_end_request(struct request *rq, int error);
-void blk_mq_requeue_request(struct request *rq);
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
-void blk_mq_cancel_requeue_work(struct request_queue *q);
+void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
+ bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq, int error);
+bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
+void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
@@ -237,6 +242,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set);
+int blk_mq_map_queues(struct blk_mq_tag_set *set);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
/*
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index cd395ecec99d..519ea2c9df61 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -17,7 +17,6 @@ struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
-#ifdef CONFIG_BLOCK
/*
* main unit of I/O for the block layer and lower layers (ie drivers and
* stacking drivers)
@@ -88,24 +87,6 @@ struct bio {
struct bio_vec bi_inline_vecs[0];
};
-#define BIO_OP_SHIFT (8 * FIELD_SIZEOF(struct bio, bi_opf) - REQ_OP_BITS)
-#define bio_flags(bio) ((bio)->bi_opf & ((1 << BIO_OP_SHIFT) - 1))
-#define bio_op(bio) ((bio)->bi_opf >> BIO_OP_SHIFT)
-
-#define bio_set_op_attrs(bio, op, op_flags) do { \
- if (__builtin_constant_p(op)) \
- BUILD_BUG_ON((op) + 0U >= (1U << REQ_OP_BITS)); \
- else \
- WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS)); \
- if (__builtin_constant_p(op_flags)) \
- BUILD_BUG_ON((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \
- else \
- WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \
- (bio)->bi_opf = bio_flags(bio); \
- (bio)->bi_opf |= (((op) + 0U) << BIO_OP_SHIFT); \
- (bio)->bi_opf |= (op_flags); \
-} while (0)
-
#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
/*
@@ -119,6 +100,8 @@ struct bio {
#define BIO_QUIET 6 /* Make BIO Quiet */
#define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */
#define BIO_REFFED 8 /* bio has elevated ->bi_cnt */
+#define BIO_THROTTLED 9 /* This bio has already been subjected to
+ * throttling rules. Don't do it again. */
/*
* Flags starting here get preserved by bio_reset() - this includes
@@ -142,53 +125,61 @@ struct bio {
#define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET)
-#endif /* CONFIG_BLOCK */
-
/*
- * Request flags. For use in the cmd_flags field of struct request, and in
- * bi_opf of struct bio. Note that some flags are only valid in either one.
+ * Operations and flags common to the bio and request structures.
+ * We use 8 bits for encoding the operation, and the remaining 24 for flags.
+ *
+ * The least significant bit of the operation number indicates the data
+ * transfer direction:
+ *
+ * - if the least significant bit is set transfers are TO the device
+ * - if the least significant bit is not set transfers are FROM the device
+ *
+ * If an operation does not transfer data, the least significant bit has no
+ * meaning.
*/
-enum rq_flag_bits {
- /* common flags */
- __REQ_FAILFAST_DEV, /* no driver retries of device errors */
+#define REQ_OP_BITS 8
+#define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1)
+#define REQ_FLAG_BITS 24
+
+enum req_opf {
+ /* read sectors from the device */
+ REQ_OP_READ = 0,
+ /* write sectors to the device */
+ REQ_OP_WRITE = 1,
+ /* flush the volatile write cache */
+ REQ_OP_FLUSH = 2,
+ /* discard sectors */
+ REQ_OP_DISCARD = 3,
+ /* get zone information */
+ REQ_OP_ZONE_REPORT = 4,
+ /* securely erase sectors */
+ REQ_OP_SECURE_ERASE = 5,
+ /* reset a zone write pointer */
+ REQ_OP_ZONE_RESET = 6,
+ /* write the same sector many times */
+ REQ_OP_WRITE_SAME = 7,
+ /* write the zero filled sector many times */
+ REQ_OP_WRITE_ZEROES = 8,
+
+ REQ_OP_LAST,
+};
+
+enum req_flag_bits {
+ __REQ_FAILFAST_DEV = /* no driver retries of device errors */
+ REQ_OP_BITS,
__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
__REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */
-
__REQ_SYNC, /* request is sync (sync write or read) */
__REQ_META, /* metadata io request */
__REQ_PRIO, /* boost priority in cfq */
-
- __REQ_NOIDLE, /* don't anticipate more IO after this one */
+ __REQ_NOMERGE, /* don't touch this for merging */
+ __REQ_IDLE, /* anticipate more IO after this one */
__REQ_INTEGRITY, /* I/O includes block integrity payload */
__REQ_FUA, /* forced unit access */
__REQ_PREFLUSH, /* request for cache flush */
-
- /* bio only flags */
__REQ_RAHEAD, /* read ahead, can fail anytime */
- __REQ_THROTTLED, /* This bio has already been subjected to
- * throttling rules. Don't do it again. */
-
- /* request only flags */
- __REQ_SORTED, /* elevator knows about this request */
- __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
- __REQ_NOMERGE, /* don't touch this for merging */
- __REQ_STARTED, /* drive already may have started this one */
- __REQ_DONTPREP, /* don't call prep for this one */
- __REQ_QUEUED, /* uses queueing */
- __REQ_ELVPRIV, /* elevator private data attached */
- __REQ_FAILED, /* set if the request failed */
- __REQ_QUIET, /* don't worry about errors */
- __REQ_PREEMPT, /* set for "ide_preempt" requests and also
- for requests for which the SCSI "quiesce"
- state must be ignored. */
- __REQ_ALLOCED, /* request came from our alloc pool */
- __REQ_COPY_USER, /* contains copies of user pages */
- __REQ_FLUSH_SEQ, /* request for flush sequence */
- __REQ_IO_STAT, /* account I/O stat */
- __REQ_MIXED_MERGE, /* merge of different types, fail separately */
- __REQ_PM, /* runtime pm request */
- __REQ_HASHED, /* on IO scheduler merge hash */
- __REQ_MQ_INFLIGHT, /* track inflight for MQ */
+ __REQ_BACKGROUND, /* background IO */
__REQ_NR_BITS, /* stops here */
};
@@ -198,54 +189,47 @@ enum rq_flag_bits {
#define REQ_SYNC (1ULL << __REQ_SYNC)
#define REQ_META (1ULL << __REQ_META)
#define REQ_PRIO (1ULL << __REQ_PRIO)
-#define REQ_NOIDLE (1ULL << __REQ_NOIDLE)
+#define REQ_NOMERGE (1ULL << __REQ_NOMERGE)
+#define REQ_IDLE (1ULL << __REQ_IDLE)
#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY)
+#define REQ_FUA (1ULL << __REQ_FUA)
+#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH)
+#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
+#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
-#define REQ_COMMON_MASK \
- (REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \
- REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE)
-#define REQ_CLONE_MASK REQ_COMMON_MASK
-/* This mask is used for both bio and request merge checking */
#define REQ_NOMERGE_FLAGS \
- (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ)
+ (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
-#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
-#define REQ_THROTTLED (1ULL << __REQ_THROTTLED)
+#define bio_op(bio) \
+ ((bio)->bi_opf & REQ_OP_MASK)
+#define req_op(req) \
+ ((req)->cmd_flags & REQ_OP_MASK)
-#define REQ_SORTED (1ULL << __REQ_SORTED)
-#define REQ_SOFTBARRIER (1ULL << __REQ_SOFTBARRIER)
-#define REQ_FUA (1ULL << __REQ_FUA)
-#define REQ_NOMERGE (1ULL << __REQ_NOMERGE)
-#define REQ_STARTED (1ULL << __REQ_STARTED)
-#define REQ_DONTPREP (1ULL << __REQ_DONTPREP)
-#define REQ_QUEUED (1ULL << __REQ_QUEUED)
-#define REQ_ELVPRIV (1ULL << __REQ_ELVPRIV)
-#define REQ_FAILED (1ULL << __REQ_FAILED)
-#define REQ_QUIET (1ULL << __REQ_QUIET)
-#define REQ_PREEMPT (1ULL << __REQ_PREEMPT)
-#define REQ_ALLOCED (1ULL << __REQ_ALLOCED)
-#define REQ_COPY_USER (1ULL << __REQ_COPY_USER)
-#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH)
-#define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ)
-#define REQ_IO_STAT (1ULL << __REQ_IO_STAT)
-#define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE)
-#define REQ_PM (1ULL << __REQ_PM)
-#define REQ_HASHED (1ULL << __REQ_HASHED)
-#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
-
-enum req_op {
- REQ_OP_READ,
- REQ_OP_WRITE,
- REQ_OP_DISCARD, /* request to discard sectors */
- REQ_OP_SECURE_ERASE, /* request to securely erase sectors */
- REQ_OP_WRITE_SAME, /* write same block many times */
- REQ_OP_FLUSH, /* request for cache flush */
-};
+/* obsolete, don't use in new code */
+static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
+ unsigned op_flags)
+{
+ bio->bi_opf = op | op_flags;
+}
-#define REQ_OP_BITS 3
+static inline bool op_is_write(unsigned int op)
+{
+ return (op & 1);
+}
+
+/*
+ * Reads are always treated as synchronous, as are requests with the FUA or
+ * PREFLUSH flag. Other operations may be marked as synchronous using the
+ * REQ_SYNC flag.
+ */
+static inline bool op_is_sync(unsigned int op)
+{
+ return (op & REQ_OP_MASK) == REQ_OP_READ ||
+ (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
+}
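To illustrate the new encoding (an 8-bit opcode in the low bits, flags above it), a hedged sketch that decodes a bio's bi_opf; the helper name and the pr_debug output are assumptions:

static void example_describe_bio(struct bio *bio)
{
        unsigned int op = bio_op(bio);                 /* low REQ_OP_BITS bits */
        unsigned int flags = bio->bi_opf & ~REQ_OP_MASK;

        pr_debug("op %u (%s, %s), flags 0x%x\n", op,
                 op_is_write(op) ? "write" : "read",
                 op_is_sync(bio->bi_opf) ? "sync" : "async",
                 flags);
}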
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
@@ -271,4 +255,20 @@ static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}
+struct blk_issue_stat {
+ u64 time;
+};
+
+#define BLK_RQ_STAT_BATCH 64
+
+struct blk_rq_stat {
+ s64 mean;
+ u64 min;
+ u64 max;
+ s32 nr_samples;
+ s32 nr_batch;
+ u64 batch;
+ s64 time;
+};
+
#endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c47c358ba052..83695641bd5e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -24,6 +24,7 @@
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
+#include <linux/blkzoned.h>
struct module;
struct scsi_ioctl_command;
@@ -37,6 +38,7 @@ struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
+struct rq_wb;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
@@ -77,6 +79,55 @@ enum rq_cmd_type_bits {
REQ_TYPE_DRV_PRIV, /* driver defined types from here */
};
+/* request flags */
+typedef __u32 __bitwise req_flags_t;
+
+/* elevator knows about this request */
+#define RQF_SORTED ((__force req_flags_t)(1 << 0))
+/* drive already may have started this one */
+#define RQF_STARTED ((__force req_flags_t)(1 << 1))
+/* uses tagged queueing */
+#define RQF_QUEUED ((__force req_flags_t)(1 << 2))
+/* may not be passed by ioscheduler */
+#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
+/* request for flush sequence */
+#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
+/* merge of different types, fail separately */
+#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
+/* track inflight for MQ */
+#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
+/* don't call prep for this one */
+#define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
+/* set for "ide_preempt" requests and also for requests for which the SCSI
+ "quiesce" state must be ignored. */
+#define RQF_PREEMPT ((__force req_flags_t)(1 << 8))
+/* contains copies of user pages */
+#define RQF_COPY_USER ((__force req_flags_t)(1 << 9))
+/* vaguely specified driver internal error. Ignored by the block layer */
+#define RQF_FAILED ((__force req_flags_t)(1 << 10))
+/* don't warn about errors */
+#define RQF_QUIET ((__force req_flags_t)(1 << 11))
+/* elevator private data attached */
+#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
+/* account I/O stat */
+#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
+/* request came from our alloc pool */
+#define RQF_ALLOCED ((__force req_flags_t)(1 << 14))
+/* runtime pm request */
+#define RQF_PM ((__force req_flags_t)(1 << 15))
+/* on IO scheduler merge hash */
+#define RQF_HASHED ((__force req_flags_t)(1 << 16))
+/* IO stats tracking on */
+#define RQF_STATS ((__force req_flags_t)(1 << 17))
+/* Look at ->special_vec for the actual data payload instead of the
+ bio chain. */
+#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
+
+/* flags that prevent us from merging requests: */
+#define RQF_NOMERGE_FLAGS \
+ (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
+
#define BLK_MAX_CDB 16
/*
@@ -97,7 +148,8 @@ struct request {
int cpu;
unsigned cmd_type;
- u64 cmd_flags;
+ unsigned int cmd_flags; /* op and common flags */
+ req_flags_t rq_flags;
unsigned long atomic_flags;
/* the following two fields are internal, NEVER access directly */
@@ -126,6 +178,7 @@ struct request {
*/
union {
struct rb_node rb_node; /* sort/lookup */
+ struct bio_vec special_vec;
void *completion_data;
};
@@ -151,6 +204,7 @@ struct request {
struct gendisk *rq_disk;
struct hd_struct *part;
unsigned long start_time;
+ struct blk_issue_stat issue_stat;
#ifdef CONFIG_BLK_CGROUP
struct request_list *rl; /* rl this rq is alloced from */
unsigned long long start_time_ns;
@@ -198,20 +252,6 @@ struct request {
struct request *next_rq;
};
-#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS)
-#define req_op(req) ((req)->cmd_flags >> REQ_OP_SHIFT)
-
-#define req_set_op(req, op) do { \
- WARN_ON(op >= (1 << REQ_OP_BITS)); \
- (req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1); \
- (req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT); \
-} while (0)
-
-#define req_set_op_attrs(req, op, flags) do { \
- req_set_op(req, op); \
- (req)->cmd_flags |= flags; \
-} while (0)
-
static inline unsigned short req_get_ioprio(struct request *req)
{
return req->ioprio;
@@ -248,7 +288,6 @@ enum blk_queue_state {
struct blk_queue_tag {
struct request **tag_index; /* map of busy tags */
unsigned long *tag_map; /* bit map of free/busy tags */
- int busy; /* current depth */
int max_depth; /* what we will send to device */
int real_max_depth; /* what the array can hold */
atomic_t refcnt; /* map can be shared */
@@ -261,6 +300,15 @@ struct blk_queue_tag {
#define BLK_SCSI_MAX_CMDS (256)
#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
+/*
+ * Zoned block device models (zoned limit).
+ */
+enum blk_zoned_model {
+ BLK_ZONED_NONE, /* Regular block device */
+ BLK_ZONED_HA, /* Host-aware zoned block device */
+ BLK_ZONED_HM, /* Host-managed zoned block device */
+};
+
struct queue_limits {
unsigned long bounce_pfn;
unsigned long seg_boundary_mask;
@@ -278,6 +326,7 @@ struct queue_limits {
unsigned int max_discard_sectors;
unsigned int max_hw_discard_sectors;
unsigned int max_write_same_sectors;
+ unsigned int max_write_zeroes_sectors;
unsigned int discard_granularity;
unsigned int discard_alignment;
@@ -290,8 +339,45 @@ struct queue_limits {
unsigned char cluster;
unsigned char discard_zeroes_data;
unsigned char raid_partial_stripes_expensive;
+ enum blk_zoned_model zoned;
};
+#ifdef CONFIG_BLK_DEV_ZONED
+
+struct blk_zone_report_hdr {
+ unsigned int nr_zones;
+ u8 padding[60];
+};
+
+extern int blkdev_report_zones(struct block_device *bdev,
+ sector_t sector, struct blk_zone *zones,
+ unsigned int *nr_zones, gfp_t gfp_mask);
+extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
+ sector_t nr_sectors, gfp_t gfp_mask);
+
+extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg);
+extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg);
+
+#else /* CONFIG_BLK_DEV_ZONED */
+
+static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
+ fmode_t mode, unsigned int cmd,
+ unsigned long arg)
+{
+ return -ENOTTY;
+}
+
+static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
+ fmode_t mode, unsigned int cmd,
+ unsigned long arg)
+{
+ return -ENOTTY;
+}
+
+#endif /* CONFIG_BLK_DEV_ZONED */
+
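A hedged sketch of walking the first zones of a zoned device with the new API; the buffer size, the blk_zone fields used (from linux/blkzoned.h) and the error handling are illustrative assumptions:

static int example_report_first_zones(struct block_device *bdev)
{
        struct blk_zone zones[16];
        unsigned int i, nr_zones = ARRAY_SIZE(zones);
        int ret;

        ret = blkdev_report_zones(bdev, 0, zones, &nr_zones, GFP_KERNEL);
        if (ret)
                return ret;

        for (i = 0; i < nr_zones; i++)
                pr_info("zone %u: start %llu, len %llu sectors\n", i,
                        (unsigned long long)zones[i].start,
                        (unsigned long long)zones[i].len);

        return 0;
}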
struct request_queue {
/*
* Together with queue_head for cacheline sharing
@@ -302,6 +388,8 @@ struct request_queue {
int nr_rqs[2]; /* # allocated [a]sync rqs */
int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
+ struct rq_wb *rq_wb;
+
/*
* If blkcg is not used, @q->root_rl serves all requests. If blkcg
* is used, root blkg allocates from @q->root_rl and all other
@@ -327,6 +415,8 @@ struct request_queue {
struct blk_mq_ctx __percpu *queue_ctx;
unsigned int nr_queues;
+ unsigned int queue_depth;
+
/* hw dispatch queues */
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
@@ -412,6 +502,9 @@ struct request_queue {
unsigned int nr_sorted;
unsigned int in_flight[2];
+
+ struct blk_rq_stat rq_stats[2];
+
/*
* Number of active block driver functions for which blk_drain_queue()
* must wait. Must be incremented around functions that unlock the
@@ -420,6 +513,7 @@ struct request_queue {
unsigned int request_fn_active;
unsigned int rq_timeout;
+ int poll_nsec;
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head timeout_list;
@@ -505,6 +599,7 @@ struct request_queue {
#define QUEUE_FLAG_FUA 24 /* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueable */
#define QUEUE_FLAG_DAX 26 /* device supports DAX */
+#define QUEUE_FLAG_STATS 27 /* track rq completion times */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -601,7 +696,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
REQ_FAILFAST_DRIVER))
#define blk_account_rq(rq) \
- (((rq)->cmd_flags & REQ_STARTED) && \
+ (((rq)->rq_flags & RQF_STARTED) && \
((rq)->cmd_type == REQ_TYPE_FS))
#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
@@ -627,17 +722,31 @@ static inline unsigned int blk_queue_cluster(struct request_queue *q)
return q->limits.cluster;
}
-/*
- * We regard a request as sync, if either a read or a sync write
- */
-static inline bool rw_is_sync(int op, unsigned int rw_flags)
+static inline enum blk_zoned_model
+blk_queue_zoned_model(struct request_queue *q)
+{
+ return q->limits.zoned;
+}
+
+static inline bool blk_queue_is_zoned(struct request_queue *q)
+{
+ switch (blk_queue_zoned_model(q)) {
+ case BLK_ZONED_HA:
+ case BLK_ZONED_HM:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline unsigned int blk_queue_zone_size(struct request_queue *q)
{
- return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
+ return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
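For illustration, a sketch combining the queue-level zoned helpers above; the policy expressed here is an example, not something this patch defines:

static bool example_queue_allows_random_writes(struct request_queue *q)
{
        /* host-managed devices require sequential writes within zones */
        if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
                return false;

        if (blk_queue_is_zoned(q))
                pr_debug("host-aware zoned queue, zone size %u sectors\n",
                         blk_queue_zone_size(q));

        return true;
}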
static inline bool rq_is_sync(struct request *rq)
{
- return rw_is_sync(req_op(rq), rq->cmd_flags);
+ return op_is_sync(rq->cmd_flags);
}
static inline bool blk_rl_full(struct request_list *rl, bool sync)
@@ -669,8 +778,13 @@ static inline bool rq_mergeable(struct request *rq)
if (req_op(rq) == REQ_OP_FLUSH)
return false;
+ if (req_op(rq) == REQ_OP_WRITE_ZEROES)
+ return false;
+
if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
return false;
+ if (rq->rq_flags & RQF_NOMERGE_FLAGS)
+ return false;
return true;
}
@@ -683,6 +797,14 @@ static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
return false;
}
+static inline unsigned int blk_queue_depth(struct request_queue *q)
+{
+ if (q->queue_depth)
+ return q->queue_depth;
+
+ return q->nr_requests;
+}
+
/*
* q->prep_rq_fn return values
*/
@@ -790,8 +912,6 @@ extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
-extern void blk_add_request_payload(struct request *rq, struct page *page,
- int offset, unsigned int len);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
struct bio_set *bs, gfp_t gfp_mask,
@@ -824,6 +944,7 @@ extern void __blk_run_queue(struct request_queue *q);
extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
+extern void blk_mq_quiesce_queue(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
struct rq_map_data *, void __user *, unsigned long,
gfp_t);
@@ -837,7 +958,7 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
-bool blk_poll(struct request_queue *q, blk_qc_t cookie);
+bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
@@ -888,6 +1009,9 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
if (unlikely(op == REQ_OP_WRITE_SAME))
return q->limits.max_write_same_sectors;
+ if (unlikely(op == REQ_OP_WRITE_ZEROES))
+ return q->limits.max_write_zeroes_sectors;
+
return q->limits.max_sectors;
}
@@ -934,6 +1058,20 @@ static inline unsigned int blk_rq_count_bios(struct request *rq)
}
/*
+ * blk_rq_set_prio - associate a request with prio from ioc
+ * @rq: request of interest
+ * @ioc: target iocontext
+ *
+ * Associate request prio with ioc prio so request-based drivers
+ * can leverage priority information.
+ */
+static inline void blk_rq_set_prio(struct request *rq, struct io_context *ioc)
+{
+ if (ioc)
+ rq->ioprio = ioc->ioprio;
+}
+
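A minimal usage sketch, assuming the caller already has the submitter's io_context; the wrapper name and debug print are invented:

static void example_prep_rq_prio(struct request *rq, struct io_context *ioc)
{
        /* copy the ioc's priority into the request; harmless if ioc is NULL */
        blk_rq_set_prio(rq, ioc);

        /* request-based drivers can read it back via req_get_ioprio() */
        pr_debug("rq ioprio 0x%x\n", req_get_ioprio(rq));
}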
+/*
* Request issue related functions.
*/
extern struct request *blk_peek_request(struct request_queue *q);
@@ -991,6 +1129,8 @@ extern void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
unsigned int max_write_same_sectors);
+extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
+ unsigned int max_write_zeroes_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
@@ -999,6 +1139,7 @@ extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
@@ -1027,6 +1168,13 @@ extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
+static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return 1;
+ return rq->nr_phys_segments;
+}
+
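A hedged sketch of why the helper matters when mapping a request: an RQF_SPECIAL_PAYLOAD request carries its data in ->special_vec rather than the bio chain, so segment counts should come from blk_rq_nr_phys_segments(); the wrapper function is invented:

static int example_map_request(struct request_queue *q, struct request *rq,
                               struct scatterlist *sgl)
{
        /* reports 1 for RQF_SPECIAL_PAYLOAD, rq->nr_phys_segments otherwise */
        unsigned short nsegs = blk_rq_nr_phys_segments(rq);

        sg_init_table(sgl, nsegs);
        return blk_rq_map_sg(q, rq, sgl);
}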
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);
@@ -1057,7 +1205,7 @@ static inline int blk_pre_runtime_suspend(struct request_queue *q)
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
-extern inline void blk_set_runtime_active(struct request_queue *q) {}
+static inline void blk_set_runtime_active(struct request_queue *q) {}
#endif
/*
@@ -1078,6 +1226,7 @@ struct blk_plug {
struct list_head cb_list; /* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16
+#define BLK_PLUG_FLUSH_SIZE (128 * 1024)
struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
@@ -1151,6 +1300,9 @@ extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
struct bio **biop);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct page *page);
+extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
+ bool discard);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, bool discard);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
@@ -1354,6 +1506,46 @@ static inline unsigned int bdev_write_same(struct block_device *bdev)
return 0;
}
+static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (q)
+ return q->limits.max_write_zeroes_sectors;
+
+ return 0;
+}
+
+static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (q)
+ return blk_queue_zoned_model(q);
+
+ return BLK_ZONED_NONE;
+}
+
+static inline bool bdev_is_zoned(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (q)
+ return blk_queue_is_zoned(q);
+
+ return false;
+}
+
+static inline unsigned int bdev_zone_size(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (q)
+ return blk_queue_zone_size(q);
+
+ return 0;
+}
+
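As a sketch of how a caller might inspect a device with the bdev-level helpers above (the reporting function and messages are purely illustrative):

static void example_describe_bdev(struct block_device *bdev)
{
        if (bdev_is_zoned(bdev))
                pr_info("zoned (%s), zone size %u sectors\n",
                        bdev_zoned_model(bdev) == BLK_ZONED_HM ?
                        "host-managed" : "host-aware",
                        bdev_zone_size(bdev));

        if (bdev_write_zeroes_sectors(bdev))
                pr_info("supports WRITE ZEROES up to %u sectors\n",
                        bdev_write_zeroes_sectors(bdev));
}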
static inline int queue_dma_alignment(struct request_queue *q)
{
return q ? q->dma_alignment : 511;
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index cceb72f9e29f..e417f080219a 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -118,7 +118,7 @@ static inline int blk_cmd_buf_len(struct request *rq)
}
extern void blk_dump_cmd(char *buf, struct request *rq);
-extern void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes);
+extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
new file mode 100644
index 000000000000..92bc89ae7e20
--- /dev/null
+++ b/include/linux/bpf-cgroup.h
@@ -0,0 +1,92 @@
+#ifndef _BPF_CGROUP_H
+#define _BPF_CGROUP_H
+
+#include <linux/jump_label.h>
+#include <uapi/linux/bpf.h>
+
+struct sock;
+struct cgroup;
+struct sk_buff;
+
+#ifdef CONFIG_CGROUP_BPF
+
+extern struct static_key_false cgroup_bpf_enabled_key;
+#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
+
+struct cgroup_bpf {
+ /*
+ * Store two sets of bpf_prog pointers, one for programs that are
+ * pinned directly to this cgroup, and one for those that are effective
+ * when this cgroup is accessed.
+ */
+ struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
+ struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
+};
+
+void cgroup_bpf_put(struct cgroup *cgrp);
+void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
+
+void __cgroup_bpf_update(struct cgroup *cgrp,
+ struct cgroup *parent,
+ struct bpf_prog *prog,
+ enum bpf_attach_type type);
+
+/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
+void cgroup_bpf_update(struct cgroup *cgrp,
+ struct bpf_prog *prog,
+ enum bpf_attach_type type);
+
+int __cgroup_bpf_run_filter_skb(struct sock *sk,
+ struct sk_buff *skb,
+ enum bpf_attach_type type);
+
+int __cgroup_bpf_run_filter_sk(struct sock *sk,
+ enum bpf_attach_type type);
+
+/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
+#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled) \
+ __ret = __cgroup_bpf_run_filter_skb(sk, skb, \
+ BPF_CGROUP_INET_INGRESS); \
+ \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
+ typeof(sk) __sk = sk_to_full_sk(sk); \
+ if (sk_fullsock(__sk)) \
+ __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
+ BPF_CGROUP_INET_EGRESS); \
+ } \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled && sk) { \
+ __ret = __cgroup_bpf_run_filter_sk(sk, \
+ BPF_CGROUP_INET_SOCK_CREATE); \
+ } \
+ __ret; \
+})
+
+#else
+
+struct cgroup_bpf {};
+static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
+static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
+ struct cgroup *parent) {}
+
+#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
+
+#endif /* CONFIG_CGROUP_BPF */
+
+#endif /* _BPF_CGROUP_H */
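To illustrate how the wrappers are intended to be used at a hook site (the function below is a made-up stand-in, not the real networking hook), the macro's result is simply propagated as an error code:

static int example_ingress_hook(struct sock *sk, struct sk_buff *skb)
{
        int ret;

        /* expands to 0 unless a BPF_CGROUP_INET_INGRESS program is attached */
        ret = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
        if (ret)
                return ret;     /* the attached program rejected the packet */

        return 0;
}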
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c201017b5730..f74ae68086dc 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -216,6 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+int bpf_prog_calc_digest(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@@ -233,13 +234,16 @@ void bpf_register_map_type(struct bpf_map_type_list *tl);
struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
-struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i);
-struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
+struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
+void bpf_prog_sub(struct bpf_prog *prog, int i);
+struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
+int __bpf_prog_charge(struct user_struct *user, u32 pages);
+void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
-struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
+struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
@@ -298,18 +302,33 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
{
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
+static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
+ int i)
{
return ERR_PTR(-EOPNOTSUPP);
}
+static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
+{
+}
+
static inline void bpf_prog_put(struct bpf_prog *prog)
{
}
-static inline struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+
+static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
{
return ERR_PTR(-EOPNOTSUPP);
}
+
+static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
+{
+ return 0;
+}
+
+static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
+{
+}
#endif /* CONFIG_BPF_SYSCALL */
/* verifier prototypes for helper functions called from eBPF programs */
@@ -319,6 +338,7 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
+extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 6aaf425cebc3..a13b031dc6b8 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -18,19 +18,12 @@
struct bpf_reg_state {
enum bpf_reg_type type;
- /*
- * Used to determine if any memory access using this register will
- * result in a bad access.
- */
- s64 min_value;
- u64 max_value;
union {
/* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
s64 imm;
/* valid when type == PTR_TO_PACKET* */
struct {
- u32 id;
u16 off;
u16 range;
};
@@ -40,6 +33,13 @@ struct bpf_reg_state {
*/
struct bpf_map *map_ptr;
};
+ u32 id;
+ /* Used to determine if any memory access using this register will
+ * result in a bad access. These two fields must be last.
+ * See states_equal()
+ */
+ s64 min_value;
+ u64 max_value;
};
enum bpf_stack_slot_type {
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index e3354b74286c..4f7d8be9ddbf 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -13,11 +13,13 @@
#define PHY_ID_BCM5241 0x0143bc30
#define PHY_ID_BCMAC131 0x0143bc70
#define PHY_ID_BCM5481 0x0143bca0
+#define PHY_ID_BCM54810 0x03625d00
#define PHY_ID_BCM5482 0x0143bcb0
#define PHY_ID_BCM5411 0x00206070
#define PHY_ID_BCM5421 0x002060e0
#define PHY_ID_BCM5464 0x002060b0
#define PHY_ID_BCM5461 0x002060c0
+#define PHY_ID_BCM54612E 0x03625e60
#define PHY_ID_BCM54616S 0x03625d10
#define PHY_ID_BCM57780 0x03625d90
@@ -55,6 +57,7 @@
#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000
#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000
#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000
+
/* Broadcom BCM7xxx specific workarounds */
#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff)
#define PHY_BRCM_7XXX_PATCH(x) ((x) & 0xff)
@@ -105,11 +108,15 @@
#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
+#define MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW 0x0100
#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007
+#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN (1 << 8)
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN (1 << 4)
-#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MASK 0x0007
/*
* Broadcom LED source encodings. These are used in BCM5461, BCM5481,
@@ -124,6 +131,7 @@
#define BCM_LED_SRC_INTR 0x6
#define BCM_LED_SRC_QUALITY 0x7
#define BCM_LED_SRC_RCVLED 0x8
+#define BCM_LED_SRC_WIRESPEED 0x9
#define BCM_LED_SRC_MULTICOLOR1 0xa
#define BCM_LED_SRC_OPENSHORT 0xb
#define BCM_LED_SRC_OFF 0xe /* Tied high */
@@ -135,6 +143,14 @@
* Shadow values go into bits [14:10] of register 0x1c to select a shadow
* register to access.
*/
+
+/* 00100: Reserved control register 2 */
+#define BCM54XX_SHD_SCR2 0x04
+#define BCM54XX_SHD_SCR2_WSPD_RTRY_DIS 0x100
+#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT 2
+#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET 2
+#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK 0x7
+
/* 00101: Spare Control Register 3 */
#define BCM54XX_SHD_SCR3 0x05
#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001
@@ -189,6 +205,12 @@
#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */
#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */
+/* BCM54810 Registers */
+#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL (MII_BCM54XX_EXP_SEL_ER + 0x90)
+#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0)
+#define BCM54810_SHD_CLK_CTL 0x3
+#define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9)
+
/*****************************************************************************/
/* Fast Ethernet Transceiver definitions. */
@@ -222,6 +244,9 @@
#define LPI_FEATURE_EN_DIG1000X 0x4000
/* Core register definitions*/
+#define MII_BRCM_CORE_BASE12 0x12
+#define MII_BRCM_CORE_BASE13 0x13
+#define MII_BRCM_CORE_BASE14 0x14
#define MII_BRCM_CORE_BASE1E 0x1E
#define MII_BRCM_CORE_EXPB0 0xB0
#define MII_BRCM_CORE_EXPB1 0xB1
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index a226652a5a6c..657a718c27d2 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -40,6 +40,8 @@ struct bsg_job {
struct device *dev;
struct request *req;
+ struct kref kref;
+
/* Transport/driver specific request/reply structs */
void *request;
void *reply;
@@ -67,5 +69,7 @@ void bsg_job_done(struct bsg_job *job, int result,
int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name,
bsg_job_fn *job_fn, int dd_job_size);
void bsg_request_fn(struct request_queue *q);
+void bsg_job_put(struct bsg_job *job);
+int __must_check bsg_job_get(struct bsg_job *job);
#endif
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index ebbacd14d450..d67ab83823ad 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -168,7 +168,12 @@ int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
-void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
+void clean_bdev_aliases(struct block_device *bdev, sector_t block,
+ sector_t len);
+static inline void clean_bdev_bh_alias(struct buffer_head *bh)
+{
+ clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
+}
void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 292d6a10b0c2..baff2e8fc8a8 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -121,4 +121,21 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr,
}
#endif /* CONFIG_GENERIC_BUG */
+
+/*
+ * Since detected data corruption should stop operation on the affected
+ * structures, this returns false if the corruption condition is found.
+ */
+#define CHECK_DATA_CORRUPTION(condition, fmt, ...) \
+ do { \
+ if (unlikely(condition)) { \
+ if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \
+ pr_err(fmt, ##__VA_ARGS__); \
+ BUG(); \
+ } else \
+ WARN(1, fmt, ##__VA_ARGS__); \
+ return false; \
+ } \
+ } while (0)
+
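A sketch of the intended usage pattern, loosely modelled on a list-debug style check; the validation function and message are illustrative. Note that the enclosing function must return bool, because the macro returns false when corruption is detected:

static bool example_validate_list_link(struct list_head *entry)
{
        CHECK_DATA_CORRUPTION(entry->next->prev != entry,
                              "corrupted list: next->prev should be %p, was %p\n",
                              entry, entry->next->prev);
        return true;
}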
#endif /* _LINUX_BUG_H */
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 2189935075b4..6a524bf6a06d 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -18,6 +18,7 @@ enum cache_type {
/**
* struct cacheinfo - represent a cache leaf node
+ * @id: This cache's id. It is unique among caches with the same (type, level).
* @type: type of the cache - data, inst or unified
* @level: represents the hierarchy in the multi-level cache
* @coherency_line_size: size of each cache line usually representing
@@ -44,6 +45,7 @@ enum cache_type {
* keeping, the remaining members form the core properties of the cache
*/
struct cacheinfo {
+ unsigned int id;
enum cache_type type;
unsigned int level;
unsigned int coherency_line_size;
@@ -61,6 +63,7 @@ struct cacheinfo {
#define CACHE_WRITE_ALLOCATE BIT(3)
#define CACHE_ALLOCATE_POLICY_MASK \
(CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE)
+#define CACHE_ID BIT(4)
struct device_node *of_node;
bool disable_sysfs;
@@ -71,6 +74,7 @@ struct cpu_cacheinfo {
struct cacheinfo *info_list;
unsigned int num_levels;
unsigned int num_leaves;
+ bool cpu_map_populated;
};
/*
diff --git a/include/linux/capability.h b/include/linux/capability.h
index dbc21c719ce6..6ffb67e10c06 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -240,8 +240,10 @@ static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap)
return true;
}
#endif /* CONFIG_MULTIUSER */
+extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode);
extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
+extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns);
/* audit system wants to get cap info from files as well */
extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index a7653339fedb..c71dd8fa5764 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -11,8 +11,8 @@
* published by the Free Software Foundation.
*/
-#ifndef __CPP_H__
-#define __CPP_H__
+#ifndef __CCP_H__
+#define __CCP_H__
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
@@ -553,7 +553,7 @@ enum ccp_engine {
#define CCP_CMD_PASSTHRU_NO_DMA_MAP 0x00000002
/**
- * struct ccp_cmd - CPP operation request
+ * struct ccp_cmd - CCP operation request
* @entry: list element (ccp driver use only)
* @work: work element used for callbacks (ccp driver use only)
* @ccp: CCP device to be run on (ccp driver use only)
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 374bb1c4ef52..a6747789fe5c 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -64,7 +64,7 @@ struct ceph_auth_client_ops {
int (*update_authorizer)(struct ceph_auth_client *ac, int peer_type,
struct ceph_auth_handshake *auth);
int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
- struct ceph_authorizer *a, size_t len);
+ struct ceph_authorizer *a);
void (*invalidate_authorizer)(struct ceph_auth_client *ac,
int peer_type);
@@ -118,8 +118,7 @@ extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
int peer_type,
struct ceph_auth_handshake *a);
extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
- struct ceph_authorizer *a,
- size_t len);
+ struct ceph_authorizer *a);
extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac,
int peer_type);
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index f96de8de4fa7..f4b2ee18f38c 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -653,6 +653,9 @@ enum {
extern const char *ceph_cap_op_name(int op);
+/* flags field in client cap messages (version >= 10) */
+#define CEPH_CLIENT_CAPS_SYNC (0x1)
+
/*
* caps message, used for capability callbacks, acks, requests, etc.
*/
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
index 87ed09f54800..8ed5dc505fbb 100644
--- a/include/linux/ceph/mdsmap.h
+++ b/include/linux/ceph/mdsmap.h
@@ -31,6 +31,10 @@ struct ceph_mdsmap {
int m_num_data_pg_pools;
u64 *m_data_pg_pools;
u64 m_cas_pg_pool;
+
+ bool m_enabled;
+ bool m_damaged;
+ int m_num_laggy;
};
static inline struct ceph_entity_addr *
@@ -59,5 +63,6 @@ static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end);
extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
+extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m);
#endif
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 8dbd7879fdc6..c5c4c713e00f 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -1,7 +1,7 @@
#ifndef __FS_CEPH_MESSENGER_H
#define __FS_CEPH_MESSENGER_H
-#include <linux/blk_types.h>
+#include <linux/bvec.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/net.h>
@@ -30,7 +30,7 @@ struct ceph_connection_operations {
struct ceph_auth_handshake *(*get_authorizer) (
struct ceph_connection *con,
int *proto, int force_new);
- int (*verify_authorizer_reply) (struct ceph_connection *con, int len);
+ int (*verify_authorizer_reply) (struct ceph_connection *con);
int (*invalidate_authorizer)(struct ceph_connection *con);
/* there was some error on the socket (disconnect, whatever) */
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index a8e66344bacc..03a6653d329a 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -176,7 +176,7 @@ struct ceph_osd_request {
struct kref r_kref;
bool r_mempool;
struct completion r_completion;
- struct completion r_safe_completion; /* fsync waiter */
+ struct completion r_done_completion; /* fsync waiter */
ceph_osdc_callback_t r_callback;
ceph_osdc_unsafe_callback_t r_unsafe_callback;
struct list_head r_unsafe_item;
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 5b17de62c962..861b4677fc5b 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -16,6 +16,7 @@
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/workqueue.h>
+#include <linux/bpf-cgroup.h>
#ifdef CONFIG_CGROUPS
@@ -300,6 +301,9 @@ struct cgroup {
/* used to schedule release agent */
struct work_struct release_agent_work;
+ /* used to store eBPF programs */
+ struct cgroup_bpf bpf;
+
/* ids of the ancestors at each level including self */
int ancestor_ids[];
};
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 123c02788807..e9d36b3e49de 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -17,8 +17,9 @@
#include <linux/notifier.h>
struct device;
-
struct clk;
+struct device_node;
+struct of_phandle_args;
/**
* DOC: clk notifier callback types
@@ -249,6 +250,23 @@ struct clk *clk_get(struct device *dev, const char *id);
struct clk *devm_clk_get(struct device *dev, const char *id);
/**
+ * devm_get_clk_from_child - lookup and obtain a managed reference to a
+ * clock producer from a child node.
+ * @dev: device for clock "consumer"
+ * @np: pointer to clock consumer node
+ * @con_id: clock consumer ID
+ *
+ * This function parses the clocks of @np, and uses them together with
+ * @con_id to look up the struct clk in the registered list of clock
+ * providers.
+ *
+ * The clock will automatically be freed when the device is unbound
+ * from the bus.
+ */
+struct clk *devm_get_clk_from_child(struct device *dev,
+ struct device_node *np, const char *con_id);
+
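A hedged sketch of the intended use from a driver probe path: look up a clock described in a child node (the "rx" child name and NULL con_id are examples) and let devres release it on unbind:

static int example_probe(struct platform_device *pdev)
{
        struct device_node *child;
        struct clk *clk;

        child = of_get_child_by_name(pdev->dev.of_node, "rx");
        if (!child)
                return -ENODEV;

        clk = devm_get_clk_from_child(&pdev->dev, child, NULL);
        of_node_put(child);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        return clk_prepare_enable(clk);
}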
+/**
* clk_enable - inform the system when the clock source should be running.
* @clk: clock source
*
@@ -432,6 +450,12 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id)
return NULL;
}
+static inline struct clk *devm_get_clk_from_child(struct device *dev,
+ struct device_node *np, const char *con_id)
+{
+ return NULL;
+}
+
static inline void clk_put(struct clk *clk) {}
static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
@@ -501,9 +525,6 @@ static inline void clk_disable_unprepare(struct clk *clk)
clk_unprepare(clk);
}
-struct device_node;
-struct of_phandle_args;
-
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
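For illustration, a minimal sketch of how the new devm_get_clk_from_child() helper could be called from a driver's probe path; the child-node name "codec", the con_id "mclk" and the surrounding driver are assumptions, not part of this patch.

#include <linux/clk.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* Illustrative only: grab a clock that is described in a child node. */
static int foo_probe(struct platform_device *pdev)
{
	struct device_node *child;
	struct clk *mclk;

	child = of_get_child_by_name(pdev->dev.of_node, "codec"); /* assumed node */
	if (!child)
		return -ENODEV;

	mclk = devm_get_clk_from_child(&pdev->dev, child, "mclk"); /* assumed con_id */
	of_node_put(child);
	if (IS_ERR(mclk))
		return PTR_ERR(mclk);

	return clk_prepare_enable(mclk);
}

Note that the !CONFIG_COMMON_CLK stub added further down returns NULL, so callers that must also build without the common clock framework should treat NULL as "no clock" rather than as an error.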
diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h
index ba6fa4148515..9ebf1f8243bb 100644
--- a/include/linux/clk/renesas.h
+++ b/include/linux/clk/renesas.h
@@ -20,10 +20,6 @@ struct device;
struct device_node;
struct generic_pm_domain;
-void r8a7778_clocks_init(u32 mode);
-void r8a7779_clocks_init(u32 mode);
-void rcar_gen2_clocks_init(u32 mode);
-
void cpg_mstp_add_clk_domain(struct device_node *np);
#ifdef CONFIG_CLK_RENESAS_CPG_MSTP
int cpg_mstp_attach_dev(struct generic_pm_domain *unused, struct device *dev);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 08398182f56e..e315d04a2fd9 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -75,8 +75,8 @@ struct module;
* structure.
*/
struct clocksource {
- cycle_t (*read)(struct clocksource *cs);
- cycle_t mask;
+ u64 (*read)(struct clocksource *cs);
+ u64 mask;
u32 mult;
u32 shift;
u64 max_idle_ns;
@@ -98,8 +98,8 @@ struct clocksource {
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
/* Watchdog related data, used by the framework */
struct list_head wd_list;
- cycle_t cs_last;
- cycle_t wd_last;
+ u64 cs_last;
+ u64 wd_last;
#endif
struct module *owner;
};
@@ -117,7 +117,7 @@ struct clocksource {
#define CLOCK_SOURCE_RESELECT 0x100
/* simplify initialization of mask field */
-#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
+#define CLOCKSOURCE_MASK(bits) (u64)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
{
@@ -169,11 +169,14 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
* @mult: cycle to nanosecond multiplier
* @shift: cycle to nanosecond divisor (power of two)
*
- * Converts cycles to nanoseconds, using the given mult and shift.
+ * Converts clocksource cycles to nanoseconds, using the given @mult and @shift.
+ * The code is optimized for performance and is not intended to work
+ * with absolute clocksource cycles (as those will easily overflow),
+ * but is only intended to be used with relative (delta) clocksource cycles.
*
* XXX - This could use some mult_lxl_ll() asm optimization
*/
-static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
+static inline s64 clocksource_cyc2ns(u64 cycles, u32 mult, u32 shift)
{
return ((u64) cycles * mult) >> shift;
}
@@ -233,13 +236,13 @@ static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz
extern int timekeeping_notify(struct clocksource *clock);
-extern cycle_t clocksource_mmio_readl_up(struct clocksource *);
-extern cycle_t clocksource_mmio_readl_down(struct clocksource *);
-extern cycle_t clocksource_mmio_readw_up(struct clocksource *);
-extern cycle_t clocksource_mmio_readw_down(struct clocksource *);
+extern u64 clocksource_mmio_readl_up(struct clocksource *);
+extern u64 clocksource_mmio_readl_down(struct clocksource *);
+extern u64 clocksource_mmio_readw_up(struct clocksource *);
+extern u64 clocksource_mmio_readw_down(struct clocksource *);
extern int clocksource_mmio_init(void __iomem *, const char *,
- unsigned long, int, unsigned, cycle_t (*)(struct clocksource *));
+ unsigned long, int, unsigned, u64 (*)(struct clocksource *));
extern int clocksource_i8253_init(void);
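The kernel-doc above stresses that clocksource_cyc2ns() is only meant for cycle deltas, not absolute counter values; a small sketch of that usage (driver names assumed):

/* Illustrative only: convert a delta of clocksource cycles to nanoseconds. */
static s64 foo_delta_ns(struct clocksource *cs, u64 start, u64 now)
{
	u64 delta = (now - start) & cs->mask;	/* mask handles counter wrap */

	return clocksource_cyc2ns(delta, cs->mult, cs->shift);
}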
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 29f9e774ab76..6f0a91b37f68 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -1,6 +1,9 @@
#ifndef __CMA_H__
#define __CMA_H__
+#include <linux/init.h>
+#include <linux/types.h>
+
/*
* There is always at least global CMA area and a few optional
* areas configured in kernel .config.
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 928e5ca0caee..0444b1336268 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -21,7 +21,7 @@
* clobbered. The issue is as follows: while the inline asm might
* access any memory it wants, the compiler could have fit all of
* @ptr into memory registers instead, and since @ptr never escaped
- * from that, it proofed that the inline asm wasn't touching any of
+ * from that, it proved that the inline asm wasn't touching any of
* it. This version works well with both compilers, i.e. we're telling
* the compiler that the inline asm absolutely may see the contents
* of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index d9d6a9d77489..2319b8c108e8 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -35,14 +35,11 @@
#ifndef _CONFIGFS_H_
#define _CONFIGFS_H_
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/kref.h>
-#include <linux/mutex.h>
-#include <linux/err.h>
-
-#include <linux/atomic.h>
+#include <linux/stat.h> /* S_IRUGO */
+#include <linux/types.h> /* ssize_t */
+#include <linux/list.h> /* struct list_head */
+#include <linux/kref.h> /* struct kref */
+#include <linux/mutex.h> /* struct mutex */
#define CONFIGFS_ITEM_NAME_LEN 20
@@ -228,7 +225,7 @@ static struct configfs_bin_attribute _pfx##attr_##_name = { \
struct configfs_item_operations {
void (*release)(struct config_item *);
int (*allow_link)(struct config_item *src, struct config_item *target);
- int (*drop_link)(struct config_item *src, struct config_item *target);
+ void (*drop_link)(struct config_item *src, struct config_item *target);
};
struct configfs_group_operations {
diff --git a/include/linux/console.h b/include/linux/console.h
index d530c4627e54..9c26c6685587 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -28,9 +28,17 @@ struct tty_struct;
#define VT100ID "\033[?1;2c"
#define VT102ID "\033[?6c"
+enum con_scroll {
+ SM_UP,
+ SM_DOWN,
+};
+
/**
* struct consw - callbacks for consoles
*
+ * @con_scroll: move lines from @top to @bottom in direction @dir by @lines.
+ * Return true if no generic handling should be done.
+ * Invoked by csi_M and printing to the console.
* @con_set_palette: sets the palette of the console to @table (optional)
* @con_scrolldelta: the contents of the console should be scrolled by @lines.
* Invoked by user. (optional)
@@ -44,7 +52,9 @@ struct consw {
void (*con_putc)(struct vc_data *, int, int, int);
void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int);
void (*con_cursor)(struct vc_data *, int);
- int (*con_scroll)(struct vc_data *, int, int, int, int);
+ bool (*con_scroll)(struct vc_data *, unsigned int top,
+ unsigned int bottom, enum con_scroll dir,
+ unsigned int lines);
int (*con_switch)(struct vc_data *);
int (*con_blank)(struct vc_data *, int, int);
int (*con_font_set)(struct vc_data *, struct console_font *, unsigned);
@@ -99,10 +109,6 @@ static inline int con_debug_leave(void)
}
#endif
-/* scroll */
-#define SM_UP (1)
-#define SM_DOWN (2)
-
/* cursor */
#define CM_DRAW (1)
#define CM_ERASE (2)
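A hedged sketch of a console driver adapting to the new con_scroll prototype; foocon_hw_scroll_up() is an assumed hardware helper, not something this patch provides.

/* Illustrative only: scroll up in hardware, defer everything else. */
static bool foocon_scroll(struct vc_data *vc, unsigned int top,
			  unsigned int bottom, enum con_scroll dir,
			  unsigned int lines)
{
	if (dir != SM_UP || lines >= bottom - top)
		return false;		/* let the generic software scroll run */

	foocon_hw_scroll_up(vc, top, bottom, lines);	/* assumed helper */
	return true;			/* no generic handling needed */
}

static const struct consw foo_con = {
	/* ... other consw callbacks ... */
	.con_scroll = foocon_scroll,
};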
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index b886dc17f2f3..21f9c74496e7 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -57,9 +57,6 @@ struct notifier_block;
#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
-#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */
-#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
-#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
* lock is dropped */
@@ -80,87 +77,14 @@ struct notifier_block;
#ifdef CONFIG_SMP
extern bool cpuhp_tasks_frozen;
-/* Need to know about CPUs going up/down? */
-#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
-#define cpu_notifier(fn, pri) { \
- static struct notifier_block fn##_nb = \
- { .notifier_call = fn, .priority = pri }; \
- register_cpu_notifier(&fn##_nb); \
-}
-
-#define __cpu_notifier(fn, pri) { \
- static struct notifier_block fn##_nb = \
- { .notifier_call = fn, .priority = pri }; \
- __register_cpu_notifier(&fn##_nb); \
-}
-#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
-#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
-#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
-#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
-
-#ifdef CONFIG_HOTPLUG_CPU
-extern int register_cpu_notifier(struct notifier_block *nb);
-extern int __register_cpu_notifier(struct notifier_block *nb);
-extern void unregister_cpu_notifier(struct notifier_block *nb);
-extern void __unregister_cpu_notifier(struct notifier_block *nb);
-#else
-
-#ifndef MODULE
-extern int register_cpu_notifier(struct notifier_block *nb);
-extern int __register_cpu_notifier(struct notifier_block *nb);
-#else
-static inline int register_cpu_notifier(struct notifier_block *nb)
-{
- return 0;
-}
-
-static inline int __register_cpu_notifier(struct notifier_block *nb)
-{
- return 0;
-}
-#endif
-
-static inline void unregister_cpu_notifier(struct notifier_block *nb)
-{
-}
-
-static inline void __unregister_cpu_notifier(struct notifier_block *nb)
-{
-}
-#endif
-
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
-#define cpu_notifier_register_begin cpu_maps_update_begin
-#define cpu_notifier_register_done cpu_maps_update_done
-
#else /* CONFIG_SMP */
#define cpuhp_tasks_frozen 0
-#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
-#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
-
-static inline int register_cpu_notifier(struct notifier_block *nb)
-{
- return 0;
-}
-
-static inline int __register_cpu_notifier(struct notifier_block *nb)
-{
- return 0;
-}
-
-static inline void unregister_cpu_notifier(struct notifier_block *nb)
-{
-}
-
-static inline void __unregister_cpu_notifier(struct notifier_block *nb)
-{
-}
-
static inline void cpu_maps_update_begin(void)
{
}
@@ -169,14 +93,6 @@ static inline void cpu_maps_update_done(void)
{
}
-static inline void cpu_notifier_register_begin(void)
-{
-}
-
-static inline void cpu_notifier_register_done(void)
-{
-}
-
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
@@ -189,12 +105,6 @@ extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
-#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
-#define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri)
-#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
-#define __register_hotcpu_notifier(nb) __register_cpu_notifier(nb)
-#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
-#define __unregister_hotcpu_notifier(nb) __unregister_cpu_notifier(nb)
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);
@@ -206,13 +116,6 @@ static inline void cpu_hotplug_done(void) {}
#define put_online_cpus() do { } while (0)
#define cpu_hotplug_disable() do { } while (0)
#define cpu_hotplug_enable() do { } while (0)
-#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
-#define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
-/* These aren't inline functions due to a GCC bug. */
-#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
-#define __register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
-#define unregister_hotcpu_notifier(nb) ({ (void)(nb); })
-#define __unregister_hotcpu_notifier(nb) ({ (void)(nb); })
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_PM_SLEEP_SMP
@@ -245,6 +148,8 @@ void arch_cpu_idle_dead(void);
int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
+void play_idle(unsigned long duration_ms);
+
#ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds);
bool cpu_report_death(void);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 32dc0cbd51ca..7e05c5e4e45c 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -175,7 +175,7 @@ void disable_cpufreq(void);
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
-int cpufreq_update_policy(unsigned int cpu);
+void cpufreq_update_policy(unsigned int cpu);
bool have_governor_per_policy(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
@@ -234,6 +234,10 @@ __ATTR(_name, _perm, show_##_name, NULL)
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
+#define cpufreq_freq_attr_wo(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0200, NULL, store_##_name)
+
struct global_attr {
struct attribute attr;
ssize_t (*show)(struct kobject *kobj,
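A minimal sketch of the new write-only attribute helper; the attribute name and what its store handler does are made up for illustration.

/* Illustrative only: a 0200 (write-only) per-policy sysfs attribute. */
static ssize_t store_reset_stats(struct cpufreq_policy *policy,
				 const char *buf, size_t count)
{
	/* ... clear some per-policy statistics here ... */
	return count;
}
cpufreq_freq_attr_wo(reset_stats);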
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index afe641c02dca..20bfefbe7594 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -16,9 +16,11 @@ enum cpuhp_state {
CPUHP_PERF_SUPERH,
CPUHP_X86_HPET_DEAD,
CPUHP_X86_APB_DEAD,
+ CPUHP_X86_MCE_DEAD,
CPUHP_VIRT_NET_DEAD,
CPUHP_SLUB_DEAD,
CPUHP_MM_WRITEBACK_DEAD,
+ CPUHP_MM_VMSTAT_DEAD,
CPUHP_SOFTIRQ_DEAD,
CPUHP_NET_MVNETA_DEAD,
CPUHP_CPUIDLE_DEAD,
@@ -30,6 +32,18 @@ enum cpuhp_state {
CPUHP_ACPI_CPUDRV_DEAD,
CPUHP_S390_PFAULT_DEAD,
CPUHP_BLK_MQ_DEAD,
+ CPUHP_FS_BUFF_DEAD,
+ CPUHP_PRINTK_DEAD,
+ CPUHP_MM_MEMCQ_DEAD,
+ CPUHP_PERCPU_CNT_DEAD,
+ CPUHP_RADIX_DEAD,
+ CPUHP_PAGE_ALLOC_DEAD,
+ CPUHP_NET_DEV_DEAD,
+ CPUHP_PCI_XGENE_DEAD,
+ CPUHP_IOMMU_INTEL_DEAD,
+ CPUHP_LUSTRE_CFS_DEAD,
+ CPUHP_SCSI_BNX2FC_DEAD,
+ CPUHP_SCSI_BNX2I_DEAD,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
@@ -45,12 +59,20 @@ enum cpuhp_state {
CPUHP_POWERPC_MMU_CTX_PREPARE,
CPUHP_XEN_PREPARE,
CPUHP_XEN_EVTCHN_PREPARE,
- CPUHP_NOTIFY_PREPARE,
CPUHP_ARM_SHMOBILE_SCU_PREPARE,
CPUHP_SH_SH3X_PREPARE,
CPUHP_BLK_MQ_PREPARE,
+ CPUHP_NET_FLOW_PREPARE,
+ CPUHP_TOPOLOGY_PREPARE,
+ CPUHP_NET_IUCV_PREPARE,
+ CPUHP_ARM_BL_PREPARE,
+ CPUHP_TRACE_RB_PREPARE,
+ CPUHP_MM_ZS_PREPARE,
+ CPUHP_MM_ZSWP_MEM_PREPARE,
+ CPUHP_MM_ZSWP_POOL_PREPARE,
+ CPUHP_KVM_PPC_BOOK3S_PREPARE,
+ CPUHP_ZCOMP_PREPARE,
CPUHP_TIMERS_DEAD,
- CPUHP_NOTF_ERR_INJ_PREPARE,
CPUHP_MIPS_SOC_PREPARE,
CPUHP_BRINGUP_CPU,
CPUHP_AP_IDLE_DEAD,
@@ -58,10 +80,8 @@ enum cpuhp_state {
CPUHP_AP_SCHED_STARTING,
CPUHP_AP_RCUTREE_DYING,
CPUHP_AP_IRQ_GIC_STARTING,
- CPUHP_AP_IRQ_GICV3_STARTING,
CPUHP_AP_IRQ_HIP04_STARTING,
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
- CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_PERF_X86_UNCORE_STARTING,
@@ -80,7 +100,6 @@ enum cpuhp_state {
CPUHP_AP_ARM_L2X0_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
- CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_JCORE_TIMER_STARTING,
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
@@ -94,9 +113,10 @@ enum cpuhp_state {
CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
CPUHP_AP_KVM_ARM_VGIC_STARTING,
CPUHP_AP_KVM_ARM_TIMER_STARTING,
+ /* Must be the last timer callback */
+ CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_ARM_XEN_STARTING,
CPUHP_AP_ARM_CORESIGHT_STARTING,
- CPUHP_AP_ARM_CORESIGHT4_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING,
CPUHP_AP_SMPCFD_DYING,
CPUHP_AP_X86_TBOOT_DYING,
@@ -120,7 +140,6 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
- CPUHP_AP_NOTIFY_ONLINE,
CPUHP_AP_ONLINE_DYN,
CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
CPUHP_AP_X86_HPET_ONLINE,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index bb31373c3478..da346f2817a8 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -74,6 +74,7 @@ struct cpuidle_driver_kobj;
struct cpuidle_device {
unsigned int registered:1;
unsigned int enabled:1;
+ unsigned int use_deepest_state:1;
unsigned int cpu;
int last_residency;
@@ -192,11 +193,12 @@ static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#endif
-#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
+#ifdef CONFIG_CPU_IDLE
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
+extern void cpuidle_use_deepest_state(bool enable);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
@@ -204,6 +206,9 @@ static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
+static inline void cpuidle_use_deepest_state(bool enable)
+{
+}
#endif
/* kernel/sched/idle.c */
@@ -235,8 +240,6 @@ struct cpuidle_governor {
int (*select) (struct cpuidle_driver *drv,
struct cpuidle_device *dev);
void (*reflect) (struct cpuidle_device *dev, int index);
-
- struct module *owner;
};
#ifdef CONFIG_CPU_IDLE
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index da7fbf1cdd56..c717f5ea88cb 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -722,6 +722,11 @@ void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);
+static inline void reset_cpu_possible_mask(void)
+{
+ bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS);
+}
+
static inline void
set_cpu_possible(unsigned int cpu, bool possible)
{
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 7cee5551625b..c0b0cf3d2d2f 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -50,6 +50,8 @@
#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
#define CRYPTO_ALG_TYPE_KPP 0x00000008
+#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
+#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
#define CRYPTO_ALG_TYPE_DIGEST 0x0000000e
@@ -60,6 +62,7 @@
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
+#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
#define CRYPTO_ALG_LARVAL 0x00000010
#define CRYPTO_ALG_DEAD 0x00000020
@@ -87,7 +90,7 @@
#define CRYPTO_ALG_TESTED 0x00000400
/*
- * Set if the algorithm is an instance that is build from templates.
+ * Set if the algorithm is an instance that is built from templates.
*/
#define CRYPTO_ALG_INSTANCE 0x00000800
@@ -960,7 +963,7 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req)
* ablkcipher_request_set_callback() - set asynchronous callback function
* @req: request handle
* @flags: specify zero or an ORing of the flags
- * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
* increase the wait queue beyond the initial maximum size;
* CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
* @compl: callback function pointer to be registered with the request handle
@@ -977,7 +980,7 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req)
* cipher operation completes.
*
* The callback function is registered with the ablkcipher_request handle and
- * must comply with the following template
+ * must comply with the following template::
*
* void callback_function(struct crypto_async_request *req, int error)
*/
diff --git a/include/linux/dax.h b/include/linux/dax.h
index add6c4bc568f..24ad71173995 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -8,25 +8,47 @@
struct iomap_ops;
-/* We use lowest available exceptional entry bit for locking */
+/*
+ * We use lowest available bit in exceptional entry for locking, one bit for
+ * the entry size (PMD) and two more to tell us if the entry is a huge zero
+ * page (HZP) or an empty entry that is just used for locking. In total four
+ * special bits.
+ *
+ * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the HZP and
+ * EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
+ * block allocation.
+ */
+#define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
+#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
+#define RADIX_DAX_HZP (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
+#define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
-ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
+static inline unsigned long dax_radix_sector(void *entry)
+{
+ return (unsigned long)entry >> RADIX_DAX_SHIFT;
+}
+
+static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
+{
+ return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
+ ((unsigned long)sector << RADIX_DAX_SHIFT) |
+ RADIX_DAX_ENTRY_LOCK);
+}
+
+ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
struct iomap_ops *ops);
-ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
- get_block_t, dio_iodone_t, int flags);
-int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
-int dax_truncate_page(struct inode *, loff_t from, get_block_t);
-int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
struct iomap_ops *ops);
-int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
+int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
+int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
+ pgoff_t index);
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
- pgoff_t index, bool wake_all);
+ pgoff_t index, void *entry, bool wake_all);
#ifdef CONFIG_FS_DAX
struct page *read_dax_sector(struct block_device *bdev, sector_t n);
-void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index);
int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
unsigned int offset, unsigned int length);
#else
@@ -35,12 +57,6 @@ static inline struct page *read_dax_sector(struct block_device *bdev,
{
return ERR_PTR(-ENXIO);
}
-/* Shouldn't ever be called when dax is disabled. */
-static inline void dax_unlock_mapping_entry(struct address_space *mapping,
- pgoff_t index)
-{
- BUG();
-}
static inline int __dax_zero_page_range(struct block_device *bdev,
sector_t sector, unsigned int offset, unsigned int length)
{
@@ -48,18 +64,28 @@ static inline int __dax_zero_page_range(struct block_device *bdev,
}
#endif
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
-int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
- unsigned int flags, get_block_t);
+#ifdef CONFIG_FS_DAX_PMD
+static inline unsigned int dax_radix_order(void *entry)
+{
+ if ((unsigned long)entry & RADIX_DAX_PMD)
+ return PMD_SHIFT - PAGE_SHIFT;
+ return 0;
+}
+int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmd, unsigned int flags, struct iomap_ops *ops);
#else
-static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, unsigned int flags, get_block_t gb)
+static inline unsigned int dax_radix_order(void *entry)
+{
+ return 0;
+}
+static inline int dax_iomap_pmd_fault(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmd, unsigned int flags,
+ struct iomap_ops *ops)
{
return VM_FAULT_FALLBACK;
}
#endif
int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
-#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb)
static inline bool vma_is_dax(struct vm_area_struct *vma)
{
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 5beed7b30561..c965e4469499 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -139,7 +139,7 @@ struct dentry_operations {
void (*d_iput)(struct dentry *, struct inode *);
char *(*d_dname)(struct dentry *, char *, int);
struct vfsmount *(*d_automount)(struct path *);
- int (*d_manage)(struct dentry *, bool);
+ int (*d_manage)(const struct path *, bool);
struct dentry *(*d_real)(struct dentry *, const struct inode *,
unsigned int);
} ____cacheline_aligned;
@@ -254,7 +254,7 @@ extern struct dentry *d_find_alias(struct inode *);
extern void d_prune_aliases(struct inode *);
/* test whether we have any submounts in a subdir tree */
-extern int have_submounts(struct dentry *);
+extern int path_has_submounts(const struct path *);
/*
* This adds the entry to the hash queues.
diff --git a/include/linux/dcookies.h b/include/linux/dcookies.h
index 5ac3bdd5cee6..699b6c499c4f 100644
--- a/include/linux/dcookies.h
+++ b/include/linux/dcookies.h
@@ -44,7 +44,7 @@ void dcookie_unregister(struct dcookie_user * user);
*
* Returns 0 on success, with *cookie filled in
*/
-int get_dcookie(struct path *path, unsigned long *cookie);
+int get_dcookie(const struct path *path, unsigned long *cookie);
#else
@@ -58,7 +58,7 @@ static inline void dcookie_unregister(struct dcookie_user * user)
return;
}
-static inline int get_dcookie(struct path *path, unsigned long *cookie)
+static inline int get_dcookie(const struct path *path, unsigned long *cookie)
{
return -ENOSYS;
}
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 4d3f0d1aec73..014cc564d1c4 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -52,7 +52,8 @@ extern struct srcu_struct debugfs_srcu;
* Must only be called under the protection established by
* debugfs_use_file_start().
*/
-static inline const struct file_operations *debugfs_real_fops(struct file *filp)
+static inline const struct file_operations *
+debugfs_real_fops(const struct file *filp)
__must_hold(&debugfs_srcu)
{
/*
@@ -62,6 +63,21 @@ static inline const struct file_operations *debugfs_real_fops(struct file *filp)
return filp->f_path.dentry->d_fsdata;
}
+#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
+static int __fops ## _open(struct inode *inode, struct file *file) \
+{ \
+ __simple_attr_check_format(__fmt, 0ull); \
+ return simple_attr_open(inode, file, __get, __set, __fmt); \
+} \
+static const struct file_operations __fops = { \
+ .owner = THIS_MODULE, \
+ .open = __fops ## _open, \
+ .release = simple_attr_release, \
+ .read = debugfs_attr_read, \
+ .write = debugfs_attr_write, \
+ .llseek = generic_file_llseek, \
+}
+
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_create_file(const char *name, umode_t mode,
@@ -99,21 +115,6 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf,
ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
-static int __fops ## _open(struct inode *inode, struct file *file) \
-{ \
- __simple_attr_check_format(__fmt, 0ull); \
- return simple_attr_open(inode, file, __get, __set, __fmt); \
-} \
-static const struct file_operations __fops = { \
- .owner = THIS_MODULE, \
- .open = __fops ## _open, \
- .release = simple_attr_release, \
- .read = debugfs_attr_read, \
- .write = debugfs_attr_write, \
- .llseek = generic_file_llseek, \
-}
-
struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, const char *new_name);
@@ -233,8 +234,18 @@ static inline void debugfs_use_file_finish(int srcu_idx)
__releases(&debugfs_srcu)
{ }
-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
- static const struct file_operations __fops = { 0 }
+static inline ssize_t debugfs_attr_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
+static inline ssize_t debugfs_attr_write(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return -ENODEV;
+}
static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, char *new_name)
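With DEFINE_DEBUGFS_ATTRIBUTE() now visible to both the CONFIG_DEBUG_FS and stub builds (the stubs returning -ENODEV from the read/write helpers), a typical pairing with debugfs_create_file_unsafe() might look like the sketch below; the value being exposed is an assumption.

/* Illustrative only: expose a driver-private u64 through debugfs. */
static u64 foo_threshold;

static int foo_threshold_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int foo_threshold_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(foo_threshold_fops, foo_threshold_get,
			 foo_threshold_set, "%llu\n");

/* e.g. in probe:
 *	debugfs_create_file_unsafe("threshold", 0644, parent, &foo_threshold,
 *				   &foo_threshold_fops);
 */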
diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h
index 7adf6cc4b305..c35d0c0e0ada 100644
--- a/include/linux/devfreq_cooling.h
+++ b/include/linux/devfreq_cooling.h
@@ -20,7 +20,6 @@
#include <linux/devfreq.h>
#include <linux/thermal.h>
-#ifdef CONFIG_DEVFREQ_THERMAL
/**
* struct devfreq_cooling_power - Devfreq cooling power ops
@@ -37,12 +36,16 @@
* @dyn_power_coeff * frequency * voltage^2
*/
struct devfreq_cooling_power {
- unsigned long (*get_static_power)(unsigned long voltage);
- unsigned long (*get_dynamic_power)(unsigned long freq,
+ unsigned long (*get_static_power)(struct devfreq *devfreq,
+ unsigned long voltage);
+ unsigned long (*get_dynamic_power)(struct devfreq *devfreq,
+ unsigned long freq,
unsigned long voltage);
unsigned long dyn_power_coeff;
};
+#ifdef CONFIG_DEVFREQ_THERMAL
+
struct thermal_cooling_device *
of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
struct devfreq_cooling_power *dfc_power);
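A hedged sketch of the reworked ops, which now receive the struct devfreq pointer so a driver can reach its own state; the coefficient and unit scaling below are assumptions for illustration only.

/* Illustrative only: dynamic power roughly modelled as C * f * V^2. */
static unsigned long foo_get_dynamic_power(struct devfreq *devfreq,
					    unsigned long freq,
					    unsigned long voltage)
{
	const unsigned long coeff = 14;			/* assumed coefficient */
	unsigned long freq_mhz = freq / 1000000;	/* assumed: freq in Hz */

	return (coeff * freq_mhz * voltage * voltage) / 1000000;
}

static struct devfreq_cooling_power foo_cooling_power = {
	.get_dynamic_power = foo_get_dynamic_power,
};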
diff --git a/include/linux/device.h b/include/linux/device.h
index bc41e87a969b..491b4c0ca633 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -362,6 +362,7 @@ int subsys_virtual_register(struct bus_type *subsys,
* @name: Name of the class.
* @owner: The module owner.
* @class_attrs: Default attributes of this class.
+ * @class_groups: Default attributes of this class.
* @dev_groups: Default attributes of the devices that belong to the class.
* @dev_kobj: The kobject that represents this class and links it into the hierarchy.
* @dev_uevent: Called when a device is added, removed from this class, or a
@@ -390,6 +391,7 @@ struct class {
struct module *owner;
struct class_attribute *class_attrs;
+ const struct attribute_group **class_groups;
const struct attribute_group **dev_groups;
struct kobject *dev_kobj;
@@ -465,6 +467,8 @@ struct class_attribute {
struct class_attribute class_attr_##_name = __ATTR_RW(_name)
#define CLASS_ATTR_RO(_name) \
struct class_attribute class_attr_##_name = __ATTR_RO(_name)
+#define CLASS_ATTR_WO(_name) \
+ struct class_attribute class_attr_##_name = __ATTR_WO(_name)
extern int __must_check class_create_file_ns(struct class *class,
const struct class_attribute *attr,
@@ -698,6 +702,25 @@ static inline int devm_add_action_or_reset(struct device *dev,
return ret;
}
+/**
+ * devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @type: Type to allocate per-cpu memory for
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+#define devm_alloc_percpu(dev, type) \
+ ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
+ __alignof__(type)))
+
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
+ size_t align);
+void devm_free_percpu(struct device *dev, void __percpu *pdata);
+
struct device_dma_parameters {
/*
* a low level driver may set these to teach IOMMU code about
@@ -708,6 +731,87 @@ struct device_dma_parameters {
};
/**
+ * enum device_link_state - Device link states.
+ * @DL_STATE_NONE: The presence of the drivers is not being tracked.
+ * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
+ * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not.
+ * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present).
+ * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present.
+ * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding.
+ */
+enum device_link_state {
+ DL_STATE_NONE = -1,
+ DL_STATE_DORMANT = 0,
+ DL_STATE_AVAILABLE,
+ DL_STATE_CONSUMER_PROBE,
+ DL_STATE_ACTIVE,
+ DL_STATE_SUPPLIER_UNBIND,
+};
+
+/*
+ * Device link flags.
+ *
+ * STATELESS: The core won't track the presence of supplier/consumer drivers.
+ * AUTOREMOVE: Remove this link automatically on consumer driver unbind.
+ * PM_RUNTIME: If set, the runtime PM framework will use this link.
+ * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
+ */
+#define DL_FLAG_STATELESS BIT(0)
+#define DL_FLAG_AUTOREMOVE BIT(1)
+#define DL_FLAG_PM_RUNTIME BIT(2)
+#define DL_FLAG_RPM_ACTIVE BIT(3)
+
+/**
+ * struct device_link - Device link representation.
+ * @supplier: The device on the supplier end of the link.
+ * @s_node: Hook to the supplier device's list of links to consumers.
+ * @consumer: The device on the consumer end of the link.
+ * @c_node: Hook to the consumer device's list of links to suppliers.
+ * @status: The state of the link (with respect to the presence of drivers).
+ * @flags: Link flags.
+ * @rpm_active: Whether or not the consumer device is runtime-PM-active.
+ * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
+ */
+struct device_link {
+ struct device *supplier;
+ struct list_head s_node;
+ struct device *consumer;
+ struct list_head c_node;
+ enum device_link_state status;
+ u32 flags;
+ bool rpm_active;
+#ifdef CONFIG_SRCU
+ struct rcu_head rcu_head;
+#endif
+};
+
+/**
+ * enum dl_dev_state - Device driver presence tracking information.
+ * @DL_DEV_NO_DRIVER: There is no driver attached to the device.
+ * @DL_DEV_PROBING: A driver is probing.
+ * @DL_DEV_DRIVER_BOUND: The driver has been bound to the device.
+ * @DL_DEV_UNBINDING: The driver is unbinding from the device.
+ */
+enum dl_dev_state {
+ DL_DEV_NO_DRIVER = 0,
+ DL_DEV_PROBING,
+ DL_DEV_DRIVER_BOUND,
+ DL_DEV_UNBINDING,
+};
+
+/**
+ * struct dev_links_info - Device data related to device links.
+ * @suppliers: List of links to supplier devices.
+ * @consumers: List of links to consumer devices.
+ * @status: Driver status information.
+ */
+struct dev_links_info {
+ struct list_head suppliers;
+ struct list_head consumers;
+ enum dl_dev_state status;
+};
+
+/**
* struct device - The basic device structure
* @parent: The device's "parent" device, the device to which it is attached.
* In most cases, a parent device is some sort of bus or host
@@ -732,8 +836,9 @@ struct device_dma_parameters {
* on. This shrinks the "Board Support Packages" (BSPs) and
* minimizes board-specific #ifdefs in drivers.
* @driver_data: Private pointer for driver specific info.
+ * @links: Links to suppliers and consumers of this device.
* @power: For device power management.
- * See Documentation/power/devices.txt for details.
+ * See Documentation/power/admin-guide/devices.rst for details.
* @pm_domain: Provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions
* along with subsystem-level and driver-level callbacks.
@@ -799,6 +904,7 @@ struct device {
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
dev_set/get_drvdata */
+ struct dev_links_info links;
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
@@ -1116,6 +1222,10 @@ extern void device_shutdown(void);
/* debugging and troubleshooting/diagnostic helpers. */
extern const char *dev_driver_string(const struct device *dev);
+/* Device links interface. */
+struct device_link *device_link_add(struct device *consumer,
+ struct device *supplier, u32 flags);
+void device_link_del(struct device_link *link);
#ifdef CONFIG_PRINTK
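A hedged sketch of the new device-links interface from a consumer driver's probe; how the supplier device is found, and the particular flag combination, are assumptions rather than requirements of this patch.

/* Illustrative only: tie the consumer's runtime PM to its supplier. */
static int foo_consumer_probe(struct platform_device *pdev)
{
	struct device *supplier = foo_lookup_supplier(&pdev->dev); /* assumed helper */
	struct device_link *link;

	link = device_link_add(&pdev->dev, supplier,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE);
	if (!link)
		return -EINVAL;

	/*
	 * With DL_FLAG_AUTOREMOVE the core drops the link when this consumer
	 * unbinds, so no explicit device_link_del() is needed on remove.
	 */
	return 0;
}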
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index b91b023deffb..a52c6580cc9a 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -58,7 +58,7 @@ struct dm_io_notify {
struct dm_io_client;
struct dm_io_request {
int bi_op; /* REQ_OP */
- int bi_op_flags; /* rq_flag_bits */
+ int bi_op_flags; /* req_flag_bits */
struct dm_io_memory mem; /* Memory to use for io */
struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */
struct dm_io_client *client; /* Client memory handler */
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index e0b0741ae671..8daeb3ce0016 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -30,7 +30,7 @@
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/wait.h>
struct device;
@@ -143,7 +143,7 @@ struct dma_buf {
wait_queue_head_t poll;
struct dma_buf_poll_cb_t {
- struct fence_cb cb;
+ struct dma_fence_cb cb;
wait_queue_head_t *poll;
unsigned long active;
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
new file mode 100644
index 000000000000..5900945f962d
--- /dev/null
+++ b/include/linux/dma-fence-array.h
@@ -0,0 +1,86 @@
+/*
+ * fence-array: aggregates fences to be waited on together
+ *
+ * Copyright (C) 2016 Collabora Ltd
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Authors:
+ * Gustavo Padovan <gustavo@padovan.org>
+ * Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_DMA_FENCE_ARRAY_H
+#define __LINUX_DMA_FENCE_ARRAY_H
+
+#include <linux/dma-fence.h>
+
+/**
+ * struct dma_fence_array_cb - callback helper for fence array
+ * @cb: fence callback structure for signaling
+ * @array: reference to the parent fence array object
+ */
+struct dma_fence_array_cb {
+ struct dma_fence_cb cb;
+ struct dma_fence_array *array;
+};
+
+/**
+ * struct dma_fence_array - fence to represent an array of fences
+ * @base: fence base class
+ * @lock: spinlock for fence handling
+ * @num_fences: number of fences in the array
+ * @num_pending: fences in the array still pending
+ * @fences: array of the fences
+ */
+struct dma_fence_array {
+ struct dma_fence base;
+
+ spinlock_t lock;
+ unsigned num_fences;
+ atomic_t num_pending;
+ struct dma_fence **fences;
+};
+
+extern const struct dma_fence_ops dma_fence_array_ops;
+
+/**
+ * dma_fence_is_array - check if a fence is from the array subclass
+ * @fence: fence to test
+ *
+ * Return true if it is a dma_fence_array and false otherwise.
+ */
+static inline bool dma_fence_is_array(struct dma_fence *fence)
+{
+ return fence->ops == &dma_fence_array_ops;
+}
+
+/**
+ * to_dma_fence_array - cast a fence to a dma_fence_array
+ * @fence: fence to cast to a dma_fence_array
+ *
+ * Returns NULL if the fence is not a dma_fence_array,
+ * or the dma_fence_array otherwise.
+ */
+static inline struct dma_fence_array *
+to_dma_fence_array(struct dma_fence *fence)
+{
+ if (fence->ops != &dma_fence_array_ops)
+ return NULL;
+
+ return container_of(fence, struct dma_fence_array, base);
+}
+
+struct dma_fence_array *dma_fence_array_create(int num_fences,
+ struct dma_fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any);
+
+#endif /* __LINUX_DMA_FENCE_ARRAY_H */
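For illustration, a sketch that folds several fences into one using the new helper; note that, as this sketch assumes, the array takes over the caller's references to the fences it is given.

/* Illustrative only: one fence that signals once all of 'fences' have. */
static struct dma_fence *foo_merge_fences(struct dma_fence **fences,
					   int count)
{
	struct dma_fence_array *array;

	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1), 1,
				       false /* wait for all, not any */);
	if (!array)
		return NULL;

	return &array->base;
}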
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
new file mode 100644
index 000000000000..d51a7d23c358
--- /dev/null
+++ b/include/linux/dma-fence.h
@@ -0,0 +1,438 @@
+/*
+ * Fence mechanism for dma-buf to allow for asynchronous dma access
+ *
+ * Copyright (C) 2012 Canonical Ltd
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_DMA_FENCE_H
+#define __LINUX_DMA_FENCE_H
+
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/kref.h>
+#include <linux/sched.h>
+#include <linux/printk.h>
+#include <linux/rcupdate.h>
+
+struct dma_fence;
+struct dma_fence_ops;
+struct dma_fence_cb;
+
+/**
+ * struct dma_fence - software synchronization primitive
+ * @refcount: refcount for this fence
+ * @ops: dma_fence_ops associated with this fence
+ * @rcu: used for releasing fence with kfree_rcu
+ * @cb_list: list of all callbacks to call
+ * @lock: spin_lock_irqsave used for locking
+ * @context: execution context this fence belongs to, returned by
+ * dma_fence_context_alloc()
+ * @seqno: the sequence number of this fence inside the execution context,
+ * can be compared to decide which fence would be signaled later.
+ * @flags: A mask of DMA_FENCE_FLAG_* defined below
+ * @timestamp: Timestamp when the fence was signaled.
+ * @status: Optional, only valid if < 0, must be set before calling
+ * dma_fence_signal, indicates that the fence has completed with an error.
+ *
+ * the flags member must be manipulated and read using the appropriate
+ * atomic ops (bit_*), so taking the spinlock will not be needed most
+ * of the time.
+ *
+ * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
+ * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
+ * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
+ * implementer of the fence for its own purposes. Can be used in different
+ * ways by different fence implementers, so do not rely on this.
+ *
+ * Since atomic bitops are used, this is not guaranteed to be the case.
+ * Particularly, if the bit was set, but dma_fence_signal was called right
+ * before this bit was set, it would have been able to set the
+ * DMA_FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
+ * Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting
+ * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
+ * after dma_fence_signal was called, any enable_signaling call will have either
+ * been completed, or never called at all.
+ */
+struct dma_fence {
+ struct kref refcount;
+ const struct dma_fence_ops *ops;
+ struct rcu_head rcu;
+ struct list_head cb_list;
+ spinlock_t *lock;
+ u64 context;
+ unsigned seqno;
+ unsigned long flags;
+ ktime_t timestamp;
+ int status;
+};
+
+enum dma_fence_flag_bits {
+ DMA_FENCE_FLAG_SIGNALED_BIT,
+ DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ DMA_FENCE_FLAG_USER_BITS, /* must always be last member */
+};
+
+typedef void (*dma_fence_func_t)(struct dma_fence *fence,
+ struct dma_fence_cb *cb);
+
+/**
+ * struct dma_fence_cb - callback for dma_fence_add_callback
+ * @node: used by dma_fence_add_callback to append this struct to fence::cb_list
+ * @func: dma_fence_func_t to call
+ *
+ * This struct will be initialized by dma_fence_add_callback, additional
+ * data can be passed along by embedding dma_fence_cb in another struct.
+ */
+struct dma_fence_cb {
+ struct list_head node;
+ dma_fence_func_t func;
+};
+
+/**
+ * struct dma_fence_ops - operations implemented for fence
+ * @get_driver_name: returns the driver name.
+ * @get_timeline_name: return the name of the context this fence belongs to.
+ * @enable_signaling: enable software signaling of fence.
+ * @signaled: [optional] peek whether the fence is signaled, can be null.
+ * @wait: custom wait implementation, or dma_fence_default_wait.
+ * @release: [optional] called on destruction of fence, can be null
+ * @fill_driver_data: [optional] callback to fill in free-form debug info
+ * Returns amount of bytes filled, or -errno.
+ * @fence_value_str: [optional] fills in the value of the fence as a string
+ * @timeline_value_str: [optional] fills in the current value of the timeline
+ * as a string
+ *
+ * Notes on enable_signaling:
+ * For fence implementations that have the capability for hw->hw
+ * signaling, they can implement this op to enable the necessary
+ * irqs, or insert commands into cmdstream, etc. This is called
+ * in the first wait() or add_callback() path to let the fence
+ * implementation know that there is another driver waiting on
+ * the signal (ie. hw->sw case).
+ *
+ * This function can be called from atomic context, but not
+ * from irq context, so normal spinlocks can be used.
+ *
+ * A return value of false indicates the fence already passed,
+ * or some failure occurred that made it impossible to enable
+ * signaling. True indicates successful enabling.
+ *
+ * fence->status may be set in enable_signaling, but only when false is
+ * returned.
+ *
+ * Calling dma_fence_signal before enable_signaling is called allows
+ * for a tiny race window in which enable_signaling is called during,
+ * before, or after dma_fence_signal. To fight this, it is recommended
+ * that before enable_signaling returns true an extra reference is
+ * taken on the fence, to be released when the fence is signaled.
+ * This will mean dma_fence_signal will still be called twice, but
+ * the second time will be a noop since it was already signaled.
+ *
+ * Notes on signaled:
+ * May set fence->status if returning true.
+ *
+ * Notes on wait:
+ * Must not be NULL, set to dma_fence_default_wait for default implementation.
+ * The dma_fence_default_wait implementation should work for any fence, as long
+ * as enable_signaling works correctly.
+ *
+ * Must return -ERESTARTSYS if the wait is intr = true and the wait was
+ * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
+ * timed out. Can also return other error values on custom implementations,
+ * which should be treated as if the fence is signaled. For example a hardware
+ * lockup could be reported like that.
+ *
+ * Notes on release:
+ * Can be NULL, this function allows additional commands to run on
+ * destruction of the fence. Can be called from irq context.
+ * If pointer is set to NULL, kfree will get called instead.
+ */
+
+struct dma_fence_ops {
+ const char * (*get_driver_name)(struct dma_fence *fence);
+ const char * (*get_timeline_name)(struct dma_fence *fence);
+ bool (*enable_signaling)(struct dma_fence *fence);
+ bool (*signaled)(struct dma_fence *fence);
+ signed long (*wait)(struct dma_fence *fence,
+ bool intr, signed long timeout);
+ void (*release)(struct dma_fence *fence);
+
+ int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
+ void (*fence_value_str)(struct dma_fence *fence, char *str, int size);
+ void (*timeline_value_str)(struct dma_fence *fence,
+ char *str, int size);
+};
+
+void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, unsigned seqno);
+
+void dma_fence_release(struct kref *kref);
+void dma_fence_free(struct dma_fence *fence);
+
+/**
+ * dma_fence_put - decreases refcount of the fence
+ * @fence: [in] fence to reduce refcount of
+ */
+static inline void dma_fence_put(struct dma_fence *fence)
+{
+ if (fence)
+ kref_put(&fence->refcount, dma_fence_release);
+}
+
+/**
+ * dma_fence_get - increases refcount of the fence
+ * @fence: [in] fence to increase refcount of
+ *
+ * Returns the same fence, with refcount increased by 1.
+ */
+static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
+{
+ if (fence)
+ kref_get(&fence->refcount);
+ return fence;
+}
+
+/**
+ * dma_fence_get_rcu - get a fence from a reservation_object_list with
+ * rcu read lock
+ * @fence: [in] fence to increase refcount of
+ *
+ * Function returns NULL if no refcount could be obtained, or the fence.
+ */
+static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
+{
+ if (kref_get_unless_zero(&fence->refcount))
+ return fence;
+ else
+ return NULL;
+}
+
+/**
+ * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence
+ * @fencep: [in] pointer to fence to increase refcount of
+ *
+ * Function returns NULL if no refcount could be obtained, or the fence.
+ * This function handles acquiring a reference to a fence that may be
+ * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU),
+ * so long as the caller is using RCU on the pointer to the fence.
+ *
+ * An alternative mechanism is to employ a seqlock to protect a bunch of
+ * fences, such as used by struct reservation_object. When using a seqlock,
+ * the seqlock must be taken before and checked after a reference to the
+ * fence is acquired (as shown here).
+ *
+ * The caller is required to hold the RCU read lock.
+ */
+static inline struct dma_fence *
+dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
+{
+ do {
+ struct dma_fence *fence;
+
+ fence = rcu_dereference(*fencep);
+ if (!fence || !dma_fence_get_rcu(fence))
+ return NULL;
+
+ /* The atomic_inc_not_zero() inside dma_fence_get_rcu()
+ * provides a full memory barrier upon success (such as now).
+ * This is paired with the write barrier from assigning
+ * to the __rcu protected fence pointer so that if that
+ * pointer still matches the current fence, we know we
+ * have successfully acquired a reference to it. If it no
+ * longer matches, we are holding a reference to some other
+ * reallocated pointer. This is possible if the allocator
+ * is using a freelist like SLAB_DESTROY_BY_RCU where the
+ * fence remains valid for the RCU grace period, but it
+ * may be reallocated. When using such allocators, we are
+ * responsible for ensuring the reference we get is to
+ * the right fence, as below.
+ */
+ if (fence == rcu_access_pointer(*fencep))
+ return rcu_pointer_handoff(fence);
+
+ dma_fence_put(fence);
+ } while (1);
+}
+
+int dma_fence_signal(struct dma_fence *fence);
+int dma_fence_signal_locked(struct dma_fence *fence);
+signed long dma_fence_default_wait(struct dma_fence *fence,
+ bool intr, signed long timeout);
+int dma_fence_add_callback(struct dma_fence *fence,
+ struct dma_fence_cb *cb,
+ dma_fence_func_t func);
+bool dma_fence_remove_callback(struct dma_fence *fence,
+ struct dma_fence_cb *cb);
+void dma_fence_enable_sw_signaling(struct dma_fence *fence);
+
+/**
+ * dma_fence_is_signaled_locked - Return an indication if the fence
+ * is signaled yet.
+ * @fence: [in] the fence to check
+ *
+ * Returns true if the fence was already signaled, false if not. Since this
+ * function doesn't enable signaling, it is not guaranteed to ever return
+ * true if dma_fence_add_callback, dma_fence_wait or
+ * dma_fence_enable_sw_signaling haven't been called before.
+ *
+ * This function requires fence->lock to be held.
+ */
+static inline bool
+dma_fence_is_signaled_locked(struct dma_fence *fence)
+{
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return true;
+
+ if (fence->ops->signaled && fence->ops->signaled(fence)) {
+ dma_fence_signal_locked(fence);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * dma_fence_is_signaled - Return an indication if the fence is signaled yet.
+ * @fence: [in] the fence to check
+ *
+ * Returns true if the fence was already signaled, false if not. Since this
+ * function doesn't enable signaling, it is not guaranteed to ever return
+ * true if dma_fence_add_callback, dma_fence_wait or
+ * dma_fence_enable_sw_signaling haven't been called before.
+ *
+ * It's recommended for seqno fences to call dma_fence_signal when the
+ * operation is complete, as it makes it possible to prevent issues from
+ * wraparound between time of issue and time of use by checking the return
+ * value of this function before calling hardware-specific wait instructions.
+ */
+static inline bool
+dma_fence_is_signaled(struct dma_fence *fence)
+{
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return true;
+
+ if (fence->ops->signaled && fence->ops->signaled(fence)) {
+ dma_fence_signal(fence);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * dma_fence_is_later - return if f1 is chronologically later than f2
+ * @f1: [in] the first fence from the same context
+ * @f2: [in] the second fence from the same context
+ *
+ * Returns true if f1 is chronologically later than f2. Both fences must be
+ * from the same context, since a seqno is not re-used across contexts.
+ */
+static inline bool dma_fence_is_later(struct dma_fence *f1,
+ struct dma_fence *f2)
+{
+ if (WARN_ON(f1->context != f2->context))
+ return false;
+
+ return (int)(f1->seqno - f2->seqno) > 0;
+}
+
+/**
+ * dma_fence_later - return the chronologically later fence
+ * @f1: [in] the first fence from the same context
+ * @f2: [in] the second fence from the same context
+ *
+ * Returns NULL if both fences are signaled, otherwise the fence that would be
+ * signaled last. Both fences must be from the same context, since a seqno is
+ * not re-used across contexts.
+ */
+static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
+ struct dma_fence *f2)
+{
+ if (WARN_ON(f1->context != f2->context))
+ return NULL;
+
+ /*
+ * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never
+ * have been set if enable_signaling wasn't called, and enabling that
+ * here is overkill.
+ */
+ if (dma_fence_is_later(f1, f2))
+ return dma_fence_is_signaled(f1) ? NULL : f1;
+ else
+ return dma_fence_is_signaled(f2) ? NULL : f2;
+}
+
+signed long dma_fence_wait_timeout(struct dma_fence *,
+ bool intr, signed long timeout);
+signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
+ uint32_t count,
+ bool intr, signed long timeout,
+ uint32_t *idx);
+
+/**
+ * dma_fence_wait - sleep until the fence gets signaled
+ * @fence: [in] the fence to wait on
+ * @intr: [in] if true, do an interruptible wait
+ *
+ * This function will return -ERESTARTSYS if interrupted by a signal,
+ * or 0 if the fence was signaled. Other error values may be
+ * returned on custom implementations.
+ *
+ * Performs a synchronous wait on this fence. It is assumed the caller
+ * directly or indirectly holds a reference to the fence, otherwise the
+ * fence might be freed before return, resulting in undefined behavior.
+ */
+static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
+{
+ signed long ret;
+
+ /* Since dma_fence_wait_timeout cannot timeout with
+ * MAX_SCHEDULE_TIMEOUT, only valid return values are
+ * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
+ */
+ ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
+
+ return ret < 0 ? ret : 0;
+}
+
+u64 dma_fence_context_alloc(unsigned num);
+
+#define DMA_FENCE_TRACE(f, fmt, args...) \
+ do { \
+ struct dma_fence *__ff = (f); \
+ if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \
+ pr_info("f %llu#%u: " fmt, \
+ __ff->context, __ff->seqno, ##args); \
+ } while (0)
+
+#define DMA_FENCE_WARN(f, fmt, args...) \
+ do { \
+ struct dma_fence *__ff = (f); \
+ pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
+ ##args); \
+ } while (0)
+
+#define DMA_FENCE_ERR(f, fmt, args...) \
+ do { \
+ struct dma_fence *__ff = (f); \
+ pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
+ ##args); \
+ } while (0)
+
+#endif /* __LINUX_DMA_FENCE_H */
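A minimal, hedged sketch tying the new API together: a driver-side ops table reduced to the required callbacks, plus the init/signal/put sequence. Everything named foo_* is made up.

#include <linux/dma-fence.h>
#include <linux/slab.h>

static const char *foo_get_driver_name(struct dma_fence *f)
{
	return "foo";
}

static const char *foo_get_timeline_name(struct dma_fence *f)
{
	return "foo-timeline";
}

static bool foo_enable_signaling(struct dma_fence *f)
{
	return true;	/* foo_fence_example() always delivers the signal */
}

static const struct dma_fence_ops foo_fence_ops = {
	.get_driver_name	= foo_get_driver_name,
	.get_timeline_name	= foo_get_timeline_name,
	.enable_signaling	= foo_enable_signaling,
	.wait			= dma_fence_default_wait,
};

static DEFINE_SPINLOCK(foo_fence_lock);

static void foo_fence_example(void)
{
	struct dma_fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);

	if (!fence)
		return;

	dma_fence_init(fence, &foo_fence_ops, &foo_fence_lock,
		       dma_fence_context_alloc(1), 1);

	/* ... publish the fence; consumers may dma_fence_wait() on it ... */

	dma_fence_signal(fence);	/* wake waiters, run registered callbacks */
	dma_fence_put(fence);		/* default release path kfree()s it */
}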
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 32c589062bd9..7f7e9a7e3839 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -61,6 +61,10 @@ void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs);
+dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
int iommu_dma_supported(struct device *dev, u64 mask);
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 08528afdf58b..10c5a17b1f51 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -243,29 +243,33 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
ops->unmap_sg(dev, sg, nents, dir, attrs);
}
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
- size_t offset, size_t size,
- enum dma_data_direction dir)
+static inline dma_addr_t dma_map_page_attrs(struct device *dev,
+ struct page *page,
+ size_t offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
kmemcheck_mark_initialized(page_address(page) + offset, size);
BUG_ON(!valid_dma_direction(dir));
- addr = ops->map_page(dev, page, offset, size, dir, 0);
+ addr = ops->map_page(dev, page, offset, size, dir, attrs);
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
return addr;
}
-static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir)
+static inline void dma_unmap_page_attrs(struct device *dev,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_page)
- ops->unmap_page(dev, addr, size, dir, 0);
+ ops->unmap_page(dev, addr, size, dir, attrs);
debug_dma_unmap_page(dev, addr, size, dir, false);
}
@@ -385,6 +389,8 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
+#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
+#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
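A brief sketch of how a driver might use the new *_attrs variants directly (illustration only; the device, the page, and the choice of DMA_ATTR_SKIP_CPU_SYNC are assumptions, not taken from the patch):

#include <linux/dma-mapping.h>

/* Hypothetical driver helper: map one page for the device while skipping the
 * CPU cache sync via the attrs argument; the plain dma_map_page()/
 * dma_unmap_page() macros above simply pass attrs = 0. */
static int example_map_one_page(struct device *dev, struct page *page)
{
	dma_addr_t addr;

	addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE,
				  DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... hand 'addr' to the hardware and run the transfer ... */

	dma_unmap_page_attrs(dev, addr, PAGE_SIZE, DMA_TO_DEVICE,
			     DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}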
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index cc535a478bae..feee6ec6a13b 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -336,6 +336,12 @@ enum dma_slave_buswidth {
* may or may not be applicable on memory sources.
* @dst_maxburst: same as src_maxburst but for destination target
* mutatis mutandis.
+ * @src_port_window_size: The length of the register area, in words, through
+ * which the data needs to be accessed on the device side. It is only used for
+ * devices that use an area instead of a single register to receive the data.
+ * Typically the DMA loops over this area in order to transfer the data.

+ * @dst_port_window_size: same as src_port_window_size but for the destination
+ * port.
* @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
* with 'true' if peripheral should be flow controller. Direction will be
* selected at Runtime.
@@ -363,6 +369,8 @@ struct dma_slave_config {
enum dma_slave_buswidth dst_addr_width;
u32 src_maxburst;
u32 dst_maxburst;
+ u32 src_port_window_size;
+ u32 dst_port_window_size;
bool device_fc;
unsigned int slave_id;
};
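A hedged example of a slave driver describing such a register window through the new fields (the peripheral, FIFO address, and numbers below are made up):

#include <linux/dmaengine.h>

/* Hypothetical client driver: the device exposes a 16-word receive window
 * rather than a single FIFO register, so advertise that via
 * src_port_window_size when configuring the channel. */
static int example_configure_rx(struct dma_chan *chan, dma_addr_t fifo_base)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = fifo_base,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 8,
		.src_port_window_size = 16,
	};

	return dmaengine_slave_config(chan, &cfg);
}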
diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h
index c934d3a96b5e..2896f93808ae 100644
--- a/include/linux/drbd_genl.h
+++ b/include/linux/drbd_genl.h
@@ -67,7 +67,7 @@
* genl_magic_func.h
* generates an entry in the static genl_ops array,
* and static register/unregister functions to
- * genl_register_family_with_ops().
+ * genl_register_family().
*
* flags and handler:
* GENL_op_init( .doit = x, .dumpit = y, .flags = something)
diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h
index 1f79b20918b1..4334106f44c3 100644
--- a/include/linux/dw_apb_timer.h
+++ b/include/linux/dw_apb_timer.h
@@ -50,6 +50,6 @@ dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base,
unsigned long freq);
void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs);
void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs);
-cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs);
+u64 dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs);
#endif /* __DW_APB_TIMER_H__ */
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 9e0d78966552..07c52c0af62d 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -18,6 +18,8 @@
#include <linux/workqueue.h>
#include <linux/debugfs.h>
+#define EDAC_DEVICE_NAME_LEN 31
+
struct device;
#define EDAC_OPSTATE_INVAL -1
@@ -128,12 +130,21 @@ enum dev_type {
* fatal (maybe it is on an unused memory area,
* or the memory controller could recover from
* it for example, by re-trying the operation).
+ * @HW_EVENT_ERR_DEFERRED: Deferred Error - Indicates an uncorrectable
+ * error whose handling is not urgent. This could
+ * be due to hardware data poisoning where the
+ * system can continue operation until the poisoned
+ * data is consumed. Preemptive measures may also
+ * be taken, e.g. offlining pages, etc.
* @HW_EVENT_ERR_FATAL: Fatal Error - Uncorrected error that could not
* be recovered.
+ * @HW_EVENT_ERR_INFO: Informational - The CPER spec defines a fourth
+ * type of error: informational logs.
*/
enum hw_event_mc_err_type {
HW_EVENT_ERR_CORRECTED,
HW_EVENT_ERR_UNCORRECTED,
+ HW_EVENT_ERR_DEFERRED,
HW_EVENT_ERR_FATAL,
HW_EVENT_ERR_INFO,
};
@@ -145,6 +156,8 @@ static inline char *mc_event_error_type(const unsigned int err_type)
return "Corrected";
case HW_EVENT_ERR_UNCORRECTED:
return "Uncorrected";
+ case HW_EVENT_ERR_DEFERRED:
+ return "Deferred";
case HW_EVENT_ERR_FATAL:
return "Fatal";
default:
@@ -157,7 +170,7 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* enum mem_type - memory types. For a more detailed reference, please see
* http://en.wikipedia.org/wiki/DRAM
*
- * @MEM_EMPTY Empty csrow
+ * @MEM_EMPTY: Empty csrow
* @MEM_RESERVED: Reserved csrow type
* @MEM_UNKNOWN: Unknown csrow type
* @MEM_FPM: FPM - Fast Page Mode, used on systems up to 1995.
@@ -192,10 +205,11 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* @MEM_DDR3: DDR3 RAM
* @MEM_RDDR3: Registered DDR3 RAM
* This is a variant of the DDR3 memories.
- * @MEM_LRDDR3 Load-Reduced DDR3 memory.
+ * @MEM_LRDDR3: Load-Reduced DDR3 memory.
* @MEM_DDR4: Unbuffered DDR4 RAM
* @MEM_RDDR4: Registered DDR4 RAM
* This is a variant of the DDR4 memories.
+ * @MEM_LRDDR4: Load-Reduced DDR4 memory.
*/
enum mem_type {
MEM_EMPTY = 0,
@@ -218,6 +232,7 @@ enum mem_type {
MEM_LRDDR3,
MEM_DDR4,
MEM_RDDR4,
+ MEM_LRDDR4,
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -239,6 +254,7 @@ enum mem_type {
#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
#define MEM_FLAG_DDR4 BIT(MEM_DDR4)
#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4)
+#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
/**
* enum edac-type - Error Detection and Correction capabilities and mode
@@ -278,7 +294,7 @@ enum edac_type {
/**
* enum scrub_type - scrubbing capabilities
- * @SCRUB_UNKNOWN Unknown if scrubber is available
+ * @SCRUB_UNKNOWN: Unknown if scrubber is available
* @SCRUB_NONE: No scrubber
* @SCRUB_SW_PROG: SW progressive (sequential) scrubbing
* @SCRUB_SW_SRC: Software scrub only errors
@@ -287,7 +303,7 @@ enum edac_type {
* @SCRUB_HW_PROG: HW progressive (sequential) scrubbing
* @SCRUB_HW_SRC: Hardware scrub only errors
* @SCRUB_HW_PROG_SRC: Progressive hardware scrub from an error
- * SCRUB_HW_TUNABLE: Hardware scrub frequency is tunable
+ * @SCRUB_HW_TUNABLE: Hardware scrub frequency is tunable
*/
enum scrub_type {
SCRUB_UNKNOWN = 0,
@@ -320,114 +336,6 @@ enum scrub_type {
#define OP_RUNNING_POLL_INTR 0x203
#define OP_OFFLINE 0x300
-/*
- * Concepts used at the EDAC subsystem
- *
- * There are several things to be aware of that aren't at all obvious:
- *
- * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc..
- *
- * These are some of the many terms that are thrown about that don't always
- * mean what people think they mean (Inconceivable!). In the interest of
- * creating a common ground for discussion, terms and their definitions
- * will be established.
- *
- * Memory devices: The individual DRAM chips on a memory stick. These
- * devices commonly output 4 and 8 bits each (x4, x8).
- * Grouping several of these in parallel provides the
- * number of bits that the memory controller expects:
- * typically 72 bits, in order to provide 64 bits +
- * 8 bits of ECC data.
- *
- * Memory Stick: A printed circuit board that aggregates multiple
- * memory devices in parallel. In general, this is the
- * Field Replaceable Unit (FRU) which gets replaced, in
- * the case of excessive errors. Most often it is also
- * called DIMM (Dual Inline Memory Module).
- *
- * Memory Socket: A physical connector on the motherboard that accepts
- * a single memory stick. Also called as "slot" on several
- * datasheets.
- *
- * Channel: A memory controller channel, responsible to communicate
- * with a group of DIMMs. Each channel has its own
- * independent control (command) and data bus, and can
- * be used independently or grouped with other channels.
- *
- * Branch: It is typically the highest hierarchy on a
- * Fully-Buffered DIMM memory controller.
- * Typically, it contains two channels.
- * Two channels at the same branch can be used in single
- * mode or in lockstep mode.
- * When lockstep is enabled, the cacheline is doubled,
- * but it generally brings some performance penalty.
- * Also, it is generally not possible to point to just one
- * memory stick when an error occurs, as the error
- * correction code is calculated using two DIMMs instead
- * of one. Due to that, it is capable of correcting more
- * errors than on single mode.
- *
- * Single-channel: The data accessed by the memory controller is contained
- * into one dimm only. E. g. if the data is 64 bits-wide,
- * the data flows to the CPU using one 64 bits parallel
- * access.
- * Typically used with SDR, DDR, DDR2 and DDR3 memories.
- * FB-DIMM and RAMBUS use a different concept for channel,
- * so this concept doesn't apply there.
- *
- * Double-channel: The data size accessed by the memory controller is
- * interlaced into two dimms, accessed at the same time.
- * E. g. if the DIMM is 64 bits-wide (72 bits with ECC),
- * the data flows to the CPU using a 128 bits parallel
- * access.
- *
- * Chip-select row: This is the name of the DRAM signal used to select the
- * DRAM ranks to be accessed. Common chip-select rows for
- * single channel are 64 bits, for dual channel 128 bits.
- * It may not be visible by the memory controller, as some
- * DIMM types have a memory buffer that can hide direct
- * access to it from the Memory Controller.
- *
- * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memory.
- * Motherboards commonly drive two chip-select pins to
- * a memory stick. A single-ranked stick, will occupy
- * only one of those rows. The other will be unused.
- *
- * Double-Ranked stick: A double-ranked stick has two chip-select rows which
- * access different sets of memory devices. The two
- * rows cannot be accessed concurrently.
- *
- * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick.
- * A double-sided stick has two chip-select rows which
- * access different sets of memory devices. The two
- * rows cannot be accessed concurrently. "Double-sided"
- * is irrespective of the memory devices being mounted
- * on both sides of the memory stick.
- *
- * Socket set: All of the memory sticks that are required for
- * a single memory access or all of the memory sticks
- * spanned by a chip-select row. A single socket set
- * has two chip-select rows and if double-sided sticks
- * are used these will occupy those chip-select rows.
- *
- * Bank: This term is avoided because it is unclear when
- * needing to distinguish between chip-select rows and
- * socket sets.
- *
- * Controller pages:
- *
- * Physical pages:
- *
- * Virtual pages:
- *
- *
- * STRUCTURE ORGANIZATION AND CHOICES
- *
- *
- *
- * PS - I enjoyed writing all that about as much as you enjoyed reading it.
- */
-
/**
* enum edac_mc_layer - memory controller hierarchy layer
*
@@ -452,7 +360,7 @@ enum edac_mc_layer_type {
/**
* struct edac_mc_layer - describes the memory controller hierarchy
- * @layer: layer type
+ * @type: layer type
* @size: number of components per layer. For example,
* if the channel layer has two channels, size = 2
* @is_virt_csrow: This layer is part of the "csrow" when old API
@@ -475,24 +383,28 @@ struct edac_mc_layer {
#define EDAC_MAX_LAYERS 3
/**
- * EDAC_DIMM_OFF - Macro responsible to get a pointer offset inside a pointer array
- * for the element given by [layer0,layer1,layer2] position
+ * EDAC_DIMM_OFF - Macro responsible to get a pointer offset inside a pointer
+ * array for the element given by [layer0,layer1,layer2]
+ * position
*
* @layers: a struct edac_mc_layer array, describing how many elements
* were allocated for each layer
- * @n_layers: Number of layers at the @layers array
+ * @nlayers: Number of layers at the @layers array
* @layer0: layer0 position
* @layer1: layer1 position. Unused if n_layers < 2
* @layer2: layer2 position. Unused if n_layers < 3
*
- * For 1 layer, this macro returns &var[layer0] - &var
+ * For 1 layer, this macro returns "var[layer0] - var";
+ *
* For 2 layers, this macro is similar to allocate a bi-dimensional array
- * and to return "&var[layer0][layer1] - &var"
+ * and to return "var[layer0][layer1] - var";
+ *
* For 3 layers, this macro is similar to allocate a tri-dimensional array
- * and to return "&var[layer0][layer1][layer2] - &var"
+ * and to return "var[layer0][layer1][layer2] - var".
*
* A loop could be used here to make it more generic, but, as we only have
* 3 layers, this is a little faster.
+ *
* By design, layers can never be 0 or more than 3. If that ever happens,
* a NULL is returned, causing an OOPS during the memory allocation routine,
 * which would point out to the developer that he's doing something wrong.
@@ -519,16 +431,18 @@ struct edac_mc_layer {
* were allocated for each layer
* @var: name of the var where we want to get the pointer
* (like mci->dimms)
- * @n_layers: Number of layers at the @layers array
+ * @nlayers: Number of layers at the @layers array
* @layer0: layer0 position
* @layer1: layer1 position. Unused if n_layers < 2
* @layer2: layer2 position. Unused if n_layers < 3
*
- * For 1 layer, this macro returns &var[layer0]
+ * For 1 layer, this macro returns "var[layer0]";
+ *
* For 2 layers, this macro is similar to allocate a bi-dimensional array
- * and to return "&var[layer0][layer1]"
+ * and to return "var[layer0][layer1]";
+ *
* For 3 layers, this macro is similar to allocate a tri-dimensional array
- * and to return "&var[layer0][layer1][layer2]"
+ * and to return "var[layer0][layer1][layer2]";
*/
#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \
typeof(*var) __p; \
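A short sketch of the typical lookup these macros support (assuming a two-layer csrow/channel hierarchy; the helper and variable names are illustrative, not from the patch):

#include <linux/edac.h>

/* Hypothetical helper inside an EDAC driver: resolve the dimm_info that sits
 * at a given csrow/channel position. The third index is unused because only
 * two layers are defined. */
static struct dimm_info *example_get_dimm(struct mem_ctl_info *mci,
					  int row, int chan)
{
	return EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
			     row, chan, 0);
}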
@@ -614,7 +528,7 @@ struct errcount_attribute_data {
};
/**
- * edac_raw_error_desc - Raw error report structure
+ * struct edac_raw_error_desc - Raw error report structure
* @grain: minimum granularity for an error report, in bytes
* @error_count: number of errors of the same type
* @top_layer: top layer of the error (layer[0])
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 2d089487d2da..a07a476178cd 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -443,6 +443,22 @@ typedef struct {
#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000
#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000
+typedef struct {
+ u32 version;
+ u32 get;
+ u32 set;
+ u32 del;
+ u32 get_all;
+} apple_properties_protocol_32_t;
+
+typedef struct {
+ u64 version;
+ u64 get;
+ u64 set;
+ u64 del;
+ u64 get_all;
+} apple_properties_protocol_64_t;
+
/*
* Types and defines for EFI ResetSystem
*/
@@ -589,8 +605,10 @@ void efi_native_runtime_setup(void);
#define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0)
#define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5)
#define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
+#define EFI_RNG_ALGORITHM_RAW EFI_GUID(0xe43176d7, 0xb6e8, 0x4827, 0xb7, 0x84, 0x7f, 0xfd, 0xc4, 0xb6, 0x85, 0x61)
#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
+#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
/*
* This GUID is used to pass to the kernel proper the struct screen_info
@@ -599,6 +617,7 @@ void efi_native_runtime_setup(void);
*/
#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
+#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
typedef struct {
efi_guid_t guid;
@@ -872,6 +891,7 @@ extern struct efi {
unsigned long esrt; /* ESRT table */
unsigned long properties_table; /* properties table */
unsigned long mem_attr_table; /* memory attributes table */
+ unsigned long rng_seed; /* UEFI firmware random seed */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -1145,6 +1165,26 @@ struct efi_generic_dev_path {
u16 length;
} __attribute ((packed));
+struct efi_dev_path {
+ u8 type; /* can be replaced with unnamed */
+ u8 sub_type; /* struct efi_generic_dev_path; */
+ u16 length; /* once we've moved to -std=c11 */
+ union {
+ struct {
+ u32 hid;
+ u32 uid;
+ } acpi;
+ struct {
+ u8 fn;
+ u8 dev;
+ } pci;
+ };
+} __attribute ((packed));
+
+#if IS_ENABLED(CONFIG_EFI_DEV_PATH_PARSER)
+struct device *efi_get_device_by_path(struct efi_dev_path **node, size_t *len);
+#endif
+
static inline void memrange_efi_to_native(u64 *addr, u64 *npages)
{
*npages = PFN_UP(*addr + (*npages<<EFI_PAGE_SHIFT)) - PFN_DOWN(*addr);
@@ -1493,4 +1533,10 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
struct efi_boot_memmap *map,
void *priv,
efi_exit_boot_map_processing priv_func);
+
+struct linux_efi_random_seed {
+ u32 size;
+ u8 bits[];
+};
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index e7f358d2e5fc..b276e9ef0e0b 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -30,7 +30,7 @@ typedef int (elevator_dispatch_fn) (struct request_queue *, int);
typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
-typedef int (elevator_may_queue_fn) (struct request_queue *, int, int);
+typedef int (elevator_may_queue_fn) (struct request_queue *, unsigned int);
typedef void (elevator_init_icq_fn) (struct io_cq *);
typedef void (elevator_exit_icq_fn) (struct io_cq *);
@@ -108,6 +108,11 @@ struct elevator_type
#define ELV_HASH_BITS 6
+void elv_rqhash_del(struct request_queue *q, struct request *rq);
+void elv_rqhash_add(struct request_queue *q, struct request *rq);
+void elv_rqhash_reposition(struct request_queue *q, struct request *rq);
+struct request *elv_rqhash_find(struct request_queue *q, sector_t offset);
+
/*
* each queue has an elevator_queue associated with it
*/
@@ -139,7 +144,7 @@ extern struct request *elv_former_request(struct request_queue *, struct request
extern struct request *elv_latter_request(struct request_queue *, struct request *);
extern int elv_register_queue(struct request_queue *q);
extern void elv_unregister_queue(struct request_queue *q);
-extern int elv_may_queue(struct request_queue *, int, int);
+extern int elv_may_queue(struct request_queue *, unsigned int);
extern void elv_completed_request(struct request_queue *, struct request *);
extern int elv_set_request(struct request_queue *q, struct request *rq,
struct bio *bio, gfp_t gfp_mask);
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 422630b8e588..cea41a124a80 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -52,10 +52,17 @@
#define VERSION_LEN 256
#define MAX_VOLUME_NAME 512
+#define MAX_PATH_LEN 64
+#define MAX_DEVICES 8
/*
* For superblock
*/
+struct f2fs_device {
+ __u8 path[MAX_PATH_LEN];
+ __le32 total_segments;
+} __packed;
+
struct f2fs_super_block {
__le32 magic; /* Magic Number */
__le16 major_ver; /* Major Version */
@@ -94,7 +101,8 @@ struct f2fs_super_block {
__le32 feature; /* defined features */
__u8 encryption_level; /* versioning level for encryption */
__u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
- __u8 reserved[871]; /* valid reserved region */
+ struct f2fs_device devs[MAX_DEVICES]; /* device list */
+ __u8 reserved[327]; /* valid reserved region */
} __packed;
/*
diff --git a/include/linux/fddidevice.h b/include/linux/fddidevice.h
index 9a79f0106da1..32c22cfb238b 100644
--- a/include/linux/fddidevice.h
+++ b/include/linux/fddidevice.h
@@ -26,7 +26,6 @@
#ifdef __KERNEL__
__be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev);
-int fddi_change_mtu(struct net_device *dev, int new_mtu);
struct net_device *alloc_fddidev(int sizeof_priv);
#endif
diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h
deleted file mode 100644
index a44794e508df..000000000000
--- a/include/linux/fence-array.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * fence-array: aggregates fence to be waited together
- *
- * Copyright (C) 2016 Collabora Ltd
- * Copyright (C) 2016 Advanced Micro Devices, Inc.
- * Authors:
- * Gustavo Padovan <gustavo@padovan.org>
- * Christian König <christian.koenig@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __LINUX_FENCE_ARRAY_H
-#define __LINUX_FENCE_ARRAY_H
-
-#include <linux/fence.h>
-
-/**
- * struct fence_array_cb - callback helper for fence array
- * @cb: fence callback structure for signaling
- * @array: reference to the parent fence array object
- */
-struct fence_array_cb {
- struct fence_cb cb;
- struct fence_array *array;
-};
-
-/**
- * struct fence_array - fence to represent an array of fences
- * @base: fence base class
- * @lock: spinlock for fence handling
- * @num_fences: number of fences in the array
- * @num_pending: fences in the array still pending
- * @fences: array of the fences
- */
-struct fence_array {
- struct fence base;
-
- spinlock_t lock;
- unsigned num_fences;
- atomic_t num_pending;
- struct fence **fences;
-};
-
-extern const struct fence_ops fence_array_ops;
-
-/**
- * fence_is_array - check if a fence is from the array subsclass
- *
- * Return true if it is a fence_array and false otherwise.
- */
-static inline bool fence_is_array(struct fence *fence)
-{
- return fence->ops == &fence_array_ops;
-}
-
-/**
- * to_fence_array - cast a fence to a fence_array
- * @fence: fence to cast to a fence_array
- *
- * Returns NULL if the fence is not a fence_array,
- * or the fence_array otherwise.
- */
-static inline struct fence_array *to_fence_array(struct fence *fence)
-{
- if (fence->ops != &fence_array_ops)
- return NULL;
-
- return container_of(fence, struct fence_array, base);
-}
-
-struct fence_array *fence_array_create(int num_fences, struct fence **fences,
- u64 context, unsigned seqno,
- bool signal_on_any);
-
-#endif /* __LINUX_FENCE_ARRAY_H */
diff --git a/include/linux/fence.h b/include/linux/fence.h
deleted file mode 100644
index 0d763053f97a..000000000000
--- a/include/linux/fence.h
+++ /dev/null
@@ -1,378 +0,0 @@
-/*
- * Fence mechanism for dma-buf to allow for asynchronous dma access
- *
- * Copyright (C) 2012 Canonical Ltd
- * Copyright (C) 2012 Texas Instruments
- *
- * Authors:
- * Rob Clark <robdclark@gmail.com>
- * Maarten Lankhorst <maarten.lankhorst@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __LINUX_FENCE_H
-#define __LINUX_FENCE_H
-
-#include <linux/err.h>
-#include <linux/wait.h>
-#include <linux/list.h>
-#include <linux/bitops.h>
-#include <linux/kref.h>
-#include <linux/sched.h>
-#include <linux/printk.h>
-#include <linux/rcupdate.h>
-
-struct fence;
-struct fence_ops;
-struct fence_cb;
-
-/**
- * struct fence - software synchronization primitive
- * @refcount: refcount for this fence
- * @ops: fence_ops associated with this fence
- * @rcu: used for releasing fence with kfree_rcu
- * @cb_list: list of all callbacks to call
- * @lock: spin_lock_irqsave used for locking
- * @context: execution context this fence belongs to, returned by
- * fence_context_alloc()
- * @seqno: the sequence number of this fence inside the execution context,
- * can be compared to decide which fence would be signaled later.
- * @flags: A mask of FENCE_FLAG_* defined below
- * @timestamp: Timestamp when the fence was signaled.
- * @status: Optional, only valid if < 0, must be set before calling
- * fence_signal, indicates that the fence has completed with an error.
- *
- * the flags member must be manipulated and read using the appropriate
- * atomic ops (bit_*), so taking the spinlock will not be needed most
- * of the time.
- *
- * FENCE_FLAG_SIGNALED_BIT - fence is already signaled
- * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called*
- * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
- * implementer of the fence for its own purposes. Can be used in different
- * ways by different fence implementers, so do not rely on this.
- *
- * Since atomic bitops are used, this is not guaranteed to be the case.
- * Particularly, if the bit was set, but fence_signal was called right
- * before this bit was set, it would have been able to set the
- * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
- * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting
- * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
- * after fence_signal was called, any enable_signaling call will have either
- * been completed, or never called at all.
- */
-struct fence {
- struct kref refcount;
- const struct fence_ops *ops;
- struct rcu_head rcu;
- struct list_head cb_list;
- spinlock_t *lock;
- u64 context;
- unsigned seqno;
- unsigned long flags;
- ktime_t timestamp;
- int status;
-};
-
-enum fence_flag_bits {
- FENCE_FLAG_SIGNALED_BIT,
- FENCE_FLAG_ENABLE_SIGNAL_BIT,
- FENCE_FLAG_USER_BITS, /* must always be last member */
-};
-
-typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb);
-
-/**
- * struct fence_cb - callback for fence_add_callback
- * @node: used by fence_add_callback to append this struct to fence::cb_list
- * @func: fence_func_t to call
- *
- * This struct will be initialized by fence_add_callback, additional
- * data can be passed along by embedding fence_cb in another struct.
- */
-struct fence_cb {
- struct list_head node;
- fence_func_t func;
-};
-
-/**
- * struct fence_ops - operations implemented for fence
- * @get_driver_name: returns the driver name.
- * @get_timeline_name: return the name of the context this fence belongs to.
- * @enable_signaling: enable software signaling of fence.
- * @signaled: [optional] peek whether the fence is signaled, can be null.
- * @wait: custom wait implementation, or fence_default_wait.
- * @release: [optional] called on destruction of fence, can be null
- * @fill_driver_data: [optional] callback to fill in free-form debug info
- * Returns amount of bytes filled, or -errno.
- * @fence_value_str: [optional] fills in the value of the fence as a string
- * @timeline_value_str: [optional] fills in the current value of the timeline
- * as a string
- *
- * Notes on enable_signaling:
- * For fence implementations that have the capability for hw->hw
- * signaling, they can implement this op to enable the necessary
- * irqs, or insert commands into cmdstream, etc. This is called
- * in the first wait() or add_callback() path to let the fence
- * implementation know that there is another driver waiting on
- * the signal (ie. hw->sw case).
- *
- * This function can be called called from atomic context, but not
- * from irq context, so normal spinlocks can be used.
- *
- * A return value of false indicates the fence already passed,
- * or some failure occurred that made it impossible to enable
- * signaling. True indicates successful enabling.
- *
- * fence->status may be set in enable_signaling, but only when false is
- * returned.
- *
- * Calling fence_signal before enable_signaling is called allows
- * for a tiny race window in which enable_signaling is called during,
- * before, or after fence_signal. To fight this, it is recommended
- * that before enable_signaling returns true an extra reference is
- * taken on the fence, to be released when the fence is signaled.
- * This will mean fence_signal will still be called twice, but
- * the second time will be a noop since it was already signaled.
- *
- * Notes on signaled:
- * May set fence->status if returning true.
- *
- * Notes on wait:
- * Must not be NULL, set to fence_default_wait for default implementation.
- * the fence_default_wait implementation should work for any fence, as long
- * as enable_signaling works correctly.
- *
- * Must return -ERESTARTSYS if the wait is intr = true and the wait was
- * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
- * timed out. Can also return other error values on custom implementations,
- * which should be treated as if the fence is signaled. For example a hardware
- * lockup could be reported like that.
- *
- * Notes on release:
- * Can be NULL, this function allows additional commands to run on
- * destruction of the fence. Can be called from irq context.
- * If pointer is set to NULL, kfree will get called instead.
- */
-
-struct fence_ops {
- const char * (*get_driver_name)(struct fence *fence);
- const char * (*get_timeline_name)(struct fence *fence);
- bool (*enable_signaling)(struct fence *fence);
- bool (*signaled)(struct fence *fence);
- signed long (*wait)(struct fence *fence, bool intr, signed long timeout);
- void (*release)(struct fence *fence);
-
- int (*fill_driver_data)(struct fence *fence, void *data, int size);
- void (*fence_value_str)(struct fence *fence, char *str, int size);
- void (*timeline_value_str)(struct fence *fence, char *str, int size);
-};
-
-void fence_init(struct fence *fence, const struct fence_ops *ops,
- spinlock_t *lock, u64 context, unsigned seqno);
-
-void fence_release(struct kref *kref);
-void fence_free(struct fence *fence);
-
-/**
- * fence_get - increases refcount of the fence
- * @fence: [in] fence to increase refcount of
- *
- * Returns the same fence, with refcount increased by 1.
- */
-static inline struct fence *fence_get(struct fence *fence)
-{
- if (fence)
- kref_get(&fence->refcount);
- return fence;
-}
-
-/**
- * fence_get_rcu - get a fence from a reservation_object_list with rcu read lock
- * @fence: [in] fence to increase refcount of
- *
- * Function returns NULL if no refcount could be obtained, or the fence.
- */
-static inline struct fence *fence_get_rcu(struct fence *fence)
-{
- if (kref_get_unless_zero(&fence->refcount))
- return fence;
- else
- return NULL;
-}
-
-/**
- * fence_put - decreases refcount of the fence
- * @fence: [in] fence to reduce refcount of
- */
-static inline void fence_put(struct fence *fence)
-{
- if (fence)
- kref_put(&fence->refcount, fence_release);
-}
-
-int fence_signal(struct fence *fence);
-int fence_signal_locked(struct fence *fence);
-signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout);
-int fence_add_callback(struct fence *fence, struct fence_cb *cb,
- fence_func_t func);
-bool fence_remove_callback(struct fence *fence, struct fence_cb *cb);
-void fence_enable_sw_signaling(struct fence *fence);
-
-/**
- * fence_is_signaled_locked - Return an indication if the fence is signaled yet.
- * @fence: [in] the fence to check
- *
- * Returns true if the fence was already signaled, false if not. Since this
- * function doesn't enable signaling, it is not guaranteed to ever return
- * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
- * haven't been called before.
- *
- * This function requires fence->lock to be held.
- */
-static inline bool
-fence_is_signaled_locked(struct fence *fence)
-{
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return true;
-
- if (fence->ops->signaled && fence->ops->signaled(fence)) {
- fence_signal_locked(fence);
- return true;
- }
-
- return false;
-}
-
-/**
- * fence_is_signaled - Return an indication if the fence is signaled yet.
- * @fence: [in] the fence to check
- *
- * Returns true if the fence was already signaled, false if not. Since this
- * function doesn't enable signaling, it is not guaranteed to ever return
- * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
- * haven't been called before.
- *
- * It's recommended for seqno fences to call fence_signal when the
- * operation is complete, it makes it possible to prevent issues from
- * wraparound between time of issue and time of use by checking the return
- * value of this function before calling hardware-specific wait instructions.
- */
-static inline bool
-fence_is_signaled(struct fence *fence)
-{
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return true;
-
- if (fence->ops->signaled && fence->ops->signaled(fence)) {
- fence_signal(fence);
- return true;
- }
-
- return false;
-}
-
-/**
- * fence_is_later - return if f1 is chronologically later than f2
- * @f1: [in] the first fence from the same context
- * @f2: [in] the second fence from the same context
- *
- * Returns true if f1 is chronologically later than f2. Both fences must be
- * from the same context, since a seqno is not re-used across contexts.
- */
-static inline bool fence_is_later(struct fence *f1, struct fence *f2)
-{
- if (WARN_ON(f1->context != f2->context))
- return false;
-
- return (int)(f1->seqno - f2->seqno) > 0;
-}
-
-/**
- * fence_later - return the chronologically later fence
- * @f1: [in] the first fence from the same context
- * @f2: [in] the second fence from the same context
- *
- * Returns NULL if both fences are signaled, otherwise the fence that would be
- * signaled last. Both fences must be from the same context, since a seqno is
- * not re-used across contexts.
- */
-static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
-{
- if (WARN_ON(f1->context != f2->context))
- return NULL;
-
- /*
- * can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been
- * set if enable_signaling wasn't called, and enabling that here is
- * overkill.
- */
- if (fence_is_later(f1, f2))
- return fence_is_signaled(f1) ? NULL : f1;
- else
- return fence_is_signaled(f2) ? NULL : f2;
-}
-
-signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
-signed long fence_wait_any_timeout(struct fence **fences, uint32_t count,
- bool intr, signed long timeout);
-
-/**
- * fence_wait - sleep until the fence gets signaled
- * @fence: [in] the fence to wait on
- * @intr: [in] if true, do an interruptible wait
- *
- * This function will return -ERESTARTSYS if interrupted by a signal,
- * or 0 if the fence was signaled. Other error values may be
- * returned on custom implementations.
- *
- * Performs a synchronous wait on this fence. It is assumed the caller
- * directly or indirectly holds a reference to the fence, otherwise the
- * fence might be freed before return, resulting in undefined behavior.
- */
-static inline signed long fence_wait(struct fence *fence, bool intr)
-{
- signed long ret;
-
- /* Since fence_wait_timeout cannot timeout with
- * MAX_SCHEDULE_TIMEOUT, only valid return values are
- * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
- */
- ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
-
- return ret < 0 ? ret : 0;
-}
-
-u64 fence_context_alloc(unsigned num);
-
-#define FENCE_TRACE(f, fmt, args...) \
- do { \
- struct fence *__ff = (f); \
- if (IS_ENABLED(CONFIG_FENCE_TRACE)) \
- pr_info("f %llu#%u: " fmt, \
- __ff->context, __ff->seqno, ##args); \
- } while (0)
-
-#define FENCE_WARN(f, fmt, args...) \
- do { \
- struct fence *__ff = (f); \
- pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
- ##args); \
- } while (0)
-
-#define FENCE_ERR(f, fmt, args...) \
- do { \
- struct fence *__ff = (f); \
- pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
- ##args); \
- } while (0)
-
-#endif /* __LINUX_FENCE_H */
diff --git a/include/linux/file.h b/include/linux/file.h
index 7444f5feda12..61eb82cbafba 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -17,7 +17,7 @@ struct file_operations;
struct vfsmount;
struct dentry;
struct path;
-extern struct file *alloc_file(struct path *, fmode_t mode,
+extern struct file *alloc_file(const struct path *, fmode_t mode,
const struct file_operations *fop);
static inline void fput_light(struct file *file, int fput_needed)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 1f09c521adfe..a0934e6c9bab 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -14,6 +14,7 @@
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>
+#include <linux/cryptohash.h>
#include <net/sch_generic.h>
@@ -402,14 +403,16 @@ struct bpf_prog {
u16 jited:1, /* Is our filter JIT'ed? */
gpl_compatible:1, /* Is filter GPL compatible? */
cb_access:1, /* Is control block accessed? */
- dst_needed:1; /* Do we need dst entry? */
+ dst_needed:1, /* Do we need dst entry? */
+ xdp_adjust_head:1; /* Adjusting pkt head? */
kmemcheck_bitfield_end(meta);
- u32 len; /* Number of filter blocks */
enum bpf_prog_type type; /* Type of BPF program */
+ u32 len; /* Number of filter blocks */
+ u32 digest[SHA_DIGEST_WORDS]; /* Program digest */
struct bpf_prog_aux *aux; /* Auxiliary fields */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
- unsigned int (*bpf_func)(const struct sk_buff *skb,
- const struct bpf_insn *filter);
+ unsigned int (*bpf_func)(const void *ctx,
+ const struct bpf_insn *insn);
/* Instructions for interpreter */
union {
struct sock_filter insns[0];
@@ -435,10 +438,11 @@ struct bpf_skb_data_end {
struct xdp_buff {
void *data;
void *data_end;
+ void *data_hard_start;
};
/* compute the linear packet data range [data, data_end) which
- * will be accessed by cls_bpf and act_bpf programs
+ * will be accessed by cls_bpf, act_bpf and lwt programs
*/
static inline void bpf_compute_data_end(struct sk_buff *skb)
{
@@ -498,16 +502,27 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
return BPF_PROG_RUN(prog, skb);
}
-static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
- struct xdp_buff *xdp)
+static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
+ struct xdp_buff *xdp)
{
- u32 ret;
+ /* Caller needs to hold rcu_read_lock() (!), otherwise program
+ * can be released while still running, or map elements could be
+ * freed early while still having concurrent users. XDP fastpath
+ * already takes rcu_read_lock() when fetching the program, so
+ * it's not necessary here anymore.
+ */
+ return BPF_PROG_RUN(prog, xdp);
+}
- rcu_read_lock();
- ret = BPF_PROG_RUN(prog, (void *)xdp);
- rcu_read_unlock();
+static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
+{
+ return prog->len * sizeof(struct bpf_insn);
+}
- return ret;
+static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog)
+{
+ return round_up(bpf_prog_insn_size(prog) +
+ sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
}
static inline unsigned int bpf_prog_size(unsigned int proglen)
@@ -590,7 +605,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
-bool bpf_helper_changes_skb_data(void *func);
+bool bpf_helper_changes_pkt_data(void *func);
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
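To make the RCU requirement documented on bpf_prog_run_xdp() concrete, a driver-side sketch (the buffer layout, headroom handling, and names are assumptions, not taken from the patch):

#include <linux/filter.h>
#include <linux/rcupdate.h>

/* Hypothetical receive-path helper: build an xdp_buff around a packet buffer
 * and run the attached program under rcu_read_lock(), as required now that
 * bpf_prog_run_xdp() no longer takes the lock itself. */
static u32 example_run_xdp(struct bpf_prog *prog, void *hard_start,
			   unsigned int headroom, unsigned int len)
{
	struct xdp_buff xdp;
	u32 act;

	xdp.data_hard_start = hard_start;
	xdp.data = hard_start + headroom;
	xdp.data_end = xdp.data + len;

	rcu_read_lock();
	act = bpf_prog_run_xdp(prog, &xdp);	/* XDP_PASS, XDP_DROP, ... */
	rcu_read_unlock();

	return act;
}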
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
new file mode 100644
index 000000000000..dba6e3c697c7
--- /dev/null
+++ b/include/linux/fpga/fpga-bridge.h
@@ -0,0 +1,60 @@
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+
+#ifndef _LINUX_FPGA_BRIDGE_H
+#define _LINUX_FPGA_BRIDGE_H
+
+struct fpga_bridge;
+
+/**
+ * struct fpga_bridge_ops - ops for low level FPGA bridge drivers
+ * @enable_show: returns the FPGA bridge's status
+ * @enable_set: set a FPGA bridge as enabled or disabled
+ * @fpga_bridge_remove: set FPGA into a specific state during driver remove
+ */
+struct fpga_bridge_ops {
+ int (*enable_show)(struct fpga_bridge *bridge);
+ int (*enable_set)(struct fpga_bridge *bridge, bool enable);
+ void (*fpga_bridge_remove)(struct fpga_bridge *bridge);
+};
+
+/**
+ * struct fpga_bridge - FPGA bridge structure
+ * @name: name of low level FPGA bridge
+ * @dev: FPGA bridge device
+ * @mutex: enforces exclusive reference to bridge
+ * @br_ops: pointer to struct of FPGA bridge ops
+ * @info: fpga image specific information
+ * @node: FPGA bridge list node
+ * @priv: low level driver private data
+ */
+struct fpga_bridge {
+ const char *name;
+ struct device dev;
+ struct mutex mutex; /* for exclusive reference to bridge */
+ const struct fpga_bridge_ops *br_ops;
+ struct fpga_image_info *info;
+ struct list_head node;
+ void *priv;
+};
+
+#define to_fpga_bridge(d) container_of(d, struct fpga_bridge, dev)
+
+struct fpga_bridge *of_fpga_bridge_get(struct device_node *node,
+ struct fpga_image_info *info);
+void fpga_bridge_put(struct fpga_bridge *bridge);
+int fpga_bridge_enable(struct fpga_bridge *bridge);
+int fpga_bridge_disable(struct fpga_bridge *bridge);
+
+int fpga_bridges_enable(struct list_head *bridge_list);
+int fpga_bridges_disable(struct list_head *bridge_list);
+void fpga_bridges_put(struct list_head *bridge_list);
+int fpga_bridge_get_to_list(struct device_node *np,
+ struct fpga_image_info *info,
+ struct list_head *bridge_list);
+
+int fpga_bridge_register(struct device *dev, const char *name,
+ const struct fpga_bridge_ops *br_ops, void *priv);
+void fpga_bridge_unregister(struct device *dev);
+
+#endif /* _LINUX_FPGA_BRIDGE_H */
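A minimal sketch of a low level bridge driver built on this interface (the hardware access and all example_* names are hypothetical; a real driver would gate actual bus traffic):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/fpga/fpga-bridge.h>

struct example_br_priv {
	bool enabled;		/* stand-in for real gate/register state */
};

static int example_enable_show(struct fpga_bridge *bridge)
{
	struct example_br_priv *priv = bridge->priv;

	return priv->enabled;
}

static int example_enable_set(struct fpga_bridge *bridge, bool enable)
{
	struct example_br_priv *priv = bridge->priv;

	/* A real driver would enable or disable bridge traffic here. */
	priv->enabled = enable;
	return 0;
}

static const struct fpga_bridge_ops example_br_ops = {
	.enable_show = example_enable_show,
	.enable_set = example_enable_set,
};

static int example_br_probe(struct platform_device *pdev)
{
	struct example_br_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	return fpga_bridge_register(&pdev->dev, "example FPGA bridge",
				    &example_br_ops, priv);
}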
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 0940bf45e2f2..16551d5eac36 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -65,11 +65,26 @@ enum fpga_mgr_states {
/*
* FPGA Manager flags
* FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
+ * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
*/
#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
+#define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
+
+/**
+ * struct fpga_image_info - information specific to a FPGA image
+ * @flags: boolean flags as defined above
+ * @enable_timeout_us: maximum time to enable traffic through bridge (uSec)
+ * @disable_timeout_us: maximum time to disable traffic through bridge (uSec)
+ */
+struct fpga_image_info {
+ u32 flags;
+ u32 enable_timeout_us;
+ u32 disable_timeout_us;
+};
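On the consuming side, a hedged sketch of how an image info might be filled in and passed to the manager (the device node, firmware name, and timeout values are made up for illustration):

#include <linux/err.h>
#include <linux/of.h>
#include <linux/fpga/fpga-mgr.h>

/* Hypothetical caller: request partial reconfiguration with the firmware
 * image "example_image.rbf" through the manager described by mgr_node. */
static int example_program_fpga(struct device_node *mgr_node)
{
	struct fpga_image_info info = {
		.flags = FPGA_MGR_PARTIAL_RECONFIG,
		.enable_timeout_us = 1000,
		.disable_timeout_us = 1000,
	};
	struct fpga_manager *mgr;
	int ret;

	mgr = of_fpga_mgr_get(mgr_node);
	if (IS_ERR(mgr))
		return PTR_ERR(mgr);

	ret = fpga_mgr_firmware_load(mgr, &info, "example_image.rbf");
	fpga_mgr_put(mgr);
	return ret;
}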
/**
* struct fpga_manager_ops - ops for low level fpga manager drivers
+ * @initial_header_size: Maximum number of bytes that should be passed into write_init
* @state: returns an enum value of the FPGA's state
 * @write_init: prepare the FPGA to receive configuration data
* @write: write count bytes of configuration data to the FPGA
@@ -81,11 +96,14 @@ enum fpga_mgr_states {
* called, so leaving them out is fine.
*/
struct fpga_manager_ops {
+ size_t initial_header_size;
enum fpga_mgr_states (*state)(struct fpga_manager *mgr);
- int (*write_init)(struct fpga_manager *mgr, u32 flags,
+ int (*write_init)(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
const char *buf, size_t count);
int (*write)(struct fpga_manager *mgr, const char *buf, size_t count);
- int (*write_complete)(struct fpga_manager *mgr, u32 flags);
+ int (*write_complete)(struct fpga_manager *mgr,
+ struct fpga_image_info *info);
void (*fpga_remove)(struct fpga_manager *mgr);
};
@@ -109,14 +127,17 @@ struct fpga_manager {
#define to_fpga_manager(d) container_of(d, struct fpga_manager, dev)
-int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags,
+int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
const char *buf, size_t count);
-int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
+int fpga_mgr_firmware_load(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
const char *image_name);
struct fpga_manager *of_fpga_mgr_get(struct device_node *node);
+struct fpga_manager *fpga_mgr_get(struct device *dev);
+
void fpga_mgr_put(struct fpga_manager *mgr);
int fpga_mgr_register(struct device *dev, const char *name,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index dc0478c07b2a..2ba074328894 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -28,7 +28,6 @@
#include <linux/uidgid.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
-#include <linux/blk_types.h>
#include <linux/workqueue.h>
#include <linux/percpu-rwsem.h>
#include <linux/delayed_call.h>
@@ -38,6 +37,7 @@
struct backing_dev_info;
struct bdi_writeback;
+struct bio;
struct export_operations;
struct hd_geometry;
struct iovec;
@@ -152,58 +152,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define CHECK_IOVEC_ONLY -1
/*
- * The below are the various read and write flags that we support. Some of
- * them include behavioral modifiers that send information down to the
- * block layer and IO scheduler. They should be used along with a req_op.
- * Terminology:
- *
- * The block layer uses device plugging to defer IO a little bit, in
- * the hope that we will see more IO very shortly. This increases
- * coalescing of adjacent IO and thus reduces the number of IOs we
- * have to send to the device. It also allows for better queuing,
- * if the IO isn't mergeable. If the caller is going to be waiting
- * for the IO, then he must ensure that the device is unplugged so
- * that the IO is dispatched to the driver.
- *
- * All IO is handled async in Linux. This is fine for background
- * writes, but for reads or writes that someone waits for completion
- * on, we want to notify the block layer and IO scheduler so that they
- * know about it. That allows them to make better scheduling
- * decisions. So when the below references 'sync' and 'async', it
- * is referencing this priority hint.
- *
- * With that in mind, the available types are:
- *
- * READ A normal read operation. Device will be plugged.
- * READ_SYNC A synchronous read. Device is not plugged, caller can
- * immediately wait on this read without caring about
- * unplugging.
- * WRITE A normal async write. Device will be plugged.
- * WRITE_SYNC Synchronous write. Identical to WRITE, but passes down
- * the hint that someone will be waiting on this IO
- * shortly. The write equivalent of READ_SYNC.
- * WRITE_ODIRECT Special case write for O_DIRECT only.
- * WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush.
- * WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on
- * non-volatile media on completion.
- * WRITE_FLUSH_FUA Combination of WRITE_FLUSH and FUA. The IO is preceded
- * by a cache flush and data is guaranteed to be on
- * non-volatile media on completion.
- *
- */
-#define RW_MASK REQ_OP_WRITE
-
-#define READ REQ_OP_READ
-#define WRITE REQ_OP_WRITE
-
-#define READ_SYNC REQ_SYNC
-#define WRITE_SYNC (REQ_SYNC | REQ_NOIDLE)
-#define WRITE_ODIRECT REQ_SYNC
-#define WRITE_FLUSH (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH)
-#define WRITE_FUA (REQ_SYNC | REQ_NOIDLE | REQ_FUA)
-#define WRITE_FLUSH_FUA (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH | REQ_FUA)
-
-/*
* Attribute flags. These should be or-ed together to figure out what
* has been changed!
*/
@@ -595,6 +543,7 @@ is_uncached_acl(struct posix_acl *acl)
#define IOP_LOOKUP 0x0002
#define IOP_NOFOLLOW 0x0004
#define IOP_XATTR 0x0008
+#define IOP_DEFAULT_READLINK 0x0010
/*
* Keep mostly read-only and often accessed (especially for
@@ -1778,11 +1727,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
unsigned long, loff_t *, int);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
+extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
+ struct inode *inode_out, loff_t pos_out,
+ u64 *len, bool is_dedupe);
extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out, u64 len);
+extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
+ struct inode *dest, loff_t destoff,
+ loff_t len, bool *is_same);
extern int vfs_dedupe_file_range(struct file *file,
struct file_dedupe_range *same);
+static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ u64 len)
+{
+ int ret;
+
+ sb_start_write(file_inode(file_out)->i_sb);
+ ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
+ sb_end_write(file_inode(file_out)->i_sb);
+
+ return ret;
+}
+
struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
@@ -2123,11 +2091,11 @@ extern int may_umount_tree(struct vfsmount *);
extern int may_umount(struct vfsmount *);
extern long do_mount(const char *, const char __user *,
const char *, unsigned long, void *);
-extern struct vfsmount *collect_mounts(struct path *);
+extern struct vfsmount *collect_mounts(const struct path *);
extern void drop_collected_mounts(struct vfsmount *);
extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
struct vfsmount *);
-extern int vfs_statfs(struct path *, struct kstatfs *);
+extern int vfs_statfs(const struct path *, struct kstatfs *);
extern int user_statfs(const char __user *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
extern int vfs_ustat(dev_t, struct kstatfs *);
@@ -2499,19 +2467,6 @@ extern void make_bad_inode(struct inode *);
extern bool is_bad_inode(struct inode *);
#ifdef CONFIG_BLOCK
-static inline bool op_is_write(unsigned int op)
-{
- return op == REQ_OP_READ ? false : true;
-}
-
-/*
- * return data direction, READ or WRITE
- */
-static inline int bio_data_dir(struct bio *bio)
-{
- return op_is_write(bio_op(bio)) ? WRITE : READ;
-}
-
extern void check_disk_size_change(struct gendisk *disk,
struct block_device *bdev);
extern int revalidate_disk(struct gendisk *);
@@ -2709,7 +2664,7 @@ extern struct file * open_exec(const char *);
/* fs/dcache.c -- generic fs support functions */
extern bool is_subdir(struct dentry *, struct dentry *);
-extern bool path_is_under(struct path *, struct path *);
+extern bool path_is_under(const struct path *, const struct path *);
extern char *file_path(struct file *, char *, int);
@@ -2782,7 +2737,6 @@ static inline void remove_inode_hash(struct inode *inode)
extern void inode_sb_list_add(struct inode *inode);
#ifdef CONFIG_BLOCK
-extern blk_qc_t submit_bio(struct bio *);
extern int bdev_read_only(struct block_device *);
#endif
extern int set_blocksize(struct block_device *, int);
@@ -2914,7 +2868,6 @@ extern int __page_symlink(struct inode *inode, const char *symname, int len,
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
-extern int generic_readlink(struct dentry *, char __user *, int);
extern void generic_fillattr(struct inode *, struct kstat *);
int vfs_getattr_nosec(struct path *path, struct kstat *stat);
extern int vfs_getattr(struct path *, struct kstat *);
@@ -2935,6 +2888,7 @@ extern int vfs_lstat(const char __user *, struct kstat *);
extern int vfs_fstat(unsigned int, struct kstat *);
extern int vfs_fstatat(int , const char __user *, struct kstat *, int);
extern const char *vfs_get_link(struct dentry *, struct delayed_call *);
+extern int vfs_readlink(struct dentry *, char __user *, int);
extern int __generic_block_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo,
@@ -2949,8 +2903,10 @@ extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
extern struct super_block *get_super(struct block_device *);
extern struct super_block *get_super_thawed(struct block_device *);
+extern struct super_block *get_super_exclusive_thawed(struct block_device *bdev);
extern struct super_block *get_active_super(struct block_device *bdev);
extern void drop_super(struct super_block *sb);
+extern void drop_super_exclusive(struct super_block *sb);
extern void iterate_supers(void (*)(struct super_block *, void *), void *);
extern void iterate_supers_type(struct file_system_type *,
void (*)(struct super_block *, void *), void *);
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index ff8b11b26f31..c074b670aa99 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -18,73 +18,9 @@
#include <crypto/skcipher.h>
#include <uapi/linux/fs.h>
-#define FS_KEY_DERIVATION_NONCE_SIZE 16
-#define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1
-
-#define FS_POLICY_FLAGS_PAD_4 0x00
-#define FS_POLICY_FLAGS_PAD_8 0x01
-#define FS_POLICY_FLAGS_PAD_16 0x02
-#define FS_POLICY_FLAGS_PAD_32 0x03
-#define FS_POLICY_FLAGS_PAD_MASK 0x03
-#define FS_POLICY_FLAGS_VALID 0x03
-
-/* Encryption algorithms */
-#define FS_ENCRYPTION_MODE_INVALID 0
-#define FS_ENCRYPTION_MODE_AES_256_XTS 1
-#define FS_ENCRYPTION_MODE_AES_256_GCM 2
-#define FS_ENCRYPTION_MODE_AES_256_CBC 3
-#define FS_ENCRYPTION_MODE_AES_256_CTS 4
-
-/**
- * Encryption context for inode
- *
- * Protector format:
- * 1 byte: Protector format (1 = this version)
- * 1 byte: File contents encryption mode
- * 1 byte: File names encryption mode
- * 1 byte: Flags
- * 8 bytes: Master Key descriptor
- * 16 bytes: Encryption Key derivation nonce
- */
-struct fscrypt_context {
- u8 format;
- u8 contents_encryption_mode;
- u8 filenames_encryption_mode;
- u8 flags;
- u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
- u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
-} __packed;
-
-/* Encryption parameters */
-#define FS_XTS_TWEAK_SIZE 16
-#define FS_AES_128_ECB_KEY_SIZE 16
-#define FS_AES_256_GCM_KEY_SIZE 32
-#define FS_AES_256_CBC_KEY_SIZE 32
-#define FS_AES_256_CTS_KEY_SIZE 32
-#define FS_AES_256_XTS_KEY_SIZE 64
-#define FS_MAX_KEY_SIZE 64
-
-#define FS_KEY_DESC_PREFIX "fscrypt:"
-#define FS_KEY_DESC_PREFIX_SIZE 8
-
-/* This is passed in from userspace into the kernel keyring */
-struct fscrypt_key {
- u32 mode;
- u8 raw[FS_MAX_KEY_SIZE];
- u32 size;
-} __packed;
-
-struct fscrypt_info {
- u8 ci_data_mode;
- u8 ci_filename_mode;
- u8 ci_flags;
- struct crypto_skcipher *ci_ctfm;
- struct key *ci_keyring_key;
- u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
-};
+#define FS_CRYPTO_BLOCK_SIZE 16
-#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
-#define FS_WRITE_PATH_FL 0x00000002
+struct fscrypt_info;
struct fscrypt_ctx {
union {
@@ -102,19 +38,6 @@ struct fscrypt_ctx {
u8 mode; /* Encryption mode for tfm */
};
-struct fscrypt_completion_result {
- struct completion completion;
- int res;
-};
-
-#define DECLARE_FS_COMPLETION_RESULT(ecr) \
- struct fscrypt_completion_result ecr = { \
- COMPLETION_INITIALIZER((ecr).completion), 0 }
-
-#define FS_FNAME_NUM_SCATTER_ENTRIES 4
-#define FS_CRYPTO_BLOCK_SIZE 16
-#define FS_FNAME_CRYPTO_DIGEST_SIZE 32
-
/**
* For encrypted symlinks, the ciphertext length is stored at the beginning
* of the string in little-endian format.
@@ -154,9 +77,15 @@ struct fscrypt_name {
#define fname_len(p) ((p)->disk_name.len)
/*
+ * fscrypt superblock flags
+ */
+#define FS_CFLG_OWN_PAGES (1U << 1)
+
+/*
 * crypto operations for filesystems
*/
struct fscrypt_operations {
+ unsigned int flags;
int (*get_context)(struct inode *, void *, size_t);
int (*key_prefix)(struct inode *, u8 **);
int (*prepare_context)(struct inode *);
@@ -206,7 +135,7 @@ static inline struct page *fscrypt_control_page(struct page *page)
#endif
}
-static inline int fscrypt_has_encryption_key(struct inode *inode)
+static inline int fscrypt_has_encryption_key(const struct inode *inode)
{
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
return (inode->i_crypt_info != NULL);
@@ -238,25 +167,25 @@ static inline void fscrypt_set_d_op(struct dentry *dentry)
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
/* crypto.c */
extern struct kmem_cache *fscrypt_info_cachep;
-int fscrypt_initialize(void);
-
-extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t);
+extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t);
-extern int fscrypt_decrypt_page(struct page *);
+extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
+ unsigned int, unsigned int,
+ u64, gfp_t);
+extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
+ unsigned int, u64);
extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
extern void fscrypt_pullback_bio_page(struct page **, bool);
extern void fscrypt_restore_control_page(struct page *);
-extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t,
+extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
unsigned int);
/* policy.c */
-extern int fscrypt_process_policy(struct file *, const struct fscrypt_policy *);
-extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *);
+extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
+extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
extern int fscrypt_inherit_context(struct inode *, struct inode *,
void *, bool);
/* keyinfo.c */
-extern int get_crypt_info(struct inode *);
extern int fscrypt_get_encryption_info(struct inode *);
extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
@@ -264,8 +193,8 @@ extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
int lookup, struct fscrypt_name *);
extern void fscrypt_free_filename(struct fscrypt_name *);
-extern u32 fscrypt_fname_encrypted_size(struct inode *, u32);
-extern int fscrypt_fname_alloc_buffer(struct inode *, u32,
+extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32);
+extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
struct fscrypt_str *);
extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
@@ -275,7 +204,7 @@ extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
#endif
/* crypto.c */
-static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i,
+static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(const struct inode *i,
gfp_t f)
{
return ERR_PTR(-EOPNOTSUPP);
@@ -286,13 +215,18 @@ static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c)
return;
}
-static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i,
- struct page *p, gfp_t f)
+static inline struct page *fscrypt_notsupp_encrypt_page(const struct inode *i,
+ struct page *p,
+ unsigned int len,
+ unsigned int offs,
+ u64 lblk_num, gfp_t f)
{
return ERR_PTR(-EOPNOTSUPP);
}
-static inline int fscrypt_notsupp_decrypt_page(struct page *p)
+static inline int fscrypt_notsupp_decrypt_page(const struct inode *i, struct page *p,
+ unsigned int len, unsigned int offs,
+ u64 lblk_num)
{
return -EOPNOTSUPP;
}
@@ -313,21 +247,21 @@ static inline void fscrypt_notsupp_restore_control_page(struct page *p)
return;
}
-static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p,
+static inline int fscrypt_notsupp_zeroout_range(const struct inode *i, pgoff_t p,
sector_t s, unsigned int f)
{
return -EOPNOTSUPP;
}
/* policy.c */
-static inline int fscrypt_notsupp_process_policy(struct file *f,
- const struct fscrypt_policy *p)
+static inline int fscrypt_notsupp_ioctl_set_policy(struct file *f,
+ const void __user *arg)
{
return -EOPNOTSUPP;
}
-static inline int fscrypt_notsupp_get_policy(struct inode *i,
- struct fscrypt_policy *p)
+static inline int fscrypt_notsupp_ioctl_get_policy(struct file *f,
+ void __user *arg)
{
return -EOPNOTSUPP;
}
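
A minimal sketch of how a filesystem's write path might call the reworked helper, which now takes an explicit length, offset and logical block number; the function and variable names below are hypothetical and not part of this patch:

	static int example_encrypt_block(struct inode *inode, struct page *page,
					 u64 lblk_num)
	{
		struct page *ciphertext_page;

		/* Encrypt the whole page: offset 0, length PAGE_SIZE. */
		ciphertext_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
						       lblk_num, GFP_NOFS);
		if (IS_ERR(ciphertext_page))
			return PTR_ERR(ciphertext_page);

		/* ... submit ciphertext_page for write-out ... */

		/* Release the bounce page when done with it. */
		fscrypt_restore_control_page(ciphertext_page);
		return 0;
	}
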
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
index 649e9171a9b3..3efa3b861d44 100644
--- a/include/linux/fsl/guts.h
+++ b/include/linux/fsl/guts.h
@@ -29,83 +29,112 @@
* #ifdefs.
*/
struct ccsr_guts {
- __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */
- __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */
- __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */
- __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */
- __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */
- __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */
+ u32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */
+ u32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */
+ u32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and
+ * Control Register
+ */
+ u32 pordevsr; /* 0x.000c - POR I/O Device Status Register */
+ u32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */
+ u32 pordevsr2; /* 0x.0014 - POR device status register 2 */
u8 res018[0x20 - 0x18];
- __be32 porcir; /* 0x.0020 - POR Configuration Information Register */
+ u32 porcir; /* 0x.0020 - POR Configuration Information
+ * Register
+ */
u8 res024[0x30 - 0x24];
- __be32 gpiocr; /* 0x.0030 - GPIO Control Register */
+ u32 gpiocr; /* 0x.0030 - GPIO Control Register */
u8 res034[0x40 - 0x34];
- __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */
+ u32 gpoutdr; /* 0x.0040 - General-Purpose Output Data
+ * Register
+ */
u8 res044[0x50 - 0x44];
- __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */
+ u32 gpindr; /* 0x.0050 - General-Purpose Input Data
+ * Register
+ */
u8 res054[0x60 - 0x54];
- __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */
- __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */
- __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */
+ u32 pmuxcr; /* 0x.0060 - Alternate Function Signal
+ * Multiplex Control
+ */
+ u32 pmuxcr2; /* 0x.0064 - Alternate function signal
+ * multiplex control 2
+ */
+ u32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */
u8 res06c[0x70 - 0x6c];
- __be32 devdisr; /* 0x.0070 - Device Disable Control */
+ u32 devdisr; /* 0x.0070 - Device Disable Control */
#define CCSR_GUTS_DEVDISR_TB1 0x00001000
#define CCSR_GUTS_DEVDISR_TB0 0x00004000
- __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */
+ u32 devdisr2; /* 0x.0074 - Device Disable Control 2 */
u8 res078[0x7c - 0x78];
- __be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */
- __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */
- __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */
- __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */
- __be32 pmcdr; /* 0x.008c - 4Power management clock disable register */
- __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */
- __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */
- __be32 ectrstcr; /* 0x.0098 - Exception reset control register */
- __be32 autorstsr; /* 0x.009c - Automatic reset status register */
- __be32 pvr; /* 0x.00a0 - Processor Version Register */
- __be32 svr; /* 0x.00a4 - System Version Register */
+ u32 pmjcr; /* 0x.007c - 4 Power Management Jog Control
+ * Register
+ */
+ u32 powmgtcsr; /* 0x.0080 - Power Management Status and
+ * Control Register
+ */
+ u32 pmrccr; /* 0x.0084 - Power Management Reset Counter
+ * Configuration Register
+ */
+ u32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter
+ * Configuration Register
+ */
+ u32 pmcdr; /* 0x.008c - 4Power management clock disable
+ * register
+ */
+ u32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */
+ u32 rstrscr; /* 0x.0094 - Reset Request Status and
+ * Control Register
+ */
+ u32 ectrstcr; /* 0x.0098 - Exception reset control register */
+ u32 autorstsr; /* 0x.009c - Automatic reset status register */
+ u32 pvr; /* 0x.00a0 - Processor Version Register */
+ u32 svr; /* 0x.00a4 - System Version Register */
u8 res0a8[0xb0 - 0xa8];
- __be32 rstcr; /* 0x.00b0 - Reset Control Register */
+ u32 rstcr; /* 0x.00b0 - Reset Control Register */
u8 res0b4[0xc0 - 0xb4];
- __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register
+ u32 iovselsr; /* 0x.00c0 - I/O voltage select status register
Called 'elbcvselcr' on 86xx SOCs */
u8 res0c4[0x100 - 0xc4];
- __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers
+ u32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers
There are 16 registers */
u8 res140[0x224 - 0x140];
- __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */
- __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */
+ u32 iodelay1; /* 0x.0224 - IO delay control register 1 */
+ u32 iodelay2; /* 0x.0228 - IO delay control register 2 */
u8 res22c[0x604 - 0x22c];
- __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */
+ u32 pamubypenr; /* 0x.604 - PAMU bypass enable register */
u8 res608[0x800 - 0x608];
- __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */
+ u32 clkdvdr; /* 0x.0800 - Clock Divide Register */
u8 res804[0x900 - 0x804];
- __be32 ircr; /* 0x.0900 - Infrared Control Register */
+ u32 ircr; /* 0x.0900 - Infrared Control Register */
u8 res904[0x908 - 0x904];
- __be32 dmacr; /* 0x.0908 - DMA Control Register */
+ u32 dmacr; /* 0x.0908 - DMA Control Register */
u8 res90c[0x914 - 0x90c];
- __be32 elbccr; /* 0x.0914 - eLBC Control Register */
+ u32 elbccr; /* 0x.0914 - eLBC Control Register */
u8 res918[0xb20 - 0x918];
- __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */
- __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */
- __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */
+ u32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */
+ u32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */
+ u32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */
u8 resb2c[0xe00 - 0xb2c];
- __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */
+ u32 clkocr; /* 0x.0e00 - Clock Out Select Register */
u8 rese04[0xe10 - 0xe04];
- __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */
+ u32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */
u8 rese14[0xe20 - 0xe14];
- __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */
- __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */
+ u32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */
+ u32 cpfor; /* 0x.0e24 - L2 charge pump fuse override
+ * register
+ */
u8 rese28[0xf04 - 0xe28];
- __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */
- __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */
+ u32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */
+ u32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 1 */
u8 resf0c[0xf2c - 0xf0c];
- __be32 itcr; /* 0x.0f2c - Internal transaction control register */
+ u32 itcr; /* 0x.0f2c - Internal transaction control
+ * register
+ */
u8 resf30[0xf40 - 0xf30];
- __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */
- __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */
+ u32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */
+ u32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 1 */
} __attribute__ ((packed));
+u32 fsl_guts_get_svr(void);
/* Alternate function signal multiplex control */
#define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x))
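
A rough illustration of a consumer of the newly exported helper; the function below and its revision check are hypothetical, not taken from this patch:

	#include <linux/fsl/guts.h>

	static bool example_is_silicon_rev_1_0(void)
	{
		u32 svr = fsl_guts_get_svr();

		/*
		 * On many QorIQ parts the low byte of the SVR carries the
		 * silicon revision; the exact mask is SoC-specific.
		 */
		return (svr & 0xff) == 0x10;
	}
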
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index f2912914141a..60cef8227534 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -100,6 +100,7 @@ struct fsl_usb2_platform_data {
unsigned already_suspended:1;
unsigned has_fsl_erratum_a007792:1;
unsigned has_fsl_erratum_a005275:1;
+ unsigned has_fsl_erratum_a005697:1;
unsigned check_phy_clk_valid:1;
/* register save area for suspend/resume */
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index b8bcc058e031..b43d3f5bd9ea 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -17,7 +17,7 @@
#include <linux/bug.h>
/* Notify this dentry's parent about a child's events. */
-static inline int fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
+static inline int fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask)
{
if (!dentry)
dentry = path->dentry;
@@ -28,7 +28,7 @@ static inline int fsnotify_parent(struct path *path, struct dentry *dentry, __u3
/* simple call site for access decisions */
static inline int fsnotify_perm(struct file *file, int mask)
{
- struct path *path = &file->f_path;
+ const struct path *path = &file->f_path;
/*
* Do not use file_inode() here or anywhere in this file to get the
 * inode. That would break *notify on overlayfs.
@@ -176,7 +176,7 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
*/
static inline void fsnotify_access(struct file *file)
{
- struct path *path = &file->f_path;
+ const struct path *path = &file->f_path;
struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_ACCESS;
@@ -194,7 +194,7 @@ static inline void fsnotify_access(struct file *file)
*/
static inline void fsnotify_modify(struct file *file)
{
- struct path *path = &file->f_path;
+ const struct path *path = &file->f_path;
struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_MODIFY;
@@ -212,7 +212,7 @@ static inline void fsnotify_modify(struct file *file)
*/
static inline void fsnotify_open(struct file *file)
{
- struct path *path = &file->f_path;
+ const struct path *path = &file->f_path;
struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_OPEN;
@@ -228,7 +228,7 @@ static inline void fsnotify_open(struct file *file)
*/
static inline void fsnotify_close(struct file *file)
{
- struct path *path = &file->f_path;
+ const struct path *path = &file->f_path;
struct inode *inode = path->dentry->d_inode;
fmode_t mode = file->f_mode;
__u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 79467b239fcf..487246546ebe 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -96,7 +96,7 @@ struct fsnotify_ops {
struct inode *inode,
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
- u32 mask, void *data, int data_type,
+ u32 mask, const void *data, int data_type,
const unsigned char *file_name, u32 cookie);
void (*free_group_priv)(struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
@@ -245,9 +245,9 @@ struct fsnotify_mark {
/* called from the vfs helpers */
/* main fsnotify call to send events */
-extern int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
+extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
const unsigned char *name, u32 cookie);
-extern int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask);
+extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask);
extern void __fsnotify_inode_delete(struct inode *inode);
extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
extern u32 fsnotify_get_cookie(void);
@@ -323,8 +323,6 @@ extern void fsnotify_init_mark(struct fsnotify_mark *mark, void (*free_mark)(str
extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode);
/* find (and take a reference) to a mark associated with group and vfsmount */
extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt);
-/* copy the values from old into new */
-extern void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old);
/* set the ignored_mask of a mark */
extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask);
/* set the mask of a mark (might pin the object into memory */
@@ -357,13 +355,13 @@ extern void fsnotify_init_event(struct fsnotify_event *event,
#else
-static inline int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
+static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
const unsigned char *name, u32 cookie)
{
return 0;
}
-static inline int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
+static inline int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask)
{
return 0;
}
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index b3d34d3e0e7e..3633e8beff39 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -398,6 +398,7 @@ int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
+void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);
@@ -645,6 +646,7 @@ static inline unsigned long ftrace_location(unsigned long ip)
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
+#define ftrace_ops_set_global_filter(ops) do { } while (0)
static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
size_t cnt, loff_t *ppos) { return -ENODEV; }
@@ -945,6 +947,10 @@ extern int __disable_trace_on_warning;
#define INIT_TRACE_RECURSION .trace_recursion = 0,
#endif
+int tracepoint_printk_sysctl(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 6435f46d6e13..7c5b694864cd 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -1,14 +1,14 @@
#ifndef _LINUX_FUTEX_H
#define _LINUX_FUTEX_H
+#include <linux/ktime.h>
#include <uapi/linux/futex.h>
struct inode;
struct mm_struct;
struct task_struct;
-union ktime;
-long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout,
+long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
u32 __user *uaddr2, u32 val2, u32 val3);
extern int
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 851671742790..8bd28ce6d76e 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -17,8 +17,9 @@ enum fwnode_type {
FWNODE_OF,
FWNODE_ACPI,
FWNODE_ACPI_DATA,
+ FWNODE_ACPI_STATIC,
FWNODE_PDATA,
- FWNODE_IRQCHIP,
+ FWNODE_IRQCHIP
};
struct fwnode_handle {
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index e0341af6950e..76f39754e7b0 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -146,15 +146,6 @@ enum {
DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
};
-#define BLK_SCSI_MAX_CMDS (256)
-#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
-
-struct blk_scsi_cmd_filter {
- unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
- unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
- struct kobject kobj;
-};
-
struct disk_part_tbl {
struct rcu_head rcu_head;
int len;
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index 667c31101b8b..377257d8f7e3 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -259,16 +259,7 @@ static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
* {{{2
*/
#define ZZZ_genl_family CONCAT_(GENL_MAGIC_FAMILY, _genl_family)
-static struct genl_family ZZZ_genl_family __read_mostly = {
- .id = GENL_ID_GENERATE,
- .name = __stringify(GENL_MAGIC_FAMILY),
- .version = GENL_MAGIC_VERSION,
-#ifdef GENL_MAGIC_FAMILY_HDRSZ
- .hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ),
-#endif
- .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1,
-};
-
+static struct genl_family ZZZ_genl_family;
/*
* Magic: define multicast groups
* Magic: define multicast group registration helper
@@ -302,11 +293,23 @@ static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \
#undef GENL_mc_group
#define GENL_mc_group(group)
+static struct genl_family ZZZ_genl_family __ro_after_init = {
+ .name = __stringify(GENL_MAGIC_FAMILY),
+ .version = GENL_MAGIC_VERSION,
+#ifdef GENL_MAGIC_FAMILY_HDRSZ
+ .hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ),
+#endif
+ .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1,
+ .ops = ZZZ_genl_ops,
+ .n_ops = ARRAY_SIZE(ZZZ_genl_ops),
+ .mcgrps = ZZZ_genl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(ZZZ_genl_mcgrps),
+ .module = THIS_MODULE,
+};
+
int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void)
{
- return genl_register_family_with_ops_groups(&ZZZ_genl_family, \
- ZZZ_genl_ops, \
- ZZZ_genl_mcgrps);
+ return genl_register_family(&ZZZ_genl_family);
}
void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void)
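
The hunks above reflect the move from genl_register_family_with_ops_groups() to carrying the ops and multicast groups inside the family itself. Outside the CONCAT_() macro magic, the new idiom looks roughly like this sketch; family name, version and ops table are placeholders:

	#include <linux/module.h>
	#include <net/genetlink.h>

	static const struct genl_ops example_ops[] = {
		/* { .cmd = ..., .doit = ... }, */
	};

	static struct genl_family example_family __ro_after_init = {
		.name		= "example",
		.version	= 1,
		.maxattr	= 0,
		.module		= THIS_MODULE,
		.ops		= example_ops,
		.n_ops		= ARRAY_SIZE(example_ops),
	};

	static int __init example_init(void)
	{
		return genl_register_family(&example_family);
	}
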
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f8041f9de31e..4175dca4ac39 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -506,6 +506,8 @@ extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);
struct page_frag_cache;
+extern void __page_frag_drain(struct page *page, unsigned int order,
+ unsigned int count);
extern void *__alloc_page_frag(struct page_frag_cache *nc,
unsigned int fragsz, gfp_t gfp_mask);
extern void __free_page_frag(void *addr);
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 24e2cc56beb1..c2748accea71 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -82,8 +82,6 @@ enum single_ended_mode {
* implies that if the chip supports IRQs, these IRQs need to be threaded
* as the chip access may sleep when e.g. reading out the IRQ status
* registers.
- * @irq_not_threaded: flag must be set if @can_sleep is set but the
- * IRQs don't need to be threaded
* @read_reg: reader function for generic GPIO
* @write_reg: writer function for generic GPIO
* @pin2mask: some generic GPIO controllers work with the big-endian bits
@@ -91,7 +89,7 @@ enum single_ended_mode {
* bit. This callback assigns the right bit mask.
* @reg_dat: data (in) register for generic GPIO
* @reg_set: output set register (out=high) for generic GPIO
- * @reg_clk: output clear register (out=low) for generic GPIO
+ * @reg_clr: output clear register (out=low) for generic GPIO
* @reg_dir: direction setting register for generic GPIO
* @bgpio_bits: number of register bits used for a generic GPIO i.e.
* <register width> * 8
@@ -109,8 +107,10 @@ enum single_ended_mode {
* for GPIO IRQs, provided by GPIO driver
* @irq_default_type: default IRQ triggering type applied during GPIO driver
* initialization, provided by GPIO driver
- * @irq_parent: GPIO IRQ chip parent/bank linux irq number,
- * provided by GPIO driver
+ * @irq_chained_parent: GPIO IRQ chip parent/bank linux irq number,
+ * provided by GPIO driver for chained interrupt (not for nested
+ * interrupts).
+ * @irq_nested: True if set, the interrupt handling is nested.
* @irq_need_valid_mask: If set core allocates @irq_valid_mask with all
* bits set to one
* @irq_valid_mask: If not %NULL holds bitmask of GPIOs which are valid to
@@ -166,7 +166,6 @@ struct gpio_chip {
u16 ngpio;
const char *const *names;
bool can_sleep;
- bool irq_not_threaded;
#if IS_ENABLED(CONFIG_GPIO_GENERIC)
unsigned long (*read_reg)(void __iomem *reg);
@@ -192,7 +191,8 @@ struct gpio_chip {
unsigned int irq_base;
irq_flow_handler_t irq_handler;
unsigned int irq_default_type;
- int irq_parent;
+ int irq_chained_parent;
+ bool irq_nested;
bool irq_need_valid_mask;
unsigned long *irq_valid_mask;
struct lock_class_key *lock_key;
@@ -270,24 +270,40 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
int parent_irq,
irq_flow_handler_t parent_handler);
+void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ int parent_irq);
+
int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type,
+ bool nested,
struct lock_class_key *lock_key);
+/* FIXME: I assume threaded IRQchips do not have the lockdep problem */
+static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type)
+{
+ return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq,
+ handler, type, true, NULL);
+}
+
#ifdef CONFIG_LOCKDEP
#define gpiochip_irqchip_add(...) \
( \
({ \
static struct lock_class_key _key; \
- _gpiochip_irqchip_add(__VA_ARGS__, &_key); \
+ _gpiochip_irqchip_add(__VA_ARGS__, false, &_key); \
}) \
)
#else
#define gpiochip_irqchip_add(...) \
- _gpiochip_irqchip_add(__VA_ARGS__, NULL)
+ _gpiochip_irqchip_add(__VA_ARGS__, false, NULL)
#endif
#endif /* CONFIG_GPIOLIB_IRQCHIP */
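
A sketch of how a sleeping (e.g. I2C) GPIO expander driver might use the new nested variants; struct example_chip, example_irq_chip and parent_irq stand in for real driver data and are not part of this patch:

	#include <linux/gpio/driver.h>

	static int example_setup_irqs(struct example_chip *chip, int parent_irq)
	{
		int ret;

		/* Nested: handlers may sleep, no chained flow handler. */
		ret = gpiochip_irqchip_add_nested(&chip->gpio_chip,
						  &example_irq_chip, 0,
						  handle_simple_irq,
						  IRQ_TYPE_NONE);
		if (ret)
			return ret;

		gpiochip_set_nested_irqchip(&chip->gpio_chip, &example_irq_chip,
					    parent_irq);
		return 0;
	}
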
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index ee2d8c6f9130..0b71024c082c 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -2,7 +2,6 @@
#define _GPIO_KEYS_H
struct device;
-struct gpio_desc;
/**
* struct gpio_keys_button - configuration parameters
@@ -18,7 +17,6 @@ struct gpio_desc;
* disable button via sysfs
* @value: axis value for %EV_ABS
* @irq: Irq number in case of interrupt keys
- * @gpiod: GPIO descriptor
*/
struct gpio_keys_button {
unsigned int code;
@@ -31,7 +29,6 @@ struct gpio_keys_button {
bool can_disable;
int value;
unsigned int irq;
- struct gpio_desc *gpiod;
};
/**
@@ -46,7 +43,7 @@ struct gpio_keys_button {
* @name: input device name
*/
struct gpio_keys_platform_data {
- struct gpio_keys_button *buttons;
+ const struct gpio_keys_button *buttons;
int nbuttons;
unsigned int poll_interval;
unsigned int rep:1;
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index e31bcd4c7859..97585d9679f3 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -93,8 +93,6 @@ static __inline__ void debug_frame(const struct sk_buff *skb)
int hdlc_open(struct net_device *dev);
/* Must be called by hardware driver when HDLC device is being closed */
void hdlc_close(struct net_device *dev);
-/* May be used by hardware driver */
-int hdlc_change_mtu(struct net_device *dev, int new_mtu);
/* Must be pointed to by hw driver's dev->netdev_ops->ndo_start_xmit */
netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev);
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index e9744202fa29..edbb4fc674ed 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -78,6 +78,8 @@ enum hdmi_picture_aspect {
HDMI_PICTURE_ASPECT_NONE,
HDMI_PICTURE_ASPECT_4_3,
HDMI_PICTURE_ASPECT_16_9,
+ HDMI_PICTURE_ASPECT_64_27,
+ HDMI_PICTURE_ASPECT_256_135,
HDMI_PICTURE_ASPECT_RESERVED,
};
diff --git a/include/linux/hid.h b/include/linux/hid.h
index b2ec82712baa..28f38e2b8f30 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -231,7 +231,11 @@ struct hid_item {
#define HID_DG_TAP 0x000d0035
#define HID_DG_TABLETFUNCTIONKEY 0x000d0039
#define HID_DG_PROGRAMCHANGEKEY 0x000d003a
+#define HID_DG_BATTERYSTRENGTH 0x000d003b
#define HID_DG_INVERT 0x000d003c
+#define HID_DG_TILT_X 0x000d003d
+#define HID_DG_TILT_Y 0x000d003e
+#define HID_DG_TWIST 0x000d0041
#define HID_DG_TIPSWITCH 0x000d0042
#define HID_DG_TIPSWITCH2 0x000d0043
#define HID_DG_BARRELSWITCH 0x000d0044
@@ -479,6 +483,7 @@ struct hid_input {
struct list_head list;
struct hid_report *report;
struct input_dev *input;
+ bool registered;
};
enum hid_type {
diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h
index 8ec23fb0b412..402f99e328d4 100644
--- a/include/linux/hippidevice.h
+++ b/include/linux/hippidevice.h
@@ -32,7 +32,6 @@ struct hippi_cb {
};
__be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
-int hippi_change_mtu(struct net_device *dev, int new_mtu);
int hippi_mac_addr(struct net_device *dev, void *p);
int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
struct net_device *alloc_hippi_dev(int sizeof_priv);
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 5e00f80b1535..cdab81ba29f8 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -228,8 +228,8 @@ static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t t
static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
{
- timer->node.expires.tv64 = tv64;
- timer->_softexpires.tv64 = tv64;
+ timer->node.expires = tv64;
+ timer->_softexpires = tv64;
}
static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
@@ -256,11 +256,11 @@ static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
{
- return timer->node.expires.tv64;
+ return timer->node.expires;
}
static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
{
- return timer->_softexpires.tv64;
+ return timer->_softexpires;
}
static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
@@ -297,7 +297,7 @@ extern void hrtimer_peek_ahead_timers(void);
* this resolution values.
*/
# define HIGH_RES_NSEC 1
-# define KTIME_HIGH_RES (ktime_t) { .tv64 = HIGH_RES_NSEC }
+# define KTIME_HIGH_RES (HIGH_RES_NSEC)
# define MONOTONIC_RES_NSEC HIGH_RES_NSEC
# define KTIME_MONOTONIC_RES KTIME_HIGH_RES
@@ -333,7 +333,7 @@ __hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
* hrtimer_start_range_ns() to prevent short timeouts.
*/
if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
- rem.tv64 -= hrtimer_resolution;
+ rem -= hrtimer_resolution;
return rem;
}
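
These hunks are fallout from ktime_t losing its union wrapper and becoming a plain s64. A before/after sketch of what callers now look like, with invented variable names:

	static ktime_t example_clamp_expiry(ktime_t expires, ktime_t now)
	{
		/* Previously: if (expires.tv64 < now.tv64) expires.tv64 = now.tv64; */
		if (expires < now)
			expires = now;
		return expires;
	}
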
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index e35e6de633b9..97e478d6b690 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -1,12 +1,12 @@
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H
-extern int do_huge_pmd_anonymous_page(struct fault_env *fe);
+extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
struct vm_area_struct *vma);
-extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd);
-extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd);
+extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
+extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
unsigned long addr,
pmd_t *pmd,
@@ -142,7 +142,7 @@ static inline int hpage_nr_pages(struct page *page)
return 1;
}
-extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd);
+extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *huge_zero_page;
@@ -189,6 +189,8 @@ static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
+static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
unsigned long address, bool freeze, struct page *page) {}
@@ -210,7 +212,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
return NULL;
}
-static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd)
+static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
return 0;
}
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index 34a0dc18f327..bee0827766a3 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -30,8 +30,7 @@
* Must not be NULL. *OBSOLETE*
* @read: New API. drivers can fill up to max bytes of data
* into the buffer. The buffer is aligned for any type
- * and max is guaranteed to be >= to that alignment
- * (either 4 or 8 depending on architecture).
+ * and max is a multiple of 4 and >= 32 bytes.
* @priv: Private data, for use by the RNG driver.
* @quality: Estimation of true entropy in RNG's bitstream
* (per mill).
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 9d2f8bde7d12..78d59dba563e 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -298,8 +298,8 @@ enum hwmon_pwm_attributes {
* Channel number
* The function returns the file permissions.
* If the return value is 0, no attribute will be created.
- * @read: Read callback. Optional. If not provided, attributes
- * will not be readable.
+ * @read: Read callback for data attributes. Mandatory if readable
+ * data attributes are present.
* Parameters are:
* @dev: Pointer to hardware monitoring device
* @type: Sensor type
@@ -308,8 +308,19 @@ enum hwmon_pwm_attributes {
* Channel number
* @val: Pointer to returned value
* The function returns 0 on success or a negative error number.
- * @write: Write callback. Optional. If not provided, attributes
- * will not be writable.
+ * @read_string:
+ * Read callback for string attributes. Mandatory if string
+ * attributes are present.
+ * Parameters are:
+ * @dev: Pointer to hardware monitoring device
+ * @type: Sensor type
+ * @attr: Sensor attribute
+ * @channel:
+ * Channel number
+ * @str: Pointer to returned string
+ * The function returns 0 on success or a negative error number.
+ * @write: Write callback for data attributes. Mandatory if writeable
+ * data attributes are present.
* Parameters are:
* @dev: Pointer to hardware monitoring device
* @type: Sensor type
@@ -324,6 +335,8 @@ struct hwmon_ops {
u32 attr, int channel);
int (*read)(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val);
+ int (*read_string)(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, char **str);
int (*write)(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val);
};
@@ -349,7 +362,9 @@ struct hwmon_chip_info {
const struct hwmon_channel_info **info;
};
+/* hwmon_device_register() is deprecated */
struct device *hwmon_device_register(struct device *dev);
+
struct device *
hwmon_device_register_with_groups(struct device *dev, const char *name,
void *drvdata,
@@ -362,12 +377,12 @@ struct device *
hwmon_device_register_with_info(struct device *dev,
const char *name, void *drvdata,
const struct hwmon_chip_info *info,
- const struct attribute_group **groups);
+ const struct attribute_group **extra_groups);
struct device *
devm_hwmon_device_register_with_info(struct device *dev,
- const char *name, void *drvdata,
- const struct hwmon_chip_info *info,
- const struct attribute_group **groups);
+ const char *name, void *drvdata,
+ const struct hwmon_chip_info *info,
+ const struct attribute_group **extra_groups);
void hwmon_device_unregister(struct device *dev);
void devm_hwmon_device_unregister(struct device *dev);
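
To make the split between data and string callbacks concrete, here is a minimal sketch of an hwmon_ops using the new read_string hook; every example_* name and value below is invented for illustration:

	#include <linux/hwmon.h>

	static umode_t example_is_visible(const void *data,
					  enum hwmon_sensor_types type,
					  u32 attr, int channel)
	{
		return 0444;
	}

	static int example_read(struct device *dev, enum hwmon_sensor_types type,
				u32 attr, int channel, long *val)
	{
		*val = 42000;	/* e.g. a temperature in millidegrees C */
		return 0;
	}

	static int example_read_string(struct device *dev,
				       enum hwmon_sensor_types type,
				       u32 attr, int channel, char **str)
	{
		*str = "example-fan";	/* e.g. a label attribute */
		return 0;
	}

	static const struct hwmon_ops example_hwmon_ops = {
		.is_visible	= example_is_visible,
		.read		= example_read,
		.read_string	= example_read_string,
	};
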
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index cd184bdca58f..42fe43fb0c80 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -696,7 +696,7 @@ enum vmbus_device_type {
HV_FCOPY,
HV_BACKUP,
HV_DM,
- HV_UNKOWN,
+ HV_UNKNOWN,
};
struct vmbus_device {
@@ -1119,6 +1119,12 @@ struct hv_driver {
struct device_driver driver;
+ /* dynamic device GUID's */
+ struct {
+ spinlock_t lock;
+ struct list_head list;
+ } dynids;
+
int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
int (*remove)(struct hv_device *);
void (*shutdown)(struct hv_device *);
@@ -1447,6 +1453,7 @@ void hv_event_tasklet_enable(struct vmbus_channel *channel);
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
+void vmbus_setevent(struct vmbus_channel *channel);
/*
* Negotiated version with the Host.
*/
@@ -1479,10 +1486,11 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
* there is room for the producer to send the pending packet.
*/
-static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
+static inline void hv_signal_on_read(struct vmbus_channel *channel)
{
u32 cur_write_sz;
u32 pending_sz;
+ struct hv_ring_buffer_info *rbi = &channel->inbound;
/*
* Issue a full memory barrier before making the signaling decision.
@@ -1500,14 +1508,14 @@ static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
/* If the other end is not blocked on write don't bother. */
if (pending_sz == 0)
- return false;
+ return;
cur_write_sz = hv_get_bytes_to_write(rbi);
if (cur_write_sz >= pending_sz)
- return true;
+ vmbus_setevent(channel);
- return false;
+ return;
}
/*
@@ -1519,31 +1527,23 @@ static inline struct vmpacket_descriptor *
get_next_pkt_raw(struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *ring_info = &channel->inbound;
- u32 read_loc = ring_info->priv_read_index;
+ u32 priv_read_loc = ring_info->priv_read_index;
void *ring_buffer = hv_get_ring_buffer(ring_info);
- struct vmpacket_descriptor *cur_desc;
- u32 packetlen;
u32 dsize = ring_info->ring_datasize;
- u32 delta = read_loc - ring_info->ring_buffer->read_index;
+ /*
+ * delta is the difference between what is available to read and
+ * what was already consumed in place. We commit read index after
+ * the whole batch is processed.
+ */
+ u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ?
+ priv_read_loc - ring_info->ring_buffer->read_index :
+ (dsize - ring_info->ring_buffer->read_index) + priv_read_loc;
u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);
if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
return NULL;
- if ((read_loc + sizeof(*cur_desc)) > dsize)
- return NULL;
-
- cur_desc = ring_buffer + read_loc;
- packetlen = cur_desc->len8 << 3;
-
- /*
- * If the packet under consideration is wrapping around,
- * return failure.
- */
- if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1))
- return NULL;
-
- return cur_desc;
+ return ring_buffer + priv_read_loc;
}
/*
@@ -1555,16 +1555,14 @@ static inline void put_pkt_raw(struct vmbus_channel *channel,
struct vmpacket_descriptor *desc)
{
struct hv_ring_buffer_info *ring_info = &channel->inbound;
- u32 read_loc = ring_info->priv_read_index;
u32 packetlen = desc->len8 << 3;
u32 dsize = ring_info->ring_datasize;
- if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize)
- BUG();
/*
* Include the packet trailer.
*/
ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
+ ring_info->priv_read_index %= dsize;
}
/*
@@ -1589,8 +1587,7 @@ static inline void commit_rd_index(struct vmbus_channel *channel)
virt_rmb();
ring_info->ring_buffer->read_index = ring_info->priv_read_index;
- if (hv_need_to_signal_on_read(ring_info))
- vmbus_set_event(channel);
+ hv_signal_on_read(channel);
}
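
The wrap-around arithmetic introduced in get_next_pkt_raw() above can be read in isolation as the following helper (the helper name is hypothetical; the header open-codes it):

	static inline u32 example_wrapped_delta(u32 priv_read_loc, u32 read_index,
						u32 dsize)
	{
		/*
		 * Distance from the committed read index to the private one,
		 * accounting for a wrap past the end of the data area.
		 */
		return priv_read_loc >= read_index ?
			priv_read_loc - read_index :
			(dsize - read_index) + priv_read_loc;
	}
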
diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h
index c2e3324f9468..a1385023a29b 100644
--- a/include/linux/i2c-smbus.h
+++ b/include/linux/i2c-smbus.h
@@ -50,31 +50,4 @@ struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
struct i2c_smbus_alert_setup *setup);
int i2c_handle_smbus_alert(struct i2c_client *ara);
-/**
- * smbus_host_notify - internal structure used by the Host Notify mechanism.
- * @adapter: the I2C adapter associated with this struct
- * @work: worker used to schedule the IRQ in the slave device
- * @lock: spinlock to check if a notification is already pending
- * @pending: flag set when a notification is pending (any new notification will
- * be rejected if pending is true)
- * @payload: the actual payload of the Host Notify event
- * @addr: the address of the slave device which raised the notification
- *
- * This struct needs to be allocated by i2c_setup_smbus_host_notify() and does
- * not need to be freed. Internally, i2c_setup_smbus_host_notify() uses a
- * managed resource to clean this up when the adapter get released.
- */
-struct smbus_host_notify {
- struct i2c_adapter *adapter;
- struct work_struct work;
- spinlock_t lock;
- bool pending;
- u16 payload;
- u8 addr;
-};
-
-struct smbus_host_notify *i2c_setup_smbus_host_notify(struct i2c_adapter *adap);
-int i2c_handle_smbus_host_notify(struct smbus_host_notify *host_notify,
- unsigned short addr, unsigned int data);
-
#endif /* _LINUX_I2C_SMBUS_H */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 6422eef428c4..b2109c522dec 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -30,6 +30,7 @@
#include <linux/device.h> /* for struct device */
#include <linux/sched.h> /* for completion */
#include <linux/mutex.h>
+#include <linux/irqdomain.h> /* for Host Notify IRQ */
#include <linux/of.h> /* for struct device_node */
#include <linux/swab.h> /* for swab16 */
#include <uapi/linux/i2c.h>
@@ -135,7 +136,8 @@ enum i2c_alert_protocol {
* struct i2c_driver - represent an I2C device driver
* @class: What kind of i2c device we instantiate (for detect)
* @attach_adapter: Callback for bus addition (deprecated)
- * @probe: Callback for device binding
+ * @probe: Callback for device binding - soon to be deprecated
+ * @probe_new: New callback for device binding
* @remove: Callback for device unbinding
* @shutdown: Callback for device shutdown
* @alert: Alert callback, for example for the SMBus alert protocol
@@ -178,6 +180,11 @@ struct i2c_driver {
int (*probe)(struct i2c_client *, const struct i2c_device_id *);
int (*remove)(struct i2c_client *);
+ /* New driver model interface to aid the seamless removal of the
+ * current probe()'s second parameter, which is more commonly unused than used.
+ */
+ int (*probe_new)(struct i2c_client *);
+
/* driver model interfaces that don't relate to enumeration */
void (*shutdown)(struct i2c_client *);
@@ -243,6 +250,8 @@ struct i2c_client {
extern struct i2c_client *i2c_verify_client(struct device *dev);
extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
+extern const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
+ const struct i2c_client *client);
static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
{
@@ -567,6 +576,8 @@ struct i2c_adapter {
struct i2c_bus_recovery_info *bus_recovery_info;
const struct i2c_adapter_quirks *quirks;
+
+ struct irq_domain *host_notify_domain;
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
@@ -739,6 +750,7 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0);
}
+int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
/**
* module_i2c_driver() - Helper macro for registering a modular I2C driver
* @__i2c_driver: i2c_driver struct
@@ -774,6 +786,10 @@ extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
/* must call i2c_put_adapter() when done with returned i2c_adapter device */
struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node);
+extern const struct of_device_id
+*i2c_of_match_device(const struct of_device_id *matches,
+ struct i2c_client *client);
+
#else
static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
@@ -790,6 +806,14 @@ static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node
{
return NULL;
}
+
+static inline const struct of_device_id
+*i2c_of_match_device(const struct of_device_id *matches,
+ struct i2c_client *client)
+{
+ return NULL;
+}
+
#endif /* CONFIG_OF */
#if IS_ENABLED(CONFIG_ACPI)
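
A small sketch of a client driver opting in to the new binding callback; the driver and function names are placeholders, not part of this patch:

	#include <linux/i2c.h>

	static int example_probe(struct i2c_client *client)
	{
		/* No second 'const struct i2c_device_id *' argument anymore. */
		return 0;
	}

	static struct i2c_driver example_driver = {
		.driver = {
			.name = "example",
		},
		.probe_new = example_probe,
	};
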
diff --git a/include/linux/i2c/mlxcpld.h b/include/linux/i2c/mlxcpld.h
new file mode 100644
index 000000000000..b08dcb183fca
--- /dev/null
+++ b/include/linux/i2c/mlxcpld.h
@@ -0,0 +1,52 @@
+/*
+ * mlxcpld.h - Mellanox I2C multiplexer support in CPLD
+ *
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Michael Shych <michaels@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_I2C_MLXCPLD_H
+#define _LINUX_I2C_MLXCPLD_H
+
+/* Platform data for the CPLD I2C multiplexers */
+
+/* mlxcpld_mux_plat_data - per mux data, used with i2c_register_board_info
+ * @adap_ids - adapter array
+ * @num_adaps - number of adapters
+ * @sel_reg_addr - mux select register offset in CPLD space
+ */
+struct mlxcpld_mux_plat_data {
+ int *adap_ids;
+ int num_adaps;
+ int sel_reg_addr;
+};
+
+#endif /* _LINUX_I2C_MLXCPLD_H */
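
A hypothetical board-file usage of the new platform data; the channel ids and register offset below are made-up values:

	#include <linux/i2c/mlxcpld.h>

	static int example_adap_ids[] = { 2, 3, 4, 5 };

	static struct mlxcpld_mux_plat_data example_mux_pdata = {
		.adap_ids	= example_adap_ids,
		.num_adaps	= ARRAY_SIZE(example_adap_ids),
		.sel_reg_addr	= 0x20,
	};
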
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 083d61e92706..3c01b89aed67 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -18,12 +18,11 @@
#include <linux/rcupdate.h>
/*
- * We want shallower trees and thus more bits covered at each layer. 8
- * bits gives us large enough first layer for most use cases and maximum
- * tree depth of 4. Each idr_layer is slightly larger than 2k on 64bit and
- * 1k on 32bit.
+ * Using 6 bits at each layer allows us to allocate 7 layers out of each page.
+ * 8 bits only gave us 3 layers out of every pair of pages, which is less
+ * efficient except for trees with a largest element between 192-255 inclusive.
*/
-#define IDR_BITS 8
+#define IDR_BITS 6
#define IDR_SIZE (1 << IDR_BITS)
#define IDR_MASK ((1 << IDR_BITS)-1)
@@ -56,6 +55,32 @@ struct idr {
#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
/**
+ * idr_get_cursor - Return the current position of the cyclic allocator
+ * @idr: idr handle
+ *
+ * The value returned is the value that will be next returned from
+ * idr_alloc_cyclic() if it is free (otherwise the search will start from
+ * this position).
+ */
+static inline unsigned int idr_get_cursor(struct idr *idr)
+{
+ return READ_ONCE(idr->cur);
+}
+
+/**
+ * idr_set_cursor - Set the current position of the cyclic allocator
+ * @idr: idr handle
+ * @val: new position
+ *
+ * The next call to idr_alloc_cyclic() will return @val if it is free
+ * (otherwise the search will start from this position).
+ */
+static inline void idr_set_cursor(struct idr *idr, unsigned int val)
+{
+ WRITE_ONCE(idr->cur, val);
+}
+
+/**
* DOC: idr sync
* idr synchronization (stolen from radix-tree.h)
*
@@ -195,6 +220,11 @@ static inline int ida_get_new(struct ida *ida, int *p_id)
return ida_get_new_above(ida, 0, p_id);
}
+static inline bool ida_is_empty(struct ida *ida)
+{
+ return idr_is_empty(&ida->idr);
+}
+
void __init idr_init_cache(void);
#endif /* __IDR_H__ */
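
To spell out the arithmetic behind the new comment, as a rough back-of-the-envelope estimate: with IDR_BITS = 6 an idr_layer holds 2^6 = 64 child pointers, i.e. about 512 bytes of pointers plus a small header and bitmap (roughly 550 bytes on 64-bit), so around seven layers pack into a single 4 KiB page. With IDR_BITS = 8 a layer holds 256 pointers and weighs in at a little over 2 KiB, so an order-1 (8 KiB) slab only fits three of them. The trade-off is a deeper tree: covering the full 31-bit id space now takes up to six levels instead of four.
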
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index a80516fd65c8..fe849329511a 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1576,6 +1576,9 @@ struct ieee80211_vht_operation {
#define WLAN_AUTH_SHARED_KEY 1
#define WLAN_AUTH_FT 2
#define WLAN_AUTH_SAE 3
+#define WLAN_AUTH_FILS_SK 4
+#define WLAN_AUTH_FILS_SK_PFS 5
+#define WLAN_AUTH_FILS_PK 6
#define WLAN_AUTH_LEAP 128
#define WLAN_AUTH_CHALLENGE_LEN 128
@@ -1960,6 +1963,26 @@ enum ieee80211_eid {
WLAN_EID_VENDOR_SPECIFIC = 221,
WLAN_EID_QOS_PARAMETER = 222,
+ WLAN_EID_CAG_NUMBER = 237,
+ WLAN_EID_AP_CSN = 239,
+ WLAN_EID_FILS_INDICATION = 240,
+ WLAN_EID_DILS = 241,
+ WLAN_EID_FRAGMENT = 242,
+ WLAN_EID_EXTENSION = 255
+};
+
+/* Element ID Extensions for Element ID 255 */
+enum ieee80211_eid_ext {
+ WLAN_EID_EXT_ASSOC_DELAY_INFO = 1,
+ WLAN_EID_EXT_FILS_REQ_PARAMS = 2,
+ WLAN_EID_EXT_FILS_KEY_CONFIRM = 3,
+ WLAN_EID_EXT_FILS_SESSION = 4,
+ WLAN_EID_EXT_FILS_HLP_CONTAINER = 5,
+ WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN = 6,
+ WLAN_EID_EXT_KEY_DELIVERY = 7,
+ WLAN_EID_EXT_FILS_WRAPPED_DATA = 8,
+ WLAN_EID_EXT_FILS_PUBLIC_KEY = 12,
+ WLAN_EID_EXT_FILS_NONCE = 13,
};
/* Action category code */
@@ -2073,6 +2096,9 @@ enum ieee80211_key_len {
#define IEEE80211_GCMP_MIC_LEN 16
#define IEEE80211_GCMP_PN_LEN 6
+#define FILS_NONCE_LEN 16
+#define FILS_MAX_KEK_LEN 64
+
/* Public action codes */
enum ieee80211_pub_actioncode {
WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4,
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index f563907ed776..3355efc89781 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -44,4 +44,20 @@ static inline int arp_hdr_len(struct net_device *dev)
return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2;
}
}
+
+static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
+{
+ switch (dev->type) {
+ case ARPHRD_TUNNEL:
+ case ARPHRD_TUNNEL6:
+ case ARPHRD_SIT:
+ case ARPHRD_IPGRE:
+ case ARPHRD_VOID:
+ case ARPHRD_NONE:
+ return false;
+ default:
+ return true;
+ }
+}
+
#endif /* _LINUX_IF_ARP_H */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 3319d97d789d..8d5fcd6284ce 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -399,22 +399,6 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
skb->vlan_tci = 0;
return skb;
}
-/*
- * vlan_hwaccel_push_inside - pushes vlan tag to the payload
- * @skb: skbuff to tag
- *
- * Checks is tag is present in @skb->vlan_tci and if it is, it pushes the
- * VLAN tag from @skb->vlan_tci inside to the payload.
- *
- * Following the skb_unshare() example, in case of error, the calling function
- * doesn't have to worry about freeing the original skb.
- */
-static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
-{
- if (skb_vlan_tag_present(skb))
- skb = __vlan_hwaccel_push_inside(skb);
- return skb;
-}
/**
* __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index e7fdec4db9da..5ba430cc9a87 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -136,6 +136,7 @@ int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_OFFSET), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
.scan_index = (_si), \
.scan_type = { \
.sign = 'u', \
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 228bd44efa4c..497f2b3a5a62 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -116,6 +116,16 @@ struct st_sensor_bdu {
};
/**
+ * struct st_sensor_das - ST sensor device data alignment selection
+ * @addr: address of the register.
+ * @mask: mask to write the das flag for left alignment.
+ */
+struct st_sensor_das {
+ u8 addr;
+ u8 mask;
+};
+
+/**
* struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt
* @addr: address of the register.
* @mask_int1: mask to enable/disable IRQ on INT1 pin.
@@ -185,6 +195,7 @@ struct st_sensor_transfer_function {
* @enable_axis: Enable one or more axis of the sensor.
* @fs: Full scale register and full scale list available.
* @bdu: Block data update register.
+ * @das: Data Alignment Selection register.
* @drdy_irq: Data ready register of the sensor.
* @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.
* @bootime: samples to discard when sensor passing from power-down to power-up.
@@ -200,6 +211,7 @@ struct st_sensor_settings {
struct st_sensor_axis enable_axis;
struct st_sensor_fullscale fs;
struct st_sensor_bdu bdu;
+ struct st_sensor_das das;
struct st_sensor_data_ready_irq drdy_irq;
bool multi_read_bit;
unsigned int bootime;
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 9edccfba1ffb..47eeec3218b5 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -226,6 +226,34 @@ int iio_read_channel_processed(struct iio_channel *chan, int *val);
int iio_write_channel_raw(struct iio_channel *chan, int val);
/**
+ * iio_read_max_channel_raw() - read maximum available raw value from a given
+ * channel, i.e. the maximum possible value.
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ *
+ * Note raw reads from iio channels are in adc counts and hence
+ * scale will need to be applied if standard units are required.
+ */
+int iio_read_max_channel_raw(struct iio_channel *chan, int *val);
+
+/**
+ * iio_read_avail_channel_raw() - read available raw values from a given channel
+ * @chan: The channel being queried.
+ * @vals: Available values read back.
+ * @length: Number of entries in vals.
+ *
+ * Returns an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST.
+ *
+ * For ranges, three vals are always returned; min, step and max.
+ * For lists, all the possible values are enumerated.
+ *
+ * Note raw available values from iio channels are in adc counts and
+ * hence scale will need to be applied if standard units are required.
+ */
+int iio_read_avail_channel_raw(struct iio_channel *chan,
+ const int **vals, int *length);
+
+/**
* iio_get_channel_type() - get the type of a channel
* @channel: The channel being queried.
* @type: The type of the channel.
@@ -236,6 +264,19 @@ int iio_get_channel_type(struct iio_channel *channel,
enum iio_chan_type *type);
/**
+ * iio_read_channel_offset() - read the offset value for a channel
+ * @chan: The channel being queried.
+ * @val: First part of value read back.
+ * @val2: Second part of value read back.
+ *
+ * Note returns a description of what is in val and val2, such
+ * as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val
+ * + val2/1e6
+ */
+int iio_read_channel_offset(struct iio_channel *chan, int *val,
+ int *val2);
+
+/**
* iio_read_channel_scale() - read the scale value for a channel
* @chan: The channel being queried.
* @val: First part of value read back.
diff --git a/include/linux/iio/dac/mcp4725.h b/include/linux/iio/dac/mcp4725.h
index 91530e6611e9..628b2cf54c50 100644
--- a/include/linux/iio/dac/mcp4725.h
+++ b/include/linux/iio/dac/mcp4725.h
@@ -9,8 +9,18 @@
#ifndef IIO_DAC_MCP4725_H_
#define IIO_DAC_MCP4725_H_
+/**
+ * struct mcp4725_platform_data - MCP4725/6 DAC specific data.
+ * @use_vref: Whether an external reference voltage on Vref pin should be used.
+ * Additional vref-supply must be specified when used.
+ * @vref_buffered: Controls buffering of the external reference voltage.
+ *
+ * Vref related settings are available only on MCP4726. See
+ * Documentation/devicetree/bindings/iio/dac/mcp4725.txt for more information.
+ */
struct mcp4725_platform_data {
- u16 vref_mv;
+ bool use_vref;
+ bool vref_buffered;
};
#endif /* IIO_DAC_MCP4725_H_ */
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index b4a0679e4a49..3f5ea2e9a39e 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -225,12 +225,22 @@ struct iio_event_spec {
* endianness: little or big endian
* @info_mask_separate: What information is to be exported that is specific to
* this channel.
+ * @info_mask_separate_available: What availability information is to be
+ * exported that is specific to this channel.
* @info_mask_shared_by_type: What information is to be exported that is shared
* by all channels of the same type.
+ * @info_mask_shared_by_type_available: What availability information is to be
+ * exported that is shared by all channels of the same
+ * type.
* @info_mask_shared_by_dir: What information is to be exported that is shared
* by all channels of the same direction.
+ * @info_mask_shared_by_dir_available: What availability information is to be
+ * exported that is shared by all channels of the same
+ * direction.
* @info_mask_shared_by_all: What information is to be exported that is shared
* by all channels.
+ * @info_mask_shared_by_all_available: What availability information is to be
+ * exported that is shared by all channels.
* @event_spec: Array of events which should be registered for this
* channel.
* @num_event_specs: Size of the event_spec array.
@@ -269,9 +279,13 @@ struct iio_chan_spec {
enum iio_endian endianness;
} scan_type;
long info_mask_separate;
+ long info_mask_separate_available;
long info_mask_shared_by_type;
+ long info_mask_shared_by_type_available;
long info_mask_shared_by_dir;
+ long info_mask_shared_by_dir_available;
long info_mask_shared_by_all;
+ long info_mask_shared_by_all_available;
const struct iio_event_spec *event_spec;
unsigned int num_event_specs;
const struct iio_chan_spec_ext_info *ext_info;
@@ -301,6 +315,23 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
(chan->info_mask_shared_by_all & BIT(type));
}
+/**
+ * iio_channel_has_available() - Checks if a channel has an available attribute
+ * @chan: The channel to be queried
+ * @type: Type of the available attribute to be checked
+ *
+ * Returns true if the channel supports reporting available values for the
+ * given attribute type, false otherwise.
+ */
+static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
+ enum iio_chan_info_enum type)
+{
+ return (chan->info_mask_separate_available & BIT(type)) |
+ (chan->info_mask_shared_by_type_available & BIT(type)) |
+ (chan->info_mask_shared_by_dir_available & BIT(type)) |
+ (chan->info_mask_shared_by_all_available & BIT(type));
+}
+
#define IIO_CHAN_SOFT_TIMESTAMP(_si) { \
.type = IIO_TIMESTAMP, \
.channel = -1, \
@@ -349,6 +380,14 @@ struct iio_dev;
* max_len specifies maximum number of elements
* vals pointer can contain. val_len is used to return
* length of valid elements in vals.
+ * @read_avail: function to return the available values from the device.
+ * mask specifies which value. Note 0 means the available
+ * values for the channel in question. Return value
+ * specifies whether an IIO_AVAIL_LIST or an IIO_AVAIL_RANGE is
+ * returned in vals. The type of the vals is returned in
+ * type and the number of vals is returned in length. For
+ * ranges, there are always three vals returned; min, step
+ * and max. For lists, all possible values are enumerated.
* @write_raw: function to write a value to the device.
* Parameters are the same as for read_raw.
* @write_raw_get_fmt: callback function to query the expected
@@ -381,7 +420,7 @@ struct iio_dev;
**/
struct iio_info {
struct module *driver_module;
- struct attribute_group *event_attrs;
+ const struct attribute_group *event_attrs;
const struct attribute_group *attrs;
int (*read_raw)(struct iio_dev *indio_dev,
@@ -397,6 +436,13 @@ struct iio_info {
int *val_len,
long mask);
+ int (*read_avail)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals,
+ int *type,
+ int *length,
+ long mask);
+
int (*write_raw)(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val,
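On the driver side, the new *_available masks advertise which attributes expose an "_available" sysfs file, and the read_avail() callback answers the query. A minimal sketch under assumed names (demo_*) and a made-up fixed scale range:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/iio/iio.h>
#include <linux/iio/types.h>

/* Hypothetical scale range: {min, step, max}, all IIO_VAL_INT. */
static const int demo_scale_avail[3] = { 1, 1, 8 };

static const struct iio_chan_spec demo_channel = {
	.type = IIO_VOLTAGE,
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
			      BIT(IIO_CHAN_INFO_SCALE),
	/* Creates in_voltage_scale_available for this channel. */
	.info_mask_separate_available = BIT(IIO_CHAN_INFO_SCALE),
};

static int demo_read_avail(struct iio_dev *indio_dev,
			   struct iio_chan_spec const *chan,
			   const int **vals, int *type, int *length,
			   long mask)
{
	switch (mask) {
	case IIO_CHAN_INFO_SCALE:
		*vals = demo_scale_avail;
		*type = IIO_VAL_INT;
		*length = ARRAY_SIZE(demo_scale_avail);
		return IIO_AVAIL_RANGE;	/* vals holds min, step, max */
	default:
		return -EINVAL;
	}
}

static const struct iio_info demo_info = {
	.read_avail = demo_read_avail,
};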
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h
index 9cd8f747212f..ce9426c507fd 100644
--- a/include/linux/iio/sysfs.h
+++ b/include/linux/iio/sysfs.h
@@ -55,10 +55,34 @@ struct iio_const_attr {
{ .dev_attr = __ATTR(_name, _mode, _show, _store), \
.address = _addr }
+#define IIO_ATTR_RO(_name, _addr) \
+ { .dev_attr = __ATTR_RO(_name), \
+ .address = _addr }
+
+#define IIO_ATTR_WO(_name, _addr) \
+ { .dev_attr = __ATTR_WO(_name), \
+ .address = _addr }
+
+#define IIO_ATTR_RW(_name, _addr) \
+ { .dev_attr = __ATTR_RW(_name), \
+ .address = _addr }
+
#define IIO_DEVICE_ATTR(_name, _mode, _show, _store, _addr) \
struct iio_dev_attr iio_dev_attr_##_name \
= IIO_ATTR(_name, _mode, _show, _store, _addr)
+#define IIO_DEVICE_ATTR_RO(_name, _addr) \
+ struct iio_dev_attr iio_dev_attr_##_name \
+ = IIO_ATTR_RO(_name, _addr)
+
+#define IIO_DEVICE_ATTR_WO(_name, _addr) \
+ struct iio_dev_attr iio_dev_attr_##_name \
+ = IIO_ATTR_WO(_name, _addr)
+
+#define IIO_DEVICE_ATTR_RW(_name, _addr) \
+ struct iio_dev_attr iio_dev_attr_##_name \
+ = IIO_ATTR_RW(_name, _addr)
+
#define IIO_DEVICE_ATTR_NAMED(_vname, _name, _mode, _show, _store, _addr) \
struct iio_dev_attr iio_dev_attr_##_vname \
= IIO_ATTR(_name, _mode, _show, _store, _addr)
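The new _RO/_WO/_RW variants follow the usual __ATTR_RO()-style convention of deriving the show/store callbacks from the attribute name. A hedged sketch (attribute name, values and address 0 are all illustrative):

#include <linux/device.h>
#include <linux/iio/sysfs.h>

/* __ATTR_RO() expects a <name>_show() function. */
static ssize_t sampling_frequency_available_show(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
{
	return sprintf(buf, "100 200 400\n");	/* made-up frequencies */
}

static IIO_DEVICE_ATTR_RO(sampling_frequency_available, 0);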
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 4f1154f7a33c..ea08302f2d7b 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -170,6 +170,8 @@ void iio_trigger_free(struct iio_trigger *trig);
*/
bool iio_trigger_using_own(struct iio_dev *indio_dev);
+int iio_trigger_validate_own_device(struct iio_trigger *trig,
+ struct iio_dev *indio_dev);
#else
struct iio_trigger;
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 32b579525004..2aa7b6384d64 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -29,4 +29,9 @@ enum iio_event_info {
#define IIO_VAL_FRACTIONAL 10
#define IIO_VAL_FRACTIONAL_LOG2 11
+enum iio_available_type {
+ IIO_AVAIL_LIST,
+ IIO_AVAIL_RANGE,
+};
+
#endif /* _IIO_TYPES_H_ */
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 0eb7c2e7f0d6..7f6952f8d6aa 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -11,6 +11,7 @@
#define _LINUX_IMA_H
#include <linux/fs.h>
+#include <linux/kexec.h>
struct linux_binprm;
#ifdef CONFIG_IMA
@@ -23,6 +24,10 @@ extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
enum kernel_read_file_id id);
extern void ima_post_path_mknod(struct dentry *dentry);
+#ifdef CONFIG_IMA_KEXEC
+extern void ima_add_kexec_buffer(struct kimage *image);
+#endif
+
#else
static inline int ima_bprm_check(struct linux_binprm *bprm)
{
@@ -62,6 +67,13 @@ static inline void ima_post_path_mknod(struct dentry *dentry)
#endif /* CONFIG_IMA */
+#ifndef CONFIG_IMA_KEXEC
+struct kimage;
+
+static inline void ima_add_kexec_buffer(struct kimage *image)
+{}
+#endif
+
#ifdef CONFIG_IMA_APPRAISE
extern void ima_inode_post_setattr(struct dentry *dentry);
extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
diff --git a/include/linux/init.h b/include/linux/init.h
index e30104ceb86d..885c3e6d0f9d 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -126,6 +126,9 @@ void prepare_namespace(void);
void __init load_default_modules(void);
int __init init_rootfs(void);
+#if defined(CONFIG_DEBUG_RODATA) || defined(CONFIG_DEBUG_SET_MODULE_RONX)
+extern bool rodata_enabled;
+#endif
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 72f0721f75e7..53144e78a369 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -232,6 +232,18 @@ struct irq_affinity_notify {
void (*release)(struct kref *ref);
};
+/**
+ * struct irq_affinity - Description for automatic irq affinity assignments
+ * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
+ * the MSI(-X) vector space
+ * @post_vectors: Don't apply affinity to @post_vectors at end of
+ * the MSI(-X) vector space
+ */
+struct irq_affinity {
+ int pre_vectors;
+ int post_vectors;
+};
+
#if defined(CONFIG_SMP)
extern cpumask_var_t irq_default_affinity;
@@ -278,8 +290,8 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
-struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, int nvec);
-int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec);
+struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
+int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd);
#else /* CONFIG_SMP */
@@ -313,13 +325,13 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
}
static inline struct cpumask *
-irq_create_affinity_masks(const struct cpumask *affinity, int nvec)
+irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
{
return NULL;
}
static inline int
-irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec)
+irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
{
return maxvec;
}
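struct irq_affinity lets a driver exclude vectors that must keep the default affinity (for example an admin or config interrupt ahead of the per-queue vectors) from automatic spreading. A hedged sketch; demo_* names are assumptions:

#include <linux/interrupt.h>

/* Keep one pre-vector out of the spreading; no reserved post-vectors. */
static const struct irq_affinity demo_affd = {
	.pre_vectors	= 1,
	.post_vectors	= 0,
};

static struct cpumask *demo_spread(int nvec)
{
	/* Returns an array of nvec masks (or NULL); the caller frees it. */
	return irq_create_affinity_masks(nvec, &demo_affd);
}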
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 7892f55a1866..a4c94b86401e 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -49,6 +49,8 @@ struct iomap {
#define IOMAP_WRITE (1 << 0) /* writing, must allocate blocks */
#define IOMAP_ZERO (1 << 1) /* zeroing operation, may skip holes */
#define IOMAP_REPORT (1 << 2) /* report extent status, e.g. FIEMAP */
+#define IOMAP_FAULT (1 << 3) /* mapping for page fault */
+#define IOMAP_DIRECT (1 << 4) /* direct I/O */
struct iomap_ops {
/*
@@ -82,4 +84,14 @@ int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
loff_t start, loff_t len, struct iomap_ops *ops);
+/*
+ * Flags for direct I/O ->end_io:
+ */
+#define IOMAP_DIO_UNWRITTEN (1 << 0) /* covers unwritten extent(s) */
+#define IOMAP_DIO_COW (1 << 1) /* covers COW extent(s) */
+typedef int (iomap_dio_end_io_t)(struct kiocb *iocb, ssize_t ret,
+ unsigned flags);
+ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+ struct iomap_ops *ops, iomap_dio_end_io_t end_io);
+
#endif /* LINUX_IOMAP_H */
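The new iomap direct-I/O path lets a filesystem supply a completion hook that is told whether the I/O covered unwritten or COW extents. A hedged sketch of such a hook, matching the iomap_dio_end_io_t signature; what a real filesystem does with the flags (e.g. converting unwritten extents) is left out:

#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/printk.h>

static int demo_dio_end_io(struct kiocb *iocb, ssize_t ret, unsigned flags)
{
	/* ret is the amount transferred, or a negative errno. */
	if (ret > 0 && (flags & IOMAP_DIO_UNWRITTEN))
		pr_debug("DIO write covered unwritten extents\n");
	if (ret > 0 && (flags & IOMAP_DIO_COW))
		pr_debug("DIO write covered COW extents\n");
	return 0;
}

/* Passed as the last argument of iomap_dio_rw() by the filesystem. */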
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 436dc21318af..0ff5111f6959 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -253,6 +253,7 @@ extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
+extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
struct notifier_block *nb);
@@ -351,6 +352,9 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
+void iommu_register_instance(struct fwnode_handle *fwnode,
+ const struct iommu_ops *ops);
+const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode);
#else /* CONFIG_IOMMU_API */
@@ -580,6 +584,17 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
return -ENODEV;
}
+static inline void iommu_register_instance(struct fwnode_handle *fwnode,
+ const struct iommu_ops *ops)
+{
+}
+
+static inline
+const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
#endif /* CONFIG_IOMMU_API */
#endif /* __LINUX_IOMMU_H */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index a0649973ee5b..671d014e6429 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -64,6 +64,11 @@ struct ipv6_devconf {
} stable_secret;
__s32 use_oif_addrs_only;
__s32 keep_addr_on_down;
+ __s32 seg6_enabled;
+#ifdef CONFIG_IPV6_SEG6_HMAC
+ __s32 seg6_require_hmac;
+#endif
+ __u32 enhanced_dad;
struct ctl_table_header *sysctl_header;
};
@@ -229,8 +234,9 @@ struct ipv6_pinfo {
rxflow:1,
rxtclass:1,
rxpmtu:1,
- rxorigdstaddr:1;
- /* 2 bits hole */
+ rxorigdstaddr:1,
+ recvfragsize:1;
+ /* 1 bit hole */
} bits;
__u16 all;
} rxopt;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index b7e34313cdfe..e808f8ae6f14 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -239,7 +239,7 @@
#define GITS_TYPER_PTA (1UL << 19)
#define GITS_TYPER_HWCOLLCNT_SHIFT 24
-#define GITS_CBASER_VALID (1UL << 63)
+#define GITS_CBASER_VALID (1ULL << 63)
#define GITS_CBASER_SHAREABILITY_SHIFT (10)
#define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59)
#define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53)
@@ -265,7 +265,7 @@
#define GITS_BASER_NR_REGS 8
-#define GITS_BASER_VALID (1UL << 63)
+#define GITS_BASER_VALID (1ULL << 63)
#define GITS_BASER_INDIRECT (1ULL << 62)
#define GITS_BASER_INNER_CACHEABILITY_SHIFT (59)
@@ -295,10 +295,10 @@
#define GITS_BASER_InnerShareable \
GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
#define GITS_BASER_PAGE_SIZE_SHIFT (8)
-#define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_4K (0ULL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_16K (1ULL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_64K (2ULL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_MASK (3ULL << GITS_BASER_PAGE_SIZE_SHIFT)
#define GITS_BASER_PAGES_MAX 256
#define GITS_BASER_PAGES_SHIFT (0)
#define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1)
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 81f930b0bca9..7b49c71c968b 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -259,11 +259,11 @@ extern void gic_init(unsigned long gic_base_addr,
unsigned long gic_addrspace_size, unsigned int cpu_vec,
unsigned int irqbase);
extern void gic_clocksource_init(unsigned int);
-extern cycle_t gic_read_count(void);
+extern u64 gic_read_count(void);
extern unsigned int gic_get_count_width(void);
-extern cycle_t gic_read_compare(void);
-extern void gic_write_compare(cycle_t cnt);
-extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
+extern u64 gic_read_compare(void);
+extern void gic_write_compare(u64 cnt);
+extern void gic_write_cpu_compare(u64 cnt, int cpu);
extern void gic_start_count(void);
extern void gic_stop_count(void);
extern int gic_get_c0_compare_int(void);
diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h
index 0fc6ff276221..8d80fdc68647 100644
--- a/include/linux/isdnif.h
+++ b/include/linux/isdnif.h
@@ -500,6 +500,6 @@ typedef struct {
*
*/
extern int register_isdn(isdn_if*);
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#endif /* __ISDNIF_H__ */
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 410decacff8f..68bd88223417 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -77,7 +77,6 @@ extern int kdb_poll_idx;
* number whenever the kernel debugger is entered.
*/
extern int kdb_initial_cpu;
-extern atomic_t kdb_event;
/* Types and messages used for dynamically added kdb shell commands */
@@ -162,6 +161,7 @@ enum kdb_msgsrc {
};
extern int kdb_trap_printk;
+extern int kdb_printf_cpu;
extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt,
va_list args);
extern __printf(1, 2) int kdb_printf(const char *, ...);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index bc6ed52a39b9..56aec84237ad 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -45,11 +45,16 @@
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
+/* @a is a power of 2 value */
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+/* generic data direction definitions */
+#define READ 0
+#define WRITE 1
+
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
#define u64_to_user_ptr(x) ( \
@@ -506,6 +511,15 @@ extern enum system_states {
#define TAINT_UNSIGNED_MODULE 13
#define TAINT_SOFTLOCKUP 14
#define TAINT_LIVEPATCH 15
+#define TAINT_FLAGS_COUNT 16
+
+struct taint_flag {
+ char true; /* character printed when tainted */
+ char false; /* character printed when not tainted */
+ bool module; /* also show as a per-module taint flag */
+};
+
+extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
extern const char hex_asc[];
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 44fda64ad434..00f776816aa3 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -78,8 +78,8 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
return kstat_cpu(cpu).irqs_sum;
}
-extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
-extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
+extern void account_user_time(struct task_struct *, cputime_t);
+extern void account_system_time(struct task_struct *, int, cputime_t);
extern void account_steal_time(cputime_t);
extern void account_idle_time(cputime_t);
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 406c33dcae13..d419d0e51fe5 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -148,7 +148,36 @@ struct kexec_file_ops {
kexec_verify_sig_t *verify_sig;
#endif
};
-#endif
+
+/**
+ * struct kexec_buf - parameters for finding a place for a buffer in memory
+ * @image: kexec image in which memory to search.
+ * @buffer: Contents which will be copied to the allocated memory.
+ * @bufsz: Size of @buffer.
+ * @mem: On return will have address of the buffer in memory.
+ * @memsz: Size for the buffer in memory.
+ * @buf_align: Minimum alignment needed.
+ * @buf_min: The buffer can't be placed below this address.
+ * @buf_max: The buffer can't be placed above this address.
+ * @top_down: Allocate from top of memory.
+ */
+struct kexec_buf {
+ struct kimage *image;
+ void *buffer;
+ unsigned long bufsz;
+ unsigned long mem;
+ unsigned long memsz;
+ unsigned long buf_align;
+ unsigned long buf_min;
+ unsigned long buf_max;
+ bool top_down;
+};
+
+int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
+ int (*func)(u64, u64, void *));
+extern int kexec_add_buffer(struct kexec_buf *kbuf);
+int kexec_locate_mem_hole(struct kexec_buf *kbuf);
+#endif /* CONFIG_KEXEC_FILE */
struct kimage {
kimage_entry_t head;
@@ -212,11 +241,6 @@ extern asmlinkage long sys_kexec_load(unsigned long entry,
struct kexec_segment __user *segments,
unsigned long flags);
extern int kernel_kexec(void);
-extern int kexec_add_buffer(struct kimage *image, char *buffer,
- unsigned long bufsz, unsigned long memsz,
- unsigned long buf_align, unsigned long buf_min,
- unsigned long buf_max, bool top_down,
- unsigned long *load_addr);
extern struct page *kimage_alloc_control_pages(struct kimage *image,
unsigned int order);
extern int kexec_load_purgatory(struct kimage *image, unsigned long min,
@@ -259,12 +283,6 @@ phys_addr_t paddr_vmcoreinfo_note(void);
vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)
#define VMCOREINFO_CONFIG(name) \
vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
-#define VMCOREINFO_PAGE_OFFSET(value) \
- vmcoreinfo_append_str("PAGE_OFFSET=%lx\n", (unsigned long)value)
-#define VMCOREINFO_VMALLOC_START(value) \
- vmcoreinfo_append_str("VMALLOC_START=%lx\n", (unsigned long)value)
-#define VMCOREINFO_VMEMMAP_START(value) \
- vmcoreinfo_append_str("VMEMMAP_START=%lx\n", (unsigned long)value)
extern struct kimage *kexec_image;
extern struct kimage *kexec_crash_image;
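With the long argument list of the old kexec_add_buffer() replaced by struct kexec_buf, callers fill in the constraints and read the chosen address back from kbuf.mem. A hedged sketch of loading a hypothetical blob (demo_* names and the below-4GiB constraint are illustrative only):

#include <linux/kexec.h>
#include <linux/printk.h>
#include <linux/sizes.h>

static int demo_load_blob(struct kimage *image, void *blob, size_t blob_len)
{
	struct kexec_buf kbuf = {
		.image		= image,
		.buffer		= blob,
		.bufsz		= blob_len,
		.memsz		= blob_len,
		.buf_align	= PAGE_SIZE,
		.buf_min	= 0,
		.buf_max	= SZ_4G,
		.top_down	= true,		/* search from high memory down */
	};
	int ret;

	ret = kexec_add_buffer(&kbuf);
	if (ret)
		return ret;

	/* kbuf.mem now holds the address the core picked for the blob. */
	pr_debug("blob loaded at 0x%lx\n", kbuf.mem);
	return 0;
}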
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index a6e82a69c363..4fec8b775895 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -48,6 +48,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
__k; \
})
+void free_kthread_struct(struct task_struct *k);
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
@@ -174,7 +175,7 @@ __printf(2, 3)
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...);
-struct kthread_worker *
+__printf(3, 4) struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
const char namefmt[], ...);
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 0fb7ffb1775f..0c8bd45c8206 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -24,21 +24,8 @@
#include <linux/time.h>
#include <linux/jiffies.h>
-/*
- * ktime_t:
- *
- * A single 64-bit variable is used to store the hrtimers
- * internal representation of time values in scalar nanoseconds. The
- * design plays out best on 64-bit CPUs, where most conversions are
- * NOPs and most arithmetic ktime_t operations are plain arithmetic
- * operations.
- *
- */
-union ktime {
- s64 tv64;
-};
-
-typedef union ktime ktime_t; /* Kill this */
+/* Nanosecond scalar representation for kernel time values */
+typedef s64 ktime_t;
/**
* ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
@@ -50,39 +37,34 @@ typedef union ktime ktime_t; /* Kill this */
static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
{
if (unlikely(secs >= KTIME_SEC_MAX))
- return (ktime_t){ .tv64 = KTIME_MAX };
+ return KTIME_MAX;
- return (ktime_t) { .tv64 = secs * NSEC_PER_SEC + (s64)nsecs };
+ return secs * NSEC_PER_SEC + (s64)nsecs;
}
/* Subtract two ktime_t variables. rem = lhs -rhs: */
-#define ktime_sub(lhs, rhs) \
- ({ (ktime_t){ .tv64 = (lhs).tv64 - (rhs).tv64 }; })
+#define ktime_sub(lhs, rhs) ((lhs) - (rhs))
/* Add two ktime_t variables. res = lhs + rhs: */
-#define ktime_add(lhs, rhs) \
- ({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; })
+#define ktime_add(lhs, rhs) ((lhs) + (rhs))
/*
* Same as ktime_add(), but avoids undefined behaviour on overflow; however,
* this means that you must check the result for overflow yourself.
*/
-#define ktime_add_unsafe(lhs, rhs) \
- ({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; })
+#define ktime_add_unsafe(lhs, rhs) ((u64) (lhs) + (rhs))
/*
* Add a ktime_t variable and a scalar nanosecond value.
* res = kt + nsval:
*/
-#define ktime_add_ns(kt, nsval) \
- ({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; })
+#define ktime_add_ns(kt, nsval) ((kt) + (nsval))
/*
* Subtract a scalar nanosecod from a ktime_t variable
* res = kt - nsval:
*/
-#define ktime_sub_ns(kt, nsval) \
- ({ (ktime_t){ .tv64 = (kt).tv64 - (nsval) }; })
+#define ktime_sub_ns(kt, nsval) ((kt) - (nsval))
/* convert a timespec to ktime_t format: */
static inline ktime_t timespec_to_ktime(struct timespec ts)
@@ -103,31 +85,16 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
}
/* Map the ktime_t to timespec conversion to ns_to_timespec function */
-#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64)
+#define ktime_to_timespec(kt) ns_to_timespec((kt))
/* Map the ktime_t to timespec conversion to ns_to_timespec function */
-#define ktime_to_timespec64(kt) ns_to_timespec64((kt).tv64)
+#define ktime_to_timespec64(kt) ns_to_timespec64((kt))
/* Map the ktime_t to timeval conversion to ns_to_timeval function */
-#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64)
+#define ktime_to_timeval(kt) ns_to_timeval((kt))
/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
-#define ktime_to_ns(kt) ((kt).tv64)
-
-
-/**
- * ktime_equal - Compares two ktime_t variables to see if they are equal
- * @cmp1: comparable1
- * @cmp2: comparable2
- *
- * Compare two ktime_t variables.
- *
- * Return: 1 if equal.
- */
-static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
-{
- return cmp1.tv64 == cmp2.tv64;
-}
+#define ktime_to_ns(kt) (kt)
/**
* ktime_compare - Compares two ktime_t variables for less, greater or equal
@@ -141,9 +108,9 @@ static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
*/
static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
{
- if (cmp1.tv64 < cmp2.tv64)
+ if (cmp1 < cmp2)
return -1;
- if (cmp1.tv64 > cmp2.tv64)
+ if (cmp1 > cmp2)
return 1;
return 0;
}
@@ -182,7 +149,7 @@ static inline s64 ktime_divns(const ktime_t kt, s64 div)
*/
BUG_ON(div < 0);
if (__builtin_constant_p(div) && !(div >> 32)) {
- s64 ns = kt.tv64;
+ s64 ns = kt;
u64 tmp = ns < 0 ? -ns : ns;
do_div(tmp, div);
@@ -199,7 +166,7 @@ static inline s64 ktime_divns(const ktime_t kt, s64 div)
* so catch them on 64bit as well.
*/
WARN_ON(div < 0);
- return kt.tv64 / div;
+ return kt / div;
}
#endif
@@ -256,7 +223,7 @@ extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
struct timespec *ts)
{
- if (kt.tv64) {
+ if (kt) {
*ts = ktime_to_timespec(kt);
return true;
} else {
@@ -275,7 +242,7 @@ static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
struct timespec64 *ts)
{
- if (kt.tv64) {
+ if (kt) {
*ts = ktime_to_timespec64(kt);
return true;
} else {
@@ -290,20 +257,16 @@ static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
* this resolution values.
*/
#define LOW_RES_NSEC TICK_NSEC
-#define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC }
+#define KTIME_LOW_RES (LOW_RES_NSEC)
static inline ktime_t ns_to_ktime(u64 ns)
{
- static const ktime_t ktime_zero = { .tv64 = 0 };
-
- return ktime_add_ns(ktime_zero, ns);
+ return ns;
}
static inline ktime_t ms_to_ktime(u64 ms)
{
- static const ktime_t ktime_zero = { .tv64 = 0 };
-
- return ktime_add_ms(ktime_zero, ms);
+ return ms * NSEC_PER_MSEC;
}
# include <linux/timekeeping.h>
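Since ktime_t is now a plain s64 nanosecond count, comparisons and arithmetic no longer go through .tv64. A short illustration of the scalar form (demo_* names are invented):

#include <linux/ktime.h>

static bool demo_expired(ktime_t deadline)
{
	ktime_t now = ktime_get();

	/* Previously: now.tv64 >= deadline.tv64 */
	return now >= deadline;
}

static ktime_t demo_deadline_ms(unsigned int ms)
{
	/* ktime_add() is now just '+' on scalars. */
	return ktime_add(ktime_get(), ms_to_ktime(ms));
}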
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 01c0b9cc3915..1c5190dab2c1 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -224,7 +224,6 @@ struct kvm_vcpu {
int fpu_active;
int guest_fpu_loaded, guest_xcr0_loaded;
- unsigned char fpu_counter;
struct swait_queue_head wq;
struct pid *pid;
int sigset_active;
@@ -439,6 +438,9 @@ struct kvm {
pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
+#define kvm_debug_ratelimited(fmt, ...) \
+ pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
+ ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
pr_err_ratelimited("kvm [%i]: " fmt, \
task_tgid_nr(current), ## __VA_ARGS__)
@@ -450,6 +452,9 @@ struct kvm {
#define vcpu_debug(vcpu, fmt, ...) \
kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+#define vcpu_debug_ratelimited(vcpu, fmt, ...) \
+ kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
+ ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...) \
kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
@@ -645,6 +650,8 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
+int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, int offset, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
@@ -1107,6 +1114,10 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
extern bool kvm_rebooting;
+extern unsigned int halt_poll_ns;
+extern unsigned int halt_poll_ns_grow;
+extern unsigned int halt_poll_ns_shrink;
+
struct kvm_device {
struct kvm_device_ops *ops;
struct kvm *kvm;
diff --git a/include/linux/leds.h b/include/linux/leds.h
index ddfcb2df3656..569cb531094c 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -42,16 +42,20 @@ struct led_classdev {
#define LED_UNREGISTERING (1 << 1)
/* Upper 16 bits reflect control information */
#define LED_CORE_SUSPENDRESUME (1 << 16)
-#define LED_BLINK_SW (1 << 17)
-#define LED_BLINK_ONESHOT (1 << 18)
-#define LED_BLINK_ONESHOT_STOP (1 << 19)
-#define LED_BLINK_INVERT (1 << 20)
-#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 21)
-#define LED_BLINK_DISABLE (1 << 22)
-#define LED_SYSFS_DISABLE (1 << 23)
-#define LED_DEV_CAP_FLASH (1 << 24)
-#define LED_HW_PLUGGABLE (1 << 25)
-#define LED_PANIC_INDICATOR (1 << 26)
+#define LED_SYSFS_DISABLE (1 << 17)
+#define LED_DEV_CAP_FLASH (1 << 18)
+#define LED_HW_PLUGGABLE (1 << 19)
+#define LED_PANIC_INDICATOR (1 << 20)
+
+ /* set_brightness_work / blink_timer flags, atomic, private. */
+ unsigned long work_flags;
+
+#define LED_BLINK_SW 0
+#define LED_BLINK_ONESHOT 1
+#define LED_BLINK_ONESHOT_STOP 2
+#define LED_BLINK_INVERT 3
+#define LED_BLINK_BRIGHTNESS_CHANGE 4
+#define LED_BLINK_DISABLE 5
/* Set LED brightness level
* Must not sleep. Use brightness_set_blocking for drivers
@@ -89,6 +93,7 @@ struct led_classdev {
unsigned long blink_delay_on, blink_delay_off;
struct timer_list blink_timer;
int blink_brightness;
+ int new_blink_brightness;
void (*flash_resume)(struct led_classdev *led_cdev);
struct work_struct set_brightness_work;
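The blink/brightness flags move from mask values OR-ed into the shared 'flags' word to bit numbers in the separate, atomically updated 'work_flags' bitmap. These bits are private to the LED core, so the following is only an illustration of the mask-to-bit-number change, not an API a driver should use:

#include <linux/bitops.h>
#include <linux/leds.h>

static void demo_start_oneshot(struct led_classdev *cdev)
{
	/* Bit numbers with set_bit()/clear_bit(), not (1 << n) masks. */
	set_bit(LED_BLINK_ONESHOT, &cdev->work_flags);
	clear_bit(LED_BLINK_ONESHOT_STOP, &cdev->work_flags);
}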
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 616eef4d81ea..c170be548b7f 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -166,6 +166,8 @@ enum {
ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */
ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */
+ ATA_DFLAG_NCQ_PRIO = (1 << 20), /* device supports NCQ priority */
+ ATA_DFLAG_NCQ_PRIO_ENABLE = (1 << 21), /* Priority cmds sent to dev */
ATA_DFLAG_INIT_MASK = (1 << 24) - 1,
ATA_DFLAG_DETACH = (1 << 24),
@@ -342,7 +344,9 @@ enum {
ATA_SHIFT_PIO = 0,
ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_NR_PIO_MODES,
ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_NR_MWDMA_MODES,
+ ATA_SHIFT_PRIO = 6,
+ ATA_PRIO_HIGH = 2,
/* size of buffer to pad xfers ending on unaligned boundaries */
ATA_DMA_PAD_SZ = 4,
@@ -542,6 +546,7 @@ typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes)
extern struct device_attribute dev_attr_link_power_management_policy;
extern struct device_attribute dev_attr_unload_heads;
+extern struct device_attribute dev_attr_ncq_prio_enable;
extern struct device_attribute dev_attr_em_message_type;
extern struct device_attribute dev_attr_em_message;
extern struct device_attribute dev_attr_sw_activity;
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index d190786e4ad8..7c273bbc5351 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -47,6 +47,7 @@ struct ppa_addr {
struct nvm_rq;
struct nvm_id;
struct nvm_dev;
+struct nvm_tgt_dev;
typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
@@ -107,6 +108,8 @@ enum {
NVM_RSP_NOT_CHANGEABLE = 0x1,
NVM_RSP_ERR_FAILWRITE = 0x40ff,
NVM_RSP_ERR_EMPTYPAGE = 0x42ff,
+ NVM_RSP_ERR_FAILECC = 0x4281,
+ NVM_RSP_WARN_HIGHECC = 0x4700,
/* Device opcodes */
NVM_OP_HBREAD = 0x02,
@@ -208,7 +211,7 @@ struct nvm_id {
struct nvm_target {
struct list_head list;
- struct nvm_dev *dev;
+ struct nvm_tgt_dev *dev;
struct nvm_tgt_type *type;
struct gendisk *disk;
};
@@ -228,7 +231,7 @@ typedef void (nvm_end_io_fn)(struct nvm_rq *);
struct nvm_rq {
struct nvm_tgt_instance *ins;
- struct nvm_dev *dev;
+ struct nvm_tgt_dev *dev;
struct bio *bio;
@@ -263,35 +266,12 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
return rqdata + 1;
}
-struct nvm_block;
-
-struct nvm_lun {
- int id;
-
- int lun_id;
- int chnl_id;
-
- spinlock_t lock;
-
- unsigned int nr_free_blocks; /* Number of unused blocks */
- struct nvm_block *blocks;
-};
-
enum {
NVM_BLK_ST_FREE = 0x1, /* Free block */
NVM_BLK_ST_TGT = 0x2, /* Block in use by target */
NVM_BLK_ST_BAD = 0x8, /* Bad block */
};
-struct nvm_block {
- struct list_head list;
- struct nvm_lun *lun;
- unsigned long id;
-
- void *priv;
- int state;
-};
-
/* system block cpu representation */
struct nvm_sb_info {
unsigned long seqnr;
@@ -301,22 +281,12 @@ struct nvm_sb_info {
struct ppa_addr fs_ppa;
};
-struct nvm_dev {
- struct nvm_dev_ops *ops;
-
- struct list_head devices;
-
- /* Media manager */
- struct nvmm_type *mt;
- void *mp;
-
- /* System blocks */
- struct nvm_sb_info sb;
-
- /* Device information */
+/* Device generic information */
+struct nvm_geo {
int nr_chnls;
+ int nr_luns;
+ int luns_per_chnl; /* -1 if channels are not symmetric */
int nr_planes;
- int luns_per_chnl;
int sec_per_pg; /* only sectors for a single page */
int pgs_per_blk;
int blks_per_lun;
@@ -336,14 +306,44 @@ struct nvm_dev {
int sec_per_pl; /* all sectors across planes */
int sec_per_blk;
int sec_per_lun;
+};
+
+struct nvm_tgt_dev {
+ /* Device information */
+ struct nvm_geo geo;
+
+ /* Base ppas for target LUNs */
+ struct ppa_addr *luns;
+
+ sector_t total_secs;
+
+ struct nvm_id identity;
+ struct request_queue *q;
+
+ struct nvm_dev *parent;
+ void *map;
+};
+
+struct nvm_dev {
+ struct nvm_dev_ops *ops;
+
+ struct list_head devices;
+
+ /* Media manager */
+ struct nvmm_type *mt;
+ void *mp;
+
+ /* System blocks */
+ struct nvm_sb_info sb;
+
+ /* Device information */
+ struct nvm_geo geo;
/* lower page table */
int lps_per_blk;
int *lptbl;
- unsigned long total_blocks;
unsigned long total_secs;
- int nr_luns;
unsigned long *lun_map;
void *dma_pool;
@@ -352,26 +352,57 @@ struct nvm_dev {
/* Backend device */
struct request_queue *q;
- struct device dev;
- struct device *parent_dev;
char name[DISK_NAME_LEN];
void *private_data;
+ void *rmap;
+
struct mutex mlock;
spinlock_t lock;
};
+static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo,
+ u64 pba)
+{
+ struct ppa_addr l;
+ int secs, pgs, blks, luns;
+ sector_t ppa = pba;
+
+ l.ppa = 0;
+
+ div_u64_rem(ppa, geo->sec_per_pg, &secs);
+ l.g.sec = secs;
+
+ sector_div(ppa, geo->sec_per_pg);
+ div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
+ l.g.pg = pgs;
+
+ sector_div(ppa, geo->pgs_per_blk);
+ div_u64_rem(ppa, geo->blks_per_lun, &blks);
+ l.g.blk = blks;
+
+ sector_div(ppa, geo->blks_per_lun);
+ div_u64_rem(ppa, geo->luns_per_chnl, &luns);
+ l.g.lun = luns;
+
+ sector_div(ppa, geo->luns_per_chnl);
+ l.g.ch = ppa;
+
+ return l;
+}
+
static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
struct ppa_addr r)
{
+ struct nvm_geo *geo = &dev->geo;
struct ppa_addr l;
- l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset;
- l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset;
- l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset;
- l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset;
- l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset;
- l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset;
+ l.ppa = ((u64)r.g.blk) << geo->ppaf.blk_offset;
+ l.ppa |= ((u64)r.g.pg) << geo->ppaf.pg_offset;
+ l.ppa |= ((u64)r.g.sec) << geo->ppaf.sect_offset;
+ l.ppa |= ((u64)r.g.pl) << geo->ppaf.pln_offset;
+ l.ppa |= ((u64)r.g.lun) << geo->ppaf.lun_offset;
+ l.ppa |= ((u64)r.g.ch) << geo->ppaf.ch_offset;
return l;
}
@@ -379,24 +410,25 @@ static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
struct ppa_addr r)
{
+ struct nvm_geo *geo = &dev->geo;
struct ppa_addr l;
l.ppa = 0;
/*
* (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
*/
- l.g.blk = (r.ppa >> dev->ppaf.blk_offset) &
- (((1 << dev->ppaf.blk_len) - 1));
- l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) &
- (((1 << dev->ppaf.pg_len) - 1));
- l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) &
- (((1 << dev->ppaf.sect_len) - 1));
- l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) &
- (((1 << dev->ppaf.pln_len) - 1));
- l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) &
- (((1 << dev->ppaf.lun_len) - 1));
- l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) &
- (((1 << dev->ppaf.ch_len) - 1));
+ l.g.blk = (r.ppa >> geo->ppaf.blk_offset) &
+ (((1 << geo->ppaf.blk_len) - 1));
+ l.g.pg |= (r.ppa >> geo->ppaf.pg_offset) &
+ (((1 << geo->ppaf.pg_len) - 1));
+ l.g.sec |= (r.ppa >> geo->ppaf.sect_offset) &
+ (((1 << geo->ppaf.sect_len) - 1));
+ l.g.pl |= (r.ppa >> geo->ppaf.pln_offset) &
+ (((1 << geo->ppaf.pln_len) - 1));
+ l.g.lun |= (r.ppa >> geo->ppaf.lun_offset) &
+ (((1 << geo->ppaf.lun_len) - 1));
+ l.g.ch |= (r.ppa >> geo->ppaf.ch_offset) &
+ (((1 << geo->ppaf.ch_len) - 1));
return l;
}
@@ -411,18 +443,13 @@ static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
ppa_addr->ppa = ADDR_EMPTY;
}
-static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
- struct nvm_block *blk)
+static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2)
{
- struct ppa_addr ppa;
- struct nvm_lun *lun = blk->lun;
-
- ppa.ppa = 0;
- ppa.g.blk = blk->id % dev->blks_per_lun;
- ppa.g.lun = lun->lun_id;
- ppa.g.ch = lun->chnl_id;
+ if (ppa_empty(ppa1) || ppa_empty(ppa2))
+ return 0;
- return ppa;
+ return ((ppa1.g.ch == ppa2.g.ch) && (ppa1.g.lun == ppa2.g.lun) &&
+ (ppa1.g.blk == ppa2.g.blk));
}
static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
@@ -432,7 +459,7 @@ static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
-typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
+typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *);
typedef void (nvm_tgt_exit_fn)(void *);
struct nvm_tgt_type {
@@ -465,23 +492,18 @@ typedef void (nvmm_unregister_fn)(struct nvm_dev *);
typedef int (nvmm_create_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_create *);
typedef int (nvmm_remove_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_remove *);
-typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *,
- struct nvm_lun *, unsigned long);
-typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *);
-typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
-typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
-typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
-typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
-typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
- unsigned long);
-typedef void (nvmm_mark_blk_fn)(struct nvm_dev *, struct ppa_addr, int);
-typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
-typedef int (nvmm_reserve_lun)(struct nvm_dev *, int);
-typedef void (nvmm_release_lun)(struct nvm_dev *, int);
-typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
-
+typedef int (nvmm_submit_io_fn)(struct nvm_tgt_dev *, struct nvm_rq *);
+typedef int (nvmm_erase_blk_fn)(struct nvm_tgt_dev *, struct ppa_addr *, int);
typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);
+typedef struct ppa_addr (nvmm_trans_ppa_fn)(struct nvm_tgt_dev *,
+ struct ppa_addr, int);
+typedef void (nvmm_part_to_tgt_fn)(struct nvm_dev *, sector_t*, int);
+
+enum {
+ TRANS_TGT_TO_DEV = 0x0,
+ TRANS_DEV_TO_TGT = 0x1,
+};
struct nvmm_type {
const char *name;
@@ -493,54 +515,41 @@ struct nvmm_type {
nvmm_create_tgt_fn *create_tgt;
nvmm_remove_tgt_fn *remove_tgt;
- /* Block administration callbacks */
- nvmm_get_blk_fn *get_blk;
- nvmm_put_blk_fn *put_blk;
- nvmm_open_blk_fn *open_blk;
- nvmm_close_blk_fn *close_blk;
- nvmm_flush_blk_fn *flush_blk;
-
nvmm_submit_io_fn *submit_io;
nvmm_erase_blk_fn *erase_blk;
- /* Bad block mgmt */
- nvmm_mark_blk_fn *mark_blk;
-
- /* Configuration management */
- nvmm_get_lun_fn *get_lun;
- nvmm_reserve_lun *reserve_lun;
- nvmm_release_lun *release_lun;
-
- /* Statistics */
- nvmm_lun_info_print_fn *lun_info_print;
-
nvmm_get_area_fn *get_area;
nvmm_put_area_fn *put_area;
+ nvmm_trans_ppa_fn *trans_ppa;
+ nvmm_part_to_tgt_fn *part_to_tgt;
+
struct list_head list;
};
extern int nvm_register_mgr(struct nvmm_type *);
extern void nvm_unregister_mgr(struct nvmm_type *);
-extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
- unsigned long);
-extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);
-
extern struct nvm_dev *nvm_alloc_dev(int);
extern int nvm_register(struct nvm_dev *);
extern void nvm_unregister(struct nvm_dev *);
-void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type);
-
-extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
+extern int nvm_set_bb_tbl(struct nvm_dev *, struct ppa_addr *, int, int);
+extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
+ int, int);
+extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
+extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
const struct ppa_addr *, int, int);
extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
-extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int);
-extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
+extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int, int);
+extern int nvm_erase_blk(struct nvm_tgt_dev *, struct ppa_addr *, int);
+extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
+ void *);
+extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
+extern void nvm_put_area(struct nvm_tgt_dev *, sector_t);
extern void nvm_end_io(struct nvm_rq *, int);
extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
void *, int);
@@ -548,6 +557,7 @@ extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int,
int, void *, int);
extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
extern int nvm_get_bb_tbl(struct nvm_dev *, struct ppa_addr, u8 *);
+extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
/* sysblk.c */
#define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */
@@ -569,10 +579,10 @@ extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *);
extern int nvm_dev_factory(struct nvm_dev *, int flags);
-#define nvm_for_each_lun_ppa(dev, ppa, chid, lunid) \
- for ((chid) = 0, (ppa).ppa = 0; (chid) < (dev)->nr_chnls; \
+#define nvm_for_each_lun_ppa(geo, ppa, chid, lunid) \
+ for ((chid) = 0, (ppa).ppa = 0; (chid) < (geo)->nr_chnls; \
(chid)++, (ppa).g.ch = (chid)) \
- for ((lunid) = 0; (lunid) < (dev)->luns_per_chnl; \
+ for ((lunid) = 0; (lunid) < (geo)->luns_per_chnl; \
(lunid)++, (ppa).g.lun = (lunid))
#else /* CONFIG_NVM */
diff --git a/include/linux/list.h b/include/linux/list.h
index 5809e9a2de5b..d1039ecaf94f 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -28,27 +28,42 @@ static inline void INIT_LIST_HEAD(struct list_head *list)
list->prev = list;
}
+#ifdef CONFIG_DEBUG_LIST
+extern bool __list_add_valid(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next);
+extern bool __list_del_entry_valid(struct list_head *entry);
+#else
+static inline bool __list_add_valid(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ return true;
+}
+static inline bool __list_del_entry_valid(struct list_head *entry)
+{
+ return true;
+}
+#endif
+
/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
-#ifndef CONFIG_DEBUG_LIST
static inline void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
+ if (!__list_add_valid(new, prev, next))
+ return;
+
next->prev = new;
new->next = next;
new->prev = prev;
WRITE_ONCE(prev->next, new);
}
-#else
-extern void __list_add(struct list_head *new,
- struct list_head *prev,
- struct list_head *next);
-#endif
/**
* list_add - add a new entry
@@ -96,22 +111,20 @@ static inline void __list_del(struct list_head * prev, struct list_head * next)
* Note: list_empty() on entry does not return true after this, the entry is
* in an undefined state.
*/
-#ifndef CONFIG_DEBUG_LIST
static inline void __list_del_entry(struct list_head *entry)
{
+ if (!__list_del_entry_valid(entry))
+ return;
+
__list_del(entry->prev, entry->next);
}
static inline void list_del(struct list_head *entry)
{
- __list_del(entry->prev, entry->next);
+ __list_del_entry(entry);
entry->next = LIST_POISON1;
entry->prev = LIST_POISON2;
}
-#else
-extern void __list_del_entry(struct list_head *entry);
-extern void list_del(struct list_head *entry);
-#endif
/**
* list_replace - replace old entry by new one
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index c1458fede1f9..1e327bb80838 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -338,9 +338,18 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
extern void lock_release(struct lockdep_map *lock, int nested,
unsigned long ip);
-#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
+/*
+ * Same "read" as for lock_acquire(), except -1 means any.
+ */
+extern int lock_is_held_type(struct lockdep_map *lock, int read);
+
+static inline int lock_is_held(struct lockdep_map *lock)
+{
+ return lock_is_held_type(lock, -1);
+}
-extern int lock_is_held(struct lockdep_map *lock);
+#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
+#define lockdep_is_held_type(lock, r) lock_is_held_type(&(lock)->dep_map, (r))
extern void lock_set_class(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, unsigned int subclass,
@@ -372,6 +381,14 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
WARN_ON(debug_locks && !lockdep_is_held(l)); \
} while (0)
+#define lockdep_assert_held_exclusive(l) do { \
+ WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \
+ } while (0)
+
+#define lockdep_assert_held_read(l) do { \
+ WARN_ON(debug_locks && !lockdep_is_held_type(l, 1)); \
+ } while (0)
+
#define lockdep_assert_held_once(l) do { \
WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \
} while (0)
@@ -428,7 +445,11 @@ struct lock_class_key { };
#define lockdep_depth(tsk) (0)
+#define lockdep_is_held_type(l, r) (1)
+
#define lockdep_assert_held(l) do { (void)(l); } while (0)
+#define lockdep_assert_held_exclusive(l) do { (void)(l); } while (0)
+#define lockdep_assert_held_read(l) do { (void)(l); } while (0)
#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
#define lockdep_recursing(tsk) (0)
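The read-aware assertions let code document whether it expects a lock held for read or held exclusively, instead of the single lockdep_assert_held(). A hedged sketch with an rwsem (demo_* names are invented; the macros compile away when lockdep is disabled):

#include <linux/lockdep.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(demo_sem);

static void demo_reader_path(void)
{
	/* Caller must hold demo_sem for read. */
	lockdep_assert_held_read(&demo_sem);
}

static void demo_writer_path(void)
{
	/* Caller must hold demo_sem exclusively (for write). */
	lockdep_assert_held_exclusive(&demo_sem);
}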
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 2931aa43dab1..0d3f14fd2621 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -82,6 +82,7 @@ static inline int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size,
}
#endif
+#ifdef CONFIG_MVEBU_MBUS
int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr);
void mvebu_mbus_get_pcie_mem_aperture(struct resource *res);
void mvebu_mbus_get_pcie_io_aperture(struct resource *res);
@@ -97,5 +98,12 @@ int mvebu_mbus_init(const char *soc, phys_addr_t mbus_phys_base,
size_t mbus_size, phys_addr_t sdram_phys_base,
size_t sdram_size);
int mvebu_mbus_dt_init(bool is_coherent);
+#else
+static inline int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target,
+ u8 *attr)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_MVEBU_MBUS */
#endif /* __LINUX_MBUS_H */
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
index a585b4b5fa0e..0661af17a758 100644
--- a/include/linux/mc146818rtc.h
+++ b/include/linux/mc146818rtc.h
@@ -16,6 +16,7 @@
#include <asm/mc146818rtc.h> /* register access macros */
#include <linux/bcd.h>
#include <linux/delay.h>
+#include <linux/pm-trace.h>
#ifdef __KERNEL__
#include <linux/spinlock.h> /* spinlock_t */
diff --git a/include/linux/mdev.h b/include/linux/mdev.h
new file mode 100644
index 000000000000..b6e048e1045f
--- /dev/null
+++ b/include/linux/mdev.h
@@ -0,0 +1,138 @@
+/*
+ * Mediated device definition
+ *
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Author: Neo Jia <cjia@nvidia.com>
+ * Kirti Wankhede <kwankhede@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MDEV_H
+#define MDEV_H
+
+struct mdev_device;
+
+/**
+ * struct mdev_parent_ops - Structure to be registered for each parent device
+ * to register the device with the mdev module.
+ *
+ * @owner: The module owner.
+ * @dev_attr_groups: Attributes of the parent device.
+ * @mdev_attr_groups: Attributes of the mediated device.
+ * @supported_type_groups: Attributes to define supported types. It is mandatory
+ * to provide supported types.
+ * @create: Called to allocate basic resources in parent device's
+ * driver for a particular mediated device. It is
+ * mandatory to provide create ops.
+ * @kobj: kobject of type for which 'create' is called.
+ * @mdev: mdev_device structure of the mediated device
+ * that is being created
+ * Returns integer: success (0) or error (< 0)
+ * @remove: Called to free resources in parent device's driver for
+ * a mediated device. It is mandatory to provide 'remove'
+ * ops.
+ * @mdev: mdev_device device structure which is being
+ * destroyed
+ * Returns integer: success (0) or error (< 0)
+ * @open: Open mediated device.
+ * @mdev: mediated device.
+ * Returns integer: success (0) or error (< 0)
+ * @release: release mediated device
+ * @mdev: mediated device.
+ * @read: Read emulation callback
+ * @mdev: mediated device structure
+ * @buf: read buffer
+ * @count: number of bytes to read
+ * @ppos: address.
+ * Returns number of bytes read on success or error.
+ * @write: Write emulation callback
+ * @mdev: mediated device structure
+ * @buf: write buffer
+ * @count: number of bytes to be written
+ * @ppos: address.
+ * Returns number of bytes written on success or error.
+ * @ioctl: IOCTL callback
+ * @mdev: mediated device structure
+ * @cmd: ioctl command
+ * @arg: arguments to ioctl
+ * @mmap: mmap callback
+ * @mdev: mediated device structure
+ * @vma: vma structure
+ * A parent device that supports mediated devices should be registered with the
+ * mdev module along with an mdev_parent_ops structure.
+ **/
+struct mdev_parent_ops {
+ struct module *owner;
+ const struct attribute_group **dev_attr_groups;
+ const struct attribute_group **mdev_attr_groups;
+ struct attribute_group **supported_type_groups;
+
+ int (*create)(struct kobject *kobj, struct mdev_device *mdev);
+ int (*remove)(struct mdev_device *mdev);
+ int (*open)(struct mdev_device *mdev);
+ void (*release)(struct mdev_device *mdev);
+ ssize_t (*read)(struct mdev_device *mdev, char __user *buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*write)(struct mdev_device *mdev, const char __user *buf,
+ size_t count, loff_t *ppos);
+ long (*ioctl)(struct mdev_device *mdev, unsigned int cmd,
+ unsigned long arg);
+ int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma);
+};
+
+/* interface for exporting mdev supported type attributes */
+struct mdev_type_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj, struct device *dev, char *buf);
+ ssize_t (*store)(struct kobject *kobj, struct device *dev,
+ const char *buf, size_t count);
+};
+
+#define MDEV_TYPE_ATTR(_name, _mode, _show, _store) \
+struct mdev_type_attribute mdev_type_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+#define MDEV_TYPE_ATTR_RW(_name) \
+ struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RW(_name)
+#define MDEV_TYPE_ATTR_RO(_name) \
+ struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RO(_name)
+#define MDEV_TYPE_ATTR_WO(_name) \
+ struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_WO(_name)
+
+/**
+ * struct mdev_driver - Mediated device driver
+ * @name: driver name
+ * @probe: called when new device created
+ * @remove: called when device removed
+ * @driver: device driver structure
+ *
+ **/
+struct mdev_driver {
+ const char *name;
+ int (*probe)(struct device *dev);
+ void (*remove)(struct device *dev);
+ struct device_driver driver;
+};
+
+#define to_mdev_driver(drv) container_of(drv, struct mdev_driver, driver)
+
+extern void *mdev_get_drvdata(struct mdev_device *mdev);
+extern void mdev_set_drvdata(struct mdev_device *mdev, void *data);
+extern uuid_le mdev_uuid(struct mdev_device *mdev);
+
+extern struct bus_type mdev_bus_type;
+
+extern int mdev_register_device(struct device *dev,
+ const struct mdev_parent_ops *ops);
+extern void mdev_unregister_device(struct device *dev);
+
+extern int mdev_register_driver(struct mdev_driver *drv, struct module *owner);
+extern void mdev_unregister_driver(struct mdev_driver *drv);
+
+extern struct device *mdev_parent_dev(struct mdev_device *mdev);
+extern struct device *mdev_dev(struct mdev_device *mdev);
+extern struct mdev_device *mdev_from_dev(struct device *dev);
+
+#endif /* MDEV_H */
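A parent (physical) device driver registers its callbacks through mdev_parent_ops; the mandatory pieces are supported_type_groups, create and remove. A hedged sketch with placeholder callbacks (demo_* names are invented, and supported_type_groups is left NULL here only for brevity, a real driver must provide it):

#include <linux/device.h>
#include <linux/module.h>
#include <linux/mdev.h>

static int demo_create(struct kobject *kobj, struct mdev_device *mdev)
{
	/* Allocate per-mdev state and stash it on the handle. */
	mdev_set_drvdata(mdev, NULL);
	return 0;
}

static int demo_remove(struct mdev_device *mdev)
{
	/* Free whatever demo_create() allocated. */
	return 0;
}

static const struct mdev_parent_ops demo_ops = {
	.owner			= THIS_MODULE,
	.supported_type_groups	= NULL,	/* mandatory in real drivers */
	.create			= demo_create,
	.remove			= demo_remove,
};

/* From the parent driver's probe():  mdev_register_device(dev, &demo_ops);
 * and mdev_unregister_device(dev) on remove. */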
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index e746919530f5..a0d274fe08f1 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -8,8 +8,7 @@
struct mei_cl_device;
struct mei_device;
-typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev,
- u32 events, void *context);
+typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev);
/**
* struct mei_cl_device - MEI device handle
@@ -24,12 +23,12 @@ typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev,
* @me_cl: me client
* @cl: mei client
* @name: device name
- * @event_work: async work to execute event callback
- * @event_cb: Drivers register this callback to get asynchronous ME
- * events (e.g. Rx buffer pending) notifications.
- * @event_context: event callback run context
- * @events_mask: Events bit mask requested by driver.
- * @events: Events bitmask sent to the driver.
+ * @rx_work: async work to execute Rx event callback
+ * @rx_cb: Drivers register this callback to get asynchronous ME
+ * Rx buffer pending notifications.
+ * @notif_work: async work to execute FW notif event callback
+ * @notif_cb: Drivers register this callback to get asynchronous ME
+ * FW notification pending notifications.
*
+ * @do_match: whether the device can be matched with a driver
* @is_added: device is already scanned
@@ -44,11 +43,10 @@ struct mei_cl_device {
struct mei_cl *cl;
char name[MEI_CL_NAME_SIZE];
- struct work_struct event_work;
- mei_cldev_event_cb_t event_cb;
- void *event_context;
- unsigned long events_mask;
- unsigned long events;
+ struct work_struct rx_work;
+ mei_cldev_cb_t rx_cb;
+ struct work_struct notif_work;
+ mei_cldev_cb_t notif_cb;
unsigned int do_match:1;
unsigned int is_added:1;
@@ -74,16 +72,27 @@ int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv);
-ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length);
-ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
+/**
+ * module_mei_cl_driver - Helper macro for registering mei cl driver
+ *
+ * @__mei_cldrv: mei_cl_driver structure
+ *
+ * Helper macro for mei cl drivers which do not do anything special in module
+ * init/exit, eliminating boilerplate code.
+ */
+#define module_mei_cl_driver(__mei_cldrv) \
+ module_driver(__mei_cldrv, \
+ mei_cldev_driver_register,\
+ mei_cldev_driver_unregister)
-int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
- unsigned long event_mask,
- mei_cldev_event_cb_t read_cb, void *context);
+ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length);
+ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
+ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
+ size_t length);
-#define MEI_CL_EVENT_RX 0
-#define MEI_CL_EVENT_TX 1
-#define MEI_CL_EVENT_NOTIF 2
+int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb);
+int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
+ mei_cldev_cb_t notif_cb);
const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev);
u8 mei_cldev_ver(const struct mei_cl_device *cldev);
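With the event bitmask interface split into dedicated Rx and notification callbacks, a bus client registers the callback it needs and can use module_mei_cl_driver() to drop its init/exit boilerplate. A hedged sketch; the driver name, buffer size and demo_* identifiers are assumptions, and a real driver would also fill in an id_table:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/mei_cl_bus.h>

static void demo_rx(struct mei_cl_device *cldev)
{
	u8 buf[64];
	ssize_t len;

	/* Drain the receive buffer announced by the callback. */
	len = mei_cldev_recv(cldev, buf, sizeof(buf));
	if (len > 0)
		dev_dbg(&cldev->dev, "received %zd bytes\n", len);
}

static int demo_probe(struct mei_cl_device *cldev,
		      const struct mei_cl_device_id *id)
{
	return mei_cldev_register_rx_cb(cldev, demo_rx);
}

static int demo_remove(struct mei_cl_device *cldev)
{
	return 0;
}

static struct mei_cl_driver demo_driver = {
	.name	= "demo-mei-client",
	.probe	= demo_probe,
	.remove	= demo_remove,
};

module_mei_cl_driver(demo_driver);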
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5e5b2969d931..5f4d8281832b 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -7,6 +7,7 @@
#include <linux/mmzone.h>
+#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
@@ -177,6 +178,13 @@ static inline bool vma_migratable(struct vm_area_struct *vma)
if (vma->vm_flags & (VM_IO | VM_PFNMAP))
return false;
+ /*
+ * DAX device mappings require predictable access latency, so avoid
+ * incurring periodic faults.
+ */
+ if (vma_is_dax(vma))
+ return false;
+
#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
if (vma->vm_flags & VM_HUGETLB)
return false;
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index fec597fb34cb..a4860bc9b73d 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -115,6 +115,8 @@ enum {
#define AXP806_CLDO2_V_CTRL 0x25
#define AXP806_CLDO3_V_CTRL 0x26
#define AXP806_VREF_TEMP_WARN_L 0xf3
+#define AXP806_BUS_ADDR_EXT 0xfe
+#define AXP806_REG_ADDR_EXT 0xff
/* Interrupt */
#define AXP152_IRQ1_EN 0x40
@@ -226,6 +228,10 @@ enum {
#define AXP20X_OCV_MAX 0xf
/* AXP22X specific registers */
+#define AXP22X_PMIC_ADC_H 0x56
+#define AXP22X_PMIC_ADC_L 0x57
+#define AXP22X_TS_ADC_H 0x58
+#define AXP22X_TS_ADC_L 0x59
#define AXP22X_BATLOW_THRES1 0xe6
/* AXP288 specific registers */
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 76f7ef4d3a0d..f62043a75f43 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -148,6 +148,15 @@ struct cros_ec_device {
int event_size;
};
+/**
+ * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information
+ *
+ * @sensor_num: Id of the sensor, as reported by the EC.
+ */
+struct cros_ec_sensor_platform {
+ u8 sensor_num;
+};
+
/* struct cros_ec_platform - ChromeOS EC platform information
*
* @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
@@ -175,6 +184,7 @@ struct cros_ec_dev {
struct cros_ec_device *ec_dev;
struct device *dev;
u16 cmd_offset;
+ u32 features[2];
};
/**
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 76728ff37d01..1683003603f3 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -713,6 +713,90 @@ struct ec_response_get_set_value {
/* More than one command can use these structs to get/set paramters. */
#define EC_CMD_GSV_PAUSE_IN_S5 0x0c
+/*****************************************************************************/
+/* List the features supported by the firmware */
+#define EC_CMD_GET_FEATURES 0x0d
+
+/* Supported features */
+enum ec_feature_code {
+ /*
+ * This image contains a limited set of features. Another image
+ * in RW partition may support more features.
+ */
+ EC_FEATURE_LIMITED = 0,
+ /*
+ * Commands for probing/reading/writing/erasing the flash in the
+ * EC are present.
+ */
+ EC_FEATURE_FLASH = 1,
+ /*
+ * Can control the fan speed directly.
+ */
+ EC_FEATURE_PWM_FAN = 2,
+ /*
+ * Can control the intensity of the keyboard backlight.
+ */
+ EC_FEATURE_PWM_KEYB = 3,
+ /*
+ * Support Google lightbar, introduced on Pixel.
+ */
+ EC_FEATURE_LIGHTBAR = 4,
+ /* Control of LEDs */
+ EC_FEATURE_LED = 5,
+ /* Exposes an interface to control gyro and sensors.
+ * The host goes through the EC to access these sensors.
+ * In addition, the EC may provide composite sensors, like lid angle.
+ */
+ EC_FEATURE_MOTION_SENSE = 6,
+ /* The keyboard is controlled by the EC */
+ EC_FEATURE_KEYB = 7,
+ /* The AP can use part of the EC flash as persistent storage. */
+ EC_FEATURE_PSTORE = 8,
+ /* The EC monitors BIOS port 80h, and can return POST codes. */
+ EC_FEATURE_PORT80 = 9,
+ /*
+ * Thermal management: include TMP specific commands.
+ * Higher level than direct fan control.
+ */
+ EC_FEATURE_THERMAL = 10,
+ /* Can switch the screen backlight on/off */
+ EC_FEATURE_BKLIGHT_SWITCH = 11,
+ /* Can switch the wifi module on/off */
+ EC_FEATURE_WIFI_SWITCH = 12,
+ /* Monitor host events, for example through SMI or SCI */
+ EC_FEATURE_HOST_EVENTS = 13,
+ /* The EC exposes GPIO commands to control/monitor connected devices. */
+ EC_FEATURE_GPIO = 14,
+ /* The EC can send i2c messages to downstream devices. */
+ EC_FEATURE_I2C = 15,
+ /* Commands to control the charger are included */
+ EC_FEATURE_CHARGER = 16,
+ /* Simple battery support. */
+ EC_FEATURE_BATTERY = 17,
+ /*
+ * Support Smart battery protocol
+ * (Common Smart Battery System Interface Specification)
+ */
+ EC_FEATURE_SMART_BATTERY = 18,
+ /* EC can detect when the host hangs. */
+ EC_FEATURE_HANG_DETECT = 19,
+ /* Report power information, for pit only */
+ EC_FEATURE_PMU = 20,
+ /* Another Cros EC device is present downstream of this one */
+ EC_FEATURE_SUB_MCU = 21,
+ /* Support USB Power delivery (PD) commands */
+ EC_FEATURE_USB_PD = 22,
+ /* Control USB multiplexer, for audio through USB port for instance. */
+ EC_FEATURE_USB_MUX = 23,
+ /* Motion Sensor code has an internal software FIFO */
+ EC_FEATURE_MOTION_SENSE_FIFO = 24,
+};
+
+#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32))
+#define EC_FEATURE_MASK_1(event_code) (1UL << (event_code - 32))
+struct ec_response_get_features {
+ uint32_t flags[2];
+} __packed;
/*****************************************************************************/
/* Flash commands */
@@ -1315,6 +1399,24 @@ enum motionsense_command {
*/
MOTIONSENSE_CMD_KB_WAKE_ANGLE = 5,
+ /*
+ * Returns a single sensor data.
+ */
+ MOTIONSENSE_CMD_DATA = 6,
+
+ /*
+ * Perform low-level calibration. On sensors that support it, ask to
+ * do offset calibration.
+ */
+ MOTIONSENSE_CMD_PERFORM_CALIB = 10,
+
+ /*
+ * Sensor Offset command is a setter/getter command for the offset used
+ * for calibration. The offsets can be calculated by the host, or via
+ * PERFORM_CALIB command.
+ */
+ MOTIONSENSE_CMD_SENSOR_OFFSET = 11,
+
/* Number of motionsense sub-commands. */
MOTIONSENSE_NUM_CMDS
};
@@ -1335,12 +1437,18 @@ enum motionsensor_id {
enum motionsensor_type {
MOTIONSENSE_TYPE_ACCEL = 0,
MOTIONSENSE_TYPE_GYRO = 1,
+ MOTIONSENSE_TYPE_MAG = 2,
+ MOTIONSENSE_TYPE_PROX = 3,
+ MOTIONSENSE_TYPE_LIGHT = 4,
+ MOTIONSENSE_TYPE_ACTIVITY = 5,
+ MOTIONSENSE_TYPE_MAX
};
/* List of motion sensor locations. */
enum motionsensor_location {
MOTIONSENSE_LOC_BASE = 0,
MOTIONSENSE_LOC_LID = 1,
+ MOTIONSENSE_LOC_MAX,
};
/* List of motion sensor chips. */
@@ -1361,6 +1469,31 @@ enum motionsensor_chip {
*/
#define EC_MOTION_SENSE_NO_VALUE -1
+#define EC_MOTION_SENSE_INVALID_CALIB_TEMP 0x8000
+
+/* Set Calibration information */
+#define MOTION_SENSE_SET_OFFSET 1
+
+struct ec_response_motion_sensor_data {
+ /* Flags for each sensor. */
+ uint8_t flags;
+ /* Sensor number the data comes from */
+ uint8_t sensor_num;
+ /* Each sensor is up to 3-axis. */
+ union {
+ int16_t data[3];
+ struct {
+ uint16_t rsvd;
+ uint32_t timestamp;
+ } __packed;
+ struct {
+ uint8_t activity; /* motionsensor_activity */
+ uint8_t state;
+ int16_t add_info[2];
+ };
+ };
+} __packed;
+
struct ec_params_motion_sense {
uint8_t cmd;
union {
@@ -1378,9 +1511,37 @@ struct ec_params_motion_sense {
int16_t data;
} ec_rate, kb_wake_angle;
+ /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */
+ struct {
+ uint8_t sensor_num;
+
+ /*
+ * bit 0: If set (MOTION_SENSE_SET_OFFSET), set
+ * the calibration information in the EC.
+ * If unset, just retrieve calibration information.
+ */
+ uint16_t flags;
+
+ /*
+ * Temperature at calibration, in units of 0.01 C
+ * 0x8000: invalid / unknown.
+ * 0x0: 0C
+ * 0x7fff: +327.67C
+ */
+ int16_t temp;
+
+ /*
+ * Offset for calibration.
+ * Unit:
+ * Accelerometer: 1/1024 g
+ * Gyro: 1/1024 deg/s
+ * Compass: 1/16 uT
+ */
+ int16_t offset[3];
+ } __packed sensor_offset;
+
/* Used for MOTIONSENSE_CMD_INFO. */
struct {
- /* Should be element of enum motionsensor_id. */
uint8_t sensor_num;
} info;
@@ -1410,11 +1571,14 @@ struct ec_response_motion_sense {
/* Flags representing the motion sensor module. */
uint8_t module_flags;
- /* Flags for each sensor in enum motionsensor_id. */
- uint8_t sensor_flags[EC_MOTION_SENSOR_COUNT];
+ /* Number of sensors managed directly by the EC. */
+ uint8_t sensor_count;
- /* Array of all sensor data. Each sensor is 3-axis. */
- int16_t data[3*EC_MOTION_SENSOR_COUNT];
+ /*
+ * Sensor data is truncated if response_max is too small
+ * for holding all the data.
+ */
+ struct ec_response_motion_sensor_data sensor[0];
} dump;
/* Used for MOTIONSENSE_CMD_INFO. */
@@ -1429,6 +1593,9 @@ struct ec_response_motion_sense {
uint8_t chip;
} info;
+ /* Used for MOTIONSENSE_CMD_DATA */
+ struct ec_response_motion_sensor_data data;
+
/*
* Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR,
* MOTIONSENSE_CMD_SENSOR_RANGE, and
@@ -1438,6 +1605,12 @@ struct ec_response_motion_sense {
/* Current value of the parameter queried. */
int32_t ret;
} ec_rate, sensor_odr, sensor_range, kb_wake_angle;
+
+ /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */
+ struct {
+ int16_t temp;
+ int16_t offset[3];
+ } sensor_offset, perform_calib;
};
} __packed;
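For illustration only (hypothetical helper, not part of the patch): the split flags[2] word returned by EC_CMD_GET_FEATURES is intended to be tested with the EC_FEATURE_MASK_0/EC_FEATURE_MASK_1 macros added above, roughly as follows.

static bool ec_has_feature(const struct ec_response_get_features *resp,
			   enum ec_feature_code feature)
{
	/* Bits 0-31 live in flags[0], bits 32 and up in flags[1] */
	if (feature < 32)
		return resp->flags[0] & EC_FEATURE_MASK_0(feature);
	return resp->flags[1] & EC_FEATURE_MASK_1(feature);
}

/* e.g. ec_has_feature(&resp, EC_FEATURE_MOTION_SENSE_FIFO) */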
diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h
index 8e1cdbef3dad..2c0127cb06c5 100644
--- a/include/linux/mfd/davinci_voicecodec.h
+++ b/include/linux/mfd/davinci_voicecodec.h
@@ -28,8 +28,6 @@
#include <linux/mfd/core.h>
#include <linux/platform_data/edma.h>
-#include <mach/hardware.h>
-
struct regmap;
/*
@@ -99,8 +97,6 @@ struct davinci_vcif {
dma_addr_t dma_rx_addr;
};
-struct davinci_vc;
-
struct davinci_vc {
/* Device data */
struct device *dev;
diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h
index cf619dbeace2..956caa0628f5 100644
--- a/include/linux/mfd/intel_soc_pmic.h
+++ b/include/linux/mfd/intel_soc_pmic.h
@@ -26,6 +26,7 @@ struct intel_soc_pmic {
struct regmap *regmap;
struct regmap_irq_chip_data *irq_chip_data;
struct regmap_irq_chip_data *irq_chip_data_level2;
+ struct regmap_irq_chip_data *irq_chip_data_tmu;
struct device *dev;
};
diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h
index 3ca0af07fc78..ad2a9a852aea 100644
--- a/include/linux/mfd/max77620.h
+++ b/include/linux/mfd/max77620.h
@@ -180,6 +180,7 @@
#define MAX77620_SD_CFG1_FPWM_SD_MASK BIT(2)
#define MAX77620_SD_CFG1_FPWM_SD_SKIP 0
#define MAX77620_SD_CFG1_FPWM_SD_FPWM BIT(2)
+#define MAX20024_SD_CFG1_MPOK_MASK BIT(1)
#define MAX77620_SD_CFG1_FSRADE_SD_MASK BIT(0)
#define MAX77620_SD_CFG1_FSRADE_SD_DISABLE 0
#define MAX77620_SD_CFG1_FSRADE_SD_ENABLE BIT(0)
@@ -187,6 +188,7 @@
/* LDO_CNFG2 */
#define MAX77620_LDO_POWER_MODE_MASK 0xC0
#define MAX77620_LDO_POWER_MODE_SHIFT 6
+#define MAX20024_LDO_CFG2_MPOK_MASK BIT(2)
#define MAX77620_LDO_CFG2_ADE_MASK BIT(1)
#define MAX77620_LDO_CFG2_ADE_DISABLE 0
#define MAX77620_LDO_CFG2_ADE_ENABLE BIT(1)
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index 6d435a3c06bc..83701ef7d3c7 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -290,6 +290,7 @@ enum rk818_reg {
#define SWITCH2_EN BIT(6)
#define SWITCH1_EN BIT(5)
#define DEV_OFF_RST BIT(3)
+#define DEV_OFF BIT(0)
#define VB_LO_ACT BIT(4)
#define VB_LO_SEL_3500MV (7 << 0)
diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h
index cadc6543909d..e5a6cdeb77db 100644
--- a/include/linux/mfd/rn5t618.h
+++ b/include/linux/mfd/rn5t618.h
@@ -58,10 +58,13 @@
#define RN5T618_DC3CTL2 0x31
#define RN5T618_DC4CTL 0x32
#define RN5T618_DC4CTL2 0x33
+#define RN5T618_DC5CTL 0x34
+#define RN5T618_DC5CTL2 0x35
#define RN5T618_DC1DAC 0x36
#define RN5T618_DC2DAC 0x37
#define RN5T618_DC3DAC 0x38
#define RN5T618_DC4DAC 0x39
+#define RN5T618_DC5DAC 0x3a
#define RN5T618_DC1DAC_SLP 0x3b
#define RN5T618_DC2DAC_SLP 0x3c
#define RN5T618_DC3DAC_SLP 0x3d
@@ -77,6 +80,11 @@
#define RN5T618_LDO3DAC 0x4e
#define RN5T618_LDO4DAC 0x4f
#define RN5T618_LDO5DAC 0x50
+#define RN5T618_LDO6DAC 0x51
+#define RN5T618_LDO7DAC 0x52
+#define RN5T618_LDO8DAC 0x53
+#define RN5T618_LDO9DAC 0x54
+#define RN5T618_LDO10DAC 0x55
#define RN5T618_LDORTCDAC 0x56
#define RN5T618_LDORTC2DAC 0x57
#define RN5T618_LDO1DAC_SLP 0x58
@@ -231,6 +239,7 @@ enum {
enum {
RN5T567 = 0,
RN5T618,
+ RC5T619,
};
struct rn5t618 {
diff --git a/include/linux/mfd/sun4i-gpadc.h b/include/linux/mfd/sun4i-gpadc.h
new file mode 100644
index 000000000000..d7a29f246d64
--- /dev/null
+++ b/include/linux/mfd/sun4i-gpadc.h
@@ -0,0 +1,94 @@
+/* Header of ADC MFD core driver for sunxi platforms
+ *
+ * Copyright (c) 2016 Quentin Schulz <quentin.schulz@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#ifndef __SUN4I_GPADC__H__
+#define __SUN4I_GPADC__H__
+
+#define SUN4I_GPADC_CTRL0 0x00
+
+#define SUN4I_GPADC_CTRL0_ADC_FIRST_DLY(x) ((GENMASK(7, 0) & (x)) << 24)
+#define SUN4I_GPADC_CTRL0_ADC_FIRST_DLY_MODE BIT(23)
+#define SUN4I_GPADC_CTRL0_ADC_CLK_SELECT BIT(22)
+#define SUN4I_GPADC_CTRL0_ADC_CLK_DIVIDER(x) ((GENMASK(1, 0) & (x)) << 20)
+#define SUN4I_GPADC_CTRL0_FS_DIV(x) ((GENMASK(3, 0) & (x)) << 16)
+#define SUN4I_GPADC_CTRL0_T_ACQ(x) (GENMASK(15, 0) & (x))
+
+#define SUN4I_GPADC_CTRL1 0x04
+
+#define SUN4I_GPADC_CTRL1_STYLUS_UP_DEBOUNCE(x) ((GENMASK(7, 0) & (x)) << 12)
+#define SUN4I_GPADC_CTRL1_STYLUS_UP_DEBOUNCE_EN BIT(9)
+#define SUN4I_GPADC_CTRL1_TOUCH_PAN_CALI_EN BIT(6)
+#define SUN4I_GPADC_CTRL1_TP_DUAL_EN BIT(5)
+#define SUN4I_GPADC_CTRL1_TP_MODE_EN BIT(4)
+#define SUN4I_GPADC_CTRL1_TP_ADC_SELECT BIT(3)
+#define SUN4I_GPADC_CTRL1_ADC_CHAN_SELECT(x) (GENMASK(2, 0) & (x))
+
+/* TP_CTRL1 bits for sun6i SOCs */
+#define SUN6I_GPADC_CTRL1_TOUCH_PAN_CALI_EN BIT(7)
+#define SUN6I_GPADC_CTRL1_TP_DUAL_EN BIT(6)
+#define SUN6I_GPADC_CTRL1_TP_MODE_EN BIT(5)
+#define SUN6I_GPADC_CTRL1_TP_ADC_SELECT BIT(4)
+#define SUN6I_GPADC_CTRL1_ADC_CHAN_SELECT(x) (GENMASK(3, 0) & BIT(x))
+
+#define SUN4I_GPADC_CTRL2 0x08
+
+#define SUN4I_GPADC_CTRL2_TP_SENSITIVE_ADJUST(x) ((GENMASK(3, 0) & (x)) << 28)
+#define SUN4I_GPADC_CTRL2_TP_MODE_SELECT(x) ((GENMASK(1, 0) & (x)) << 26)
+#define SUN4I_GPADC_CTRL2_PRE_MEA_EN BIT(24)
+#define SUN4I_GPADC_CTRL2_PRE_MEA_THRE_CNT(x) (GENMASK(23, 0) & (x))
+
+#define SUN4I_GPADC_CTRL3 0x0c
+
+#define SUN4I_GPADC_CTRL3_FILTER_EN BIT(2)
+#define SUN4I_GPADC_CTRL3_FILTER_TYPE(x) (GENMASK(1, 0) & (x))
+
+#define SUN4I_GPADC_TPR 0x18
+
+#define SUN4I_GPADC_TPR_TEMP_ENABLE BIT(16)
+#define SUN4I_GPADC_TPR_TEMP_PERIOD(x) (GENMASK(15, 0) & (x))
+
+#define SUN4I_GPADC_INT_FIFOC 0x10
+
+#define SUN4I_GPADC_INT_FIFOC_TEMP_IRQ_EN BIT(18)
+#define SUN4I_GPADC_INT_FIFOC_TP_OVERRUN_IRQ_EN BIT(17)
+#define SUN4I_GPADC_INT_FIFOC_TP_DATA_IRQ_EN BIT(16)
+#define SUN4I_GPADC_INT_FIFOC_TP_DATA_XY_CHANGE BIT(13)
+#define SUN4I_GPADC_INT_FIFOC_TP_FIFO_TRIG_LEVEL(x) ((GENMASK(4, 0) & (x)) << 8)
+#define SUN4I_GPADC_INT_FIFOC_TP_DATA_DRQ_EN BIT(7)
+#define SUN4I_GPADC_INT_FIFOC_TP_FIFO_FLUSH BIT(4)
+#define SUN4I_GPADC_INT_FIFOC_TP_UP_IRQ_EN BIT(1)
+#define SUN4I_GPADC_INT_FIFOC_TP_DOWN_IRQ_EN BIT(0)
+
+#define SUN4I_GPADC_INT_FIFOS 0x14
+
+#define SUN4I_GPADC_INT_FIFOS_TEMP_DATA_PENDING BIT(18)
+#define SUN4I_GPADC_INT_FIFOS_FIFO_OVERRUN_PENDING BIT(17)
+#define SUN4I_GPADC_INT_FIFOS_FIFO_DATA_PENDING BIT(16)
+#define SUN4I_GPADC_INT_FIFOS_TP_IDLE_FLG BIT(2)
+#define SUN4I_GPADC_INT_FIFOS_TP_UP_PENDING BIT(1)
+#define SUN4I_GPADC_INT_FIFOS_TP_DOWN_PENDING BIT(0)
+
+#define SUN4I_GPADC_CDAT 0x1c
+#define SUN4I_GPADC_TEMP_DATA 0x20
+#define SUN4I_GPADC_DATA 0x24
+
+#define SUN4I_GPADC_IRQ_FIFO_DATA 0
+#define SUN4I_GPADC_IRQ_TEMP_DATA 1
+
+/* 10s delay before suspending the IP */
+#define SUN4I_GPADC_AUTOSUSPEND_DELAY 10000
+
+struct sun4i_gpadc_dev {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *regmap_irqc;
+ void __iomem *base;
+};
+
+#endif
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 7f55b8b41032..b9a53e013bff 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -23,6 +23,8 @@
#define REG_IRQENABLE 0x02C
#define REG_IRQCLR 0x030
#define REG_IRQWAKEUP 0x034
+#define REG_DMAENABLE_SET 0x038
+#define REG_DMAENABLE_CLEAR 0x03c
#define REG_CTRL 0x040
#define REG_ADCFSM 0x044
#define REG_CLKDIV 0x04C
@@ -36,6 +38,7 @@
#define REG_FIFO0THR 0xE8
#define REG_FIFO1CNT 0xF0
#define REG_FIFO1THR 0xF4
+#define REG_DMA1REQ 0xF8
#define REG_FIFO0 0x100
#define REG_FIFO1 0x200
@@ -126,6 +129,10 @@
#define FIFOREAD_DATA_MASK (0xfff << 0)
#define FIFOREAD_CHNLID_MASK (0xf << 16)
+/* DMA ENABLE/CLEAR Register */
+#define DMA_FIFO0 BIT(0)
+#define DMA_FIFO1 BIT(1)
+
/* Sequencer Status */
#define SEQ_STATUS BIT(5)
#define CHARGE_STEP 0x11
@@ -155,6 +162,7 @@ struct ti_tscadc_dev {
struct device *dev;
struct regmap *regmap;
void __iomem *tscadc_base;
+ phys_addr_t tscadc_phys_base;
int irq;
int used_cells; /* 1-2 */
int tsc_wires;
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 7a26286db895..fba44abd05ba 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -100,6 +100,11 @@
#define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8)
/*
+ * Some controllers have a 32-bit wide data port register
+ */
+#define TMIO_MMC_32BIT_DATA_PORT (1 << 9)
+
+/*
* Some controllers allows to set SDx actual clock
*/
#define TMIO_MMC_CLK_ACTUAL (1 << 10)
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index 4ccda8969639..eac285756b37 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -73,13 +73,15 @@
#define TPS65217_PPATH_AC_CURRENT_MASK 0x0C
#define TPS65217_PPATH_USB_CURRENT_MASK 0x03
-#define TPS65217_INT_RESERVEDM BIT(7)
#define TPS65217_INT_PBM BIT(6)
#define TPS65217_INT_ACM BIT(5)
#define TPS65217_INT_USBM BIT(4)
#define TPS65217_INT_PBI BIT(2)
#define TPS65217_INT_ACI BIT(1)
#define TPS65217_INT_USBI BIT(0)
+#define TPS65217_INT_SHIFT 4
+#define TPS65217_INT_MASK (TPS65217_INT_PBM | TPS65217_INT_ACM | \
+ TPS65217_INT_USBM)
#define TPS65217_CHGCONFIG0_TREG BIT(7)
#define TPS65217_CHGCONFIG0_DPPM BIT(6)
@@ -234,12 +236,11 @@ struct tps65217_bl_pdata {
int dft_brightness;
};
-enum tps65217_irq_type {
- TPS65217_IRQ_PB,
- TPS65217_IRQ_AC,
- TPS65217_IRQ_USB,
- TPS65217_NUM_IRQ
-};
+/* Interrupt numbers */
+#define TPS65217_IRQ_USB 0
+#define TPS65217_IRQ_AC 1
+#define TPS65217_IRQ_PB 2
+#define TPS65217_NUM_IRQ 3
/**
* struct tps65217_board - packages regulator init data
diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h
index d1db9527fab5..bccd2d68b1e3 100644
--- a/include/linux/mfd/tps65218.h
+++ b/include/linux/mfd/tps65218.h
@@ -282,10 +282,9 @@ struct tps65218 {
struct regulator_desc desc[TPS65218_NUM_REGULATOR];
struct tps_info *info[TPS65218_NUM_REGULATOR];
struct regmap *regmap;
+ u8 *strobes;
};
-int tps65218_reg_read(struct tps65218 *tps, unsigned int reg,
- unsigned int *val);
int tps65218_reg_write(struct tps65218 *tps, unsigned int reg,
unsigned int val, unsigned int level);
int tps65218_set_bits(struct tps65218 *tps, unsigned int reg,
diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h
index 1a603701550e..b25d0297ba88 100644
--- a/include/linux/mfd/tps65912.h
+++ b/include/linux/mfd/tps65912.h
@@ -319,21 +319,7 @@ struct tps65912 {
struct regmap_irq_chip_data *irq_data;
};
-static const struct regmap_range tps65912_yes_ranges[] = {
- regmap_reg_range(TPS65912_INT_STS, TPS65912_GPIO5),
-};
-
-static const struct regmap_access_table tps65912_volatile_table = {
- .yes_ranges = tps65912_yes_ranges,
- .n_yes_ranges = ARRAY_SIZE(tps65912_yes_ranges),
-};
-
-static const struct regmap_config tps65912_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
- .cache_type = REGCACHE_RBTREE,
- .volatile_table = &tps65912_volatile_table,
-};
+extern const struct regmap_config tps65912_regmap_config;
int tps65912_device_init(struct tps65912 *tps);
int tps65912_device_exit(struct tps65912 *tps);
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 47492c9631b3..1629a0c32679 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -31,7 +31,11 @@ struct mii_if_info {
extern int mii_link_ok (struct mii_if_info *mii);
extern int mii_nway_restart (struct mii_if_info *mii);
extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
+extern int mii_ethtool_get_link_ksettings(
+ struct mii_if_info *mii, struct ethtool_link_ksettings *cmd);
extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
+extern int mii_ethtool_set_link_ksettings(
+ struct mii_if_info *mii, const struct ethtool_link_ksettings *cmd);
extern int mii_check_gmii_support(struct mii_if_info *mii);
extern void mii_check_link (struct mii_if_info *mii);
extern unsigned int mii_check_media (struct mii_if_info *mii,
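A sketch of how a MAC driver would wire up the new ksettings helpers declared above; struct my_priv, its mii_if member and the ops name are placeholders, not part of this patch.

static int my_get_link_ksettings(struct net_device *ndev,
				 struct ethtool_link_ksettings *cmd)
{
	struct my_priv *priv = netdev_priv(ndev);

	return mii_ethtool_get_link_ksettings(&priv->mii_if, cmd);
}

static int my_set_link_ksettings(struct net_device *ndev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct my_priv *priv = netdev_priv(ndev);

	return mii_ethtool_set_link_ksettings(&priv->mii_if, cmd);
}

static const struct ethtool_ops my_ethtool_ops = {
	.get_link_ksettings = my_get_link_ksettings,
	.set_link_ksettings = my_set_link_ksettings,
};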
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 722698a43d79..ed30d5d713e3 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -32,6 +32,7 @@
#define STORE_QUEUE_MINOR 155 /* unused */
#define I2O_MINOR 166
#define MICROCODE_MINOR 184
+#define IRNET_MINOR 187
#define VFIO_MINOR 196
#define TUN_MINOR 200
#define CUSE_MINOR 203
@@ -72,6 +73,13 @@ extern int misc_register(struct miscdevice *misc);
extern void misc_deregister(struct miscdevice *misc);
/*
+ * Helper macro for drivers that don't do anything special in the initcall.
+ * This helps in eliminating boilerplate code.
+ */
+#define builtin_misc_device(__misc_device) \
+ builtin_driver(__misc_device, misc_register)
+
+/*
* Helper macro for drivers that don't do anything special in module init / exit
* call. This helps in eleminating of boilerplate code.
*/
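A minimal sketch of the new helper in use (device name and fops are hypothetical, not part of the patch):

static const struct file_operations my_fops = {
	.owner = THIS_MODULE,
	/* .read/.write/.unlocked_ioctl as needed */
};

static struct miscdevice my_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "my_misc",
	.fops  = &my_fops,
};

/* Built-in only: an initcall registers the device, no unregister path */
builtin_misc_device(my_misc);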
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index c9f379689dd0..6533c16e27ad 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1384,6 +1384,8 @@ int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
bool *vlan_offload_disabled);
+void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
+ struct _rule_hw *eth_header);
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
@@ -1460,7 +1462,7 @@ int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
u32 max_range_qpn);
-cycle_t mlx4_read_clock(struct mlx4_dev *dev);
+u64 mlx4_read_clock(struct mlx4_dev *dev);
struct mlx4_active_ports {
DECLARE_BITMAP(ports, MLX4_MAX_PORTS);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 58276144ba81..52b437431c6a 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -277,6 +277,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
+ MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
@@ -552,6 +553,15 @@ struct mlx5_eqe_vport_change {
__be32 rsvd1[6];
} __packed;
+struct mlx5_eqe_port_module {
+ u8 reserved_at_0[1];
+ u8 module;
+ u8 reserved_at_2[1];
+ u8 module_status;
+ u8 reserved_at_4[2];
+ u8 error_type;
+} __packed;
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -565,6 +575,7 @@ union ev_data {
struct mlx5_eqe_page_req req_pages;
struct mlx5_eqe_page_fault page_fault;
struct mlx5_eqe_vport_change vport_change;
+ struct mlx5_eqe_port_module port_module;
} __packed;
struct mlx5_eqe {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index ecc451d89ccd..735b36335f29 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -104,6 +104,8 @@ enum {
enum {
MLX5_REG_QETCR = 0x4005,
MLX5_REG_QTCT = 0x400a,
+ MLX5_REG_DCBX_PARAM = 0x4020,
+ MLX5_REG_DCBX_APP = 0x4021,
MLX5_REG_PCAP = 0x5001,
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
@@ -123,6 +125,11 @@ enum {
MLX5_REG_MLCR = 0x902b,
};
+enum mlx5_dcbx_oper_mode {
+ MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0,
+ MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
+};
+
enum {
MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
@@ -208,7 +215,7 @@ struct mlx5_cmd_first {
struct mlx5_cmd_msg {
struct list_head list;
- struct cache_ent *cache;
+ struct cmd_msg_cache *parent;
u32 len;
struct mlx5_cmd_first first;
struct mlx5_cmd_mailbox *next;
@@ -228,17 +235,17 @@ struct mlx5_cmd_debug {
u16 outlen;
};
-struct cache_ent {
+struct cmd_msg_cache {
/* protect block chain allocations
*/
spinlock_t lock;
struct list_head head;
+ unsigned int max_inbox_size;
+ unsigned int num_ent;
};
-struct cmd_msg_cache {
- struct cache_ent large;
- struct cache_ent med;
-
+enum {
+ MLX5_NUM_COMMAND_CACHES = 5,
};
struct mlx5_cmd_stats {
@@ -281,7 +288,7 @@ struct mlx5_cmd {
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
struct pci_pool *pool;
struct mlx5_cmd_debug dbg;
- struct cmd_msg_cache cache;
+ struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
int checksum_disabled;
struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};
@@ -310,6 +317,13 @@ struct mlx5_buf {
u8 page_shift;
};
+struct mlx5_frag_buf {
+ struct mlx5_buf_list *frags;
+ int npages;
+ int size;
+ u8 page_shift;
+};
+
struct mlx5_eq_tasklet {
struct list_head list;
struct list_head process_list;
@@ -498,6 +512,31 @@ struct mlx5_rl_table {
struct mlx5_rl_entry *rl_entry;
};
+enum port_module_event_status_type {
+ MLX5_MODULE_STATUS_PLUGGED = 0x1,
+ MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
+ MLX5_MODULE_STATUS_ERROR = 0x3,
+ MLX5_MODULE_STATUS_NUM = 0x3,
+};
+
+enum port_module_event_error_type {
+ MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
+ MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
+ MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
+ MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
+ MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
+ MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
+ MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
+ MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
+ MLX5_MODULE_EVENT_ERROR_UNKNOWN,
+ MLX5_MODULE_EVENT_ERROR_NUM,
+};
+
+struct mlx5_port_module_event_stats {
+ u64 status_counters[MLX5_MODULE_STATUS_NUM];
+ u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
+};
+
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table eq_table;
@@ -559,6 +598,8 @@ struct mlx5_priv {
unsigned long pci_dev_data;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;
+
+ struct mlx5_port_module_event_stats pme_stats;
};
enum mlx5_device_state {
@@ -787,6 +828,9 @@ int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
+int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+ struct mlx5_frag_buf *buf, int node);
+void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
@@ -831,6 +875,7 @@ void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
+void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 93ebc5e21334..949b24b6c479 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -42,6 +42,10 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
};
+enum {
+ MLX5_FLOW_TABLE_TUNNEL_EN = BIT(0),
+};
+
#define LEFTOVERS_RULE_NUM 2
static inline void build_leftovers_ft_param(int *priority,
int *n_ent,
@@ -69,8 +73,8 @@ enum mlx5_flow_namespace_type {
struct mlx5_flow_table;
struct mlx5_flow_group;
-struct mlx5_flow_rule;
struct mlx5_flow_namespace;
+struct mlx5_flow_handle;
struct mlx5_flow_spec {
u8 match_criteria_enable;
@@ -97,13 +101,15 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
int max_num_groups,
- u32 level);
+ u32 level,
+ u32 flags);
struct mlx5_flow_table *
mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
- u32 level);
+ u32 level,
+ u32 flags);
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
int prio,
@@ -124,21 +130,28 @@ struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
+struct mlx5_flow_act {
+ u32 action;
+ u32 flow_tag;
+ u32 encap_id;
+};
+
/* Single destination per rule.
* Group ID is implied by the match criteria.
*/
-struct mlx5_flow_rule *
-mlx5_add_flow_rule(struct mlx5_flow_table *ft,
- struct mlx5_flow_spec *spec,
- u32 action,
- u32 flow_tag,
- struct mlx5_flow_destination *dest);
-void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
-
-int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
- struct mlx5_flow_destination *dest);
-
-struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule);
+struct mlx5_flow_handle *
+mlx5_add_flow_rules(struct mlx5_flow_table *ft,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int dest_num);
+void mlx5_del_flow_rules(struct mlx5_flow_handle *fr);
+
+int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
+ struct mlx5_flow_destination *new_dest,
+ struct mlx5_flow_destination *old_dest);
+
+struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
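A rough sketch of the reworked steering API above: a single forwarding rule created with the new mlx5_flow_act descriptor. The table, spec and destination are assumed to be built elsewhere, and MLX5_FLOW_CONTEXT_ACTION_FWD_DEST is the pre-existing action flag; the wrapper name is hypothetical.

static int add_fwd_rule(struct mlx5_flow_table *ft,
			struct mlx5_flow_spec *spec,
			struct mlx5_flow_destination *dest,
			struct mlx5_flow_handle **rule)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
	};

	/* One destination here; several go in a dest[] array with dest_num > 1 */
	*rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	return PTR_ERR_OR_ZERO(*rule);
}

/* Tear down later with mlx5_del_flow_rules(rule) */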
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 6045d4d58065..a852e9db6f0d 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -83,6 +83,7 @@ enum {
MLX5_CMD_OP_SET_HCA_CAP = 0x109,
MLX5_CMD_OP_QUERY_ISSI = 0x10a,
MLX5_CMD_OP_SET_ISSI = 0x10b,
+ MLX5_CMD_OP_SET_DRIVER_VERSION = 0x10d,
MLX5_CMD_OP_CREATE_MKEY = 0x200,
MLX5_CMD_OP_QUERY_MKEY = 0x201,
MLX5_CMD_OP_DESTROY_MKEY = 0x202,
@@ -145,6 +146,12 @@ enum {
MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
MLX5_CMD_OP_SET_RATE_LIMIT = 0x780,
MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781,
+ MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782,
+ MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783,
+ MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT = 0x784,
+ MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT = 0x785,
+ MLX5_CMD_OP_CREATE_QOS_PARA_VPORT = 0x786,
+ MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT = 0x787,
MLX5_CMD_OP_ALLOC_PD = 0x800,
MLX5_CMD_OP_DEALLOC_PD = 0x801,
MLX5_CMD_OP_ALLOC_UAR = 0x802,
@@ -537,13 +544,27 @@ struct mlx5_ifc_e_switch_cap_bits {
struct mlx5_ifc_qos_cap_bits {
u8 packet_pacing[0x1];
- u8 reserved_0[0x1f];
- u8 reserved_1[0x20];
+ u8 esw_scheduling[0x1];
+ u8 reserved_at_2[0x1e];
+
+ u8 reserved_at_20[0x20];
+
u8 packet_pacing_max_rate[0x20];
+
u8 packet_pacing_min_rate[0x20];
- u8 reserved_2[0x10];
+
+ u8 reserved_at_80[0x10];
u8 packet_pacing_rate_table_size[0x10];
- u8 reserved_3[0x760];
+
+ u8 esw_element_type[0x10];
+ u8 esw_tsar_type[0x10];
+
+ u8 reserved_at_c0[0x10];
+ u8 max_qos_para_vport[0x10];
+
+ u8 max_tsar_bw_share[0x20];
+
+ u8 reserved_at_100[0x700];
};
struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
@@ -556,7 +577,7 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 self_lb_en_modifiable[0x1];
u8 reserved_at_9[0x2];
u8 max_lso_cap[0x5];
- u8 reserved_at_10[0x2];
+ u8 multi_pkt_send_wqe[0x2];
u8 wqe_inline_mode[0x2];
u8 rss_ind_tbl_cap[0x4];
u8 reg_umr_sq[0x1];
@@ -804,7 +825,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 early_vf_enable[0x1];
u8 reserved_at_1a9[0x2];
u8 local_ca_ack_delay[0x5];
- u8 reserved_at_1af[0x2];
+ u8 port_module_event[0x1];
+ u8 reserved_at_1b0[0x1];
u8 ports_check[0x1];
u8 reserved_at_1b2[0x1];
u8 disable_link_up[0x1];
@@ -888,7 +910,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_pg_sz[0x8];
u8 bf[0x1];
- u8 reserved_at_261[0x1];
+ u8 driver_version[0x1];
u8 pad_tx_eth_packet[0x1];
u8 reserved_at_263[0x8];
u8 log_bf_reg_size[0x5];
@@ -2333,6 +2355,30 @@ struct mlx5_ifc_sqc_bits {
struct mlx5_ifc_wq_bits wq;
};
+enum {
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR = 0x0,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
+};
+
+struct mlx5_ifc_scheduling_context_bits {
+ u8 element_type[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 element_attributes[0x20];
+
+ u8 parent_element_id[0x20];
+
+ u8 reserved_at_60[0x40];
+
+ u8 bw_share[0x20];
+
+ u8 max_average_bw[0x20];
+
+ u8 reserved_at_e0[0x120];
+};
+
struct mlx5_ifc_rqtc_bits {
u8 reserved_at_0[0xa0];
@@ -2844,7 +2890,7 @@ struct mlx5_ifc_xrqc_bits {
struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;
- u8 reserved_at_180[0x200];
+ u8 reserved_at_180[0x880];
struct mlx5_ifc_wq_bits wq;
};
@@ -2920,6 +2966,29 @@ struct mlx5_ifc_register_loopback_control_bits {
u8 reserved_at_20[0x60];
};
+struct mlx5_ifc_vport_tc_element_bits {
+ u8 traffic_class[0x4];
+ u8 reserved_at_4[0xc];
+ u8 vport_number[0x10];
+};
+
+struct mlx5_ifc_vport_element_bits {
+ u8 reserved_at_0[0x10];
+ u8 vport_number[0x10];
+};
+
+enum {
+ TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0,
+ TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1,
+ TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
+};
+
+struct mlx5_ifc_tsar_element_bits {
+ u8 reserved_at_0[0x8];
+ u8 tsar_type[0x8];
+ u8 reserved_at_10[0x10];
+};
+
struct mlx5_ifc_teardown_hca_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -3540,6 +3609,39 @@ struct mlx5_ifc_query_special_contexts_in_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_query_scheduling_element_out_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0xc0];
+
+ struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+ u8 reserved_at_300[0x100];
+};
+
+enum {
+ SCHEDULING_HIERARCHY_E_SWITCH = 0x2,
+};
+
+struct mlx5_ifc_query_scheduling_element_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 scheduling_hierarchy[0x8];
+ u8 reserved_at_48[0x18];
+
+ u8 scheduling_element_id[0x20];
+
+ u8 reserved_at_80[0x180];
+};
+
struct mlx5_ifc_query_rqt_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -3904,6 +4006,25 @@ struct mlx5_ifc_query_issi_in_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_set_driver_version_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_driver_version_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+ u8 driver_version[64][0x8];
+};
+
struct mlx5_ifc_query_hca_vport_pkey_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4725,6 +4846,43 @@ struct mlx5_ifc_modify_sq_in_bits {
struct mlx5_ifc_sqc_bits ctx;
};
+struct mlx5_ifc_modify_scheduling_element_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x1c0];
+};
+
+enum {
+ MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE = 0x1,
+ MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW = 0x2,
+};
+
+struct mlx5_ifc_modify_scheduling_element_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 scheduling_hierarchy[0x8];
+ u8 reserved_at_48[0x18];
+
+ u8 scheduling_element_id[0x20];
+
+ u8 reserved_at_80[0x20];
+
+ u8 modify_bitmask[0x20];
+
+ u8 reserved_at_c0[0x40];
+
+ struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+ u8 reserved_at_300[0x100];
+};
+
struct mlx5_ifc_modify_rqt_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -5390,6 +5548,30 @@ struct mlx5_ifc_destroy_sq_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_destroy_scheduling_element_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x1c0];
+};
+
+struct mlx5_ifc_destroy_scheduling_element_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 scheduling_hierarchy[0x8];
+ u8 reserved_at_48[0x18];
+
+ u8 scheduling_element_id[0x20];
+
+ u8 reserved_at_80[0x180];
+};
+
struct mlx5_ifc_destroy_rqt_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -6017,6 +6199,36 @@ struct mlx5_ifc_create_sq_in_bits {
struct mlx5_ifc_sqc_bits ctx;
};
+struct mlx5_ifc_create_scheduling_element_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ u8 scheduling_element_id[0x20];
+
+ u8 reserved_at_a0[0x160];
+};
+
+struct mlx5_ifc_create_scheduling_element_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 scheduling_hierarchy[0x8];
+ u8 reserved_at_48[0x18];
+
+ u8 reserved_at_60[0xa0];
+
+ struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+ u8 reserved_at_300[0x100];
+};
+
struct mlx5_ifc_create_rqt_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index b3065acd20b4..e527732fb31b 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -94,6 +94,9 @@ enum mlx5e_link_mode {
#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
+#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
+
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);
@@ -138,8 +141,12 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
int mlx5_max_tc(struct mlx5_core_dev *mdev);
int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
+int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
+ u8 prio, u8 *tc);
int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
+int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *bw_pct);
int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
u8 *max_bw_unit);
@@ -155,4 +162,6 @@ void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
u16 offset, u16 size, u8 *data);
+int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
+int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
#endif /* __MLX5_PORT_H__ */
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
index 33c97dc900f8..1cde0fd53f90 100644
--- a/include/linux/mlx5/srq.h
+++ b/include/linux/mlx5/srq.h
@@ -55,7 +55,7 @@ struct mlx5_srq_attr {
u32 lwm;
u32 user_index;
u64 db_record;
- u64 *pas;
+ __be64 *pas;
};
struct mlx5_core_dev;
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 451b0bde9083..ec35157ea725 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -36,6 +36,12 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
+enum {
+ MLX5_CAP_INLINE_MODE_L2,
+ MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
+ MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
+};
+
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport);
@@ -43,8 +49,8 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 state);
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr);
-void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
- u8 *min_inline);
+int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+ u16 vport, u8 *min_inline);
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 min_inline);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a92c8d73aeaf..fe6b4036664a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -292,36 +292,23 @@ extern pgprot_t protection_map[16];
* pgoff should be used in favour of virtual_address, if possible.
*/
struct vm_fault {
+ struct vm_area_struct *vma; /* Target VMA */
unsigned int flags; /* FAULT_FLAG_xxx flags */
gfp_t gfp_mask; /* gfp mask to be used for allocations */
pgoff_t pgoff; /* Logical page offset based on vma */
- void __user *virtual_address; /* Faulting virtual address */
+ unsigned long address; /* Faulting virtual address */
+ pmd_t *pmd; /* Pointer to pmd entry matching
+ * the 'address' */
+ pte_t orig_pte; /* Value of PTE at the time of fault */
- struct page *cow_page; /* Handler may choose to COW */
+ struct page *cow_page; /* Page handler may use for COW fault */
+ struct mem_cgroup *memcg; /* Cgroup cow_page belongs to */
struct page *page; /* ->fault handlers should return a
* page here, unless VM_FAULT_NOPAGE
* is set (which is also implied by
* VM_FAULT_ERROR).
*/
- void *entry; /* ->fault handler can alternatively
- * return locked DAX entry. In that
- * case handler should return
- * VM_FAULT_DAX_LOCKED and fill in
- * entry here.
- */
-};
-
-/*
- * Page fault context: passes though page fault handler instead of endless list
- * of function arguments.
- */
-struct fault_env {
- struct vm_area_struct *vma; /* Target VMA */
- unsigned long address; /* Faulting virtual address */
- unsigned int flags; /* FAULT_FLAG_xxx flags */
- pmd_t *pmd; /* Pointer to pmd entry matching
- * the 'address'
- */
+ /* These three entries are valid only while holding ptl lock */
pte_t *pte; /* Pointer to pte entry matching
* the 'address'. NULL if the page
* table hasn't been allocated.
@@ -351,7 +338,7 @@ struct vm_operations_struct {
int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
pmd_t *, unsigned int flags);
- void (*map_pages)(struct fault_env *fe,
+ void (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
/* notification that a previously read-only page is about to become
@@ -625,8 +612,10 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
return pte;
}
-int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg,
+int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
struct page *page);
+int finish_fault(struct vm_fault *vmf);
+int finish_mkwrite_fault(struct vm_fault *vmf);
#endif
/*
@@ -1110,7 +1099,7 @@ static inline void clear_page_pfmemalloc(struct page *page)
#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
#define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */
#define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */
-#define VM_FAULT_DAX_LOCKED 0x1000 /* ->fault has locked DAX entry */
+#define VM_FAULT_DONE_COW 0x1000 /* ->fault has fully handled COW */
#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
@@ -1221,6 +1210,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
+int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
+ spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
@@ -1270,19 +1261,18 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
+extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long addr, void *buf, int len, unsigned int gup_flags);
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas);
+ struct vm_area_struct **vmas, int *locked);
long get_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages, int *locked);
-long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- struct page **pages, unsigned int gup_flags);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
@@ -1768,6 +1758,8 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
return ptl;
}
+extern void __init pagecache_init(void);
+
extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
unsigned long zone_start_pfn, unsigned long *zholes_size);
@@ -2097,7 +2089,7 @@ extern void truncate_inode_pages_final(struct address_space *);
/* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
-extern void filemap_map_pages(struct fault_env *fe,
+extern void filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
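As a sketch of what the consolidation above means for callees: a ->map_pages() implementation now receives the unified struct vm_fault and reads the VMA and faulting address from it rather than from a separate fault_env (the body is elided; my_map_pages is a placeholder).

static void my_map_pages(struct vm_fault *vmf,
			 pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	struct vm_area_struct *vma = vmf->vma;	/* was fe->vma */
	unsigned long address = vmf->address;	/* was fe->address */

	/* ... walk the page cache and call alloc_set_pte(vmf, ...) ... */
}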
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 4a8acedf4b7d..808751d7b737 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -473,6 +473,7 @@ struct mm_struct {
*/
struct task_struct __rcu *owner;
#endif
+ struct user_namespace *user_ns;
/* store ref to file /proc/<pid>/exe symlink points to */
struct file __rcu *exe_file;
@@ -508,10 +509,6 @@ struct mm_struct {
bool tlb_flush_pending;
#endif
struct uprobes_state uprobes_state;
-#ifdef CONFIG_X86_INTEL_MPX
- /* address of the bounds directory */
- void __user *bd_addr;
-#endif
#ifdef CONFIG_HUGETLB_PAGE
atomic_long_t hugetlb_usage;
#endif
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 73fad83acbcb..95d69d498296 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -89,6 +89,8 @@ struct mmc_ext_csd {
unsigned int boot_ro_lock; /* ro lock support */
bool boot_ro_lockable;
bool ffu_capable; /* Firmware upgrade support */
+ bool cmdq_support; /* Command Queue supported */
+ unsigned int cmdq_depth; /* Command Queue depth */
#define MMC_FIRMWARE_LEN 8
u8 fwrev[MMC_FIRMWARE_LEN]; /* FW version */
u8 raw_exception_status; /* 54 */
@@ -207,18 +209,6 @@ struct sdio_func_tuple;
#define SDIO_MAX_FUNCS 7
-enum mmc_blk_status {
- MMC_BLK_SUCCESS = 0,
- MMC_BLK_PARTIAL,
- MMC_BLK_CMD_ERR,
- MMC_BLK_RETRY,
- MMC_BLK_ABORT,
- MMC_BLK_DATA_ERR,
- MMC_BLK_ECC_ERR,
- MMC_BLK_NOMEDIUM,
- MMC_BLK_NEW_REQUEST,
-};
-
/* The number of MMC physical partitions. These consist of:
* boot partitions (2), general purpose partitions (4) and
* RPMB partition (1) in MMC v4.4.
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 2b953eb8ceae..e33cc748dcfe 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -15,6 +15,18 @@ struct request;
struct mmc_data;
struct mmc_request;
+enum mmc_blk_status {
+ MMC_BLK_SUCCESS = 0,
+ MMC_BLK_PARTIAL,
+ MMC_BLK_CMD_ERR,
+ MMC_BLK_RETRY,
+ MMC_BLK_ABORT,
+ MMC_BLK_DATA_ERR,
+ MMC_BLK_ECC_ERR,
+ MMC_BLK_NOMEDIUM,
+ MMC_BLK_NEW_REQUEST,
+};
+
struct mmc_command {
u32 opcode;
u32 arg;
@@ -150,7 +162,8 @@ struct mmc_async_req;
extern int mmc_stop_bkops(struct mmc_card *);
extern int mmc_read_bkops_status(struct mmc_card *);
extern struct mmc_async_req *mmc_start_req(struct mmc_host *,
- struct mmc_async_req *, int *);
+ struct mmc_async_req *,
+ enum mmc_blk_status *);
extern int mmc_interrupt_hpi(struct mmc_card *);
extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *);
extern void mmc_wait_for_req_done(struct mmc_host *host,
@@ -163,6 +176,7 @@ extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
+extern int mmc_abort_tuning(struct mmc_host *host, u32 opcode);
extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
#define MMC_ERASE_ARG 0x00000000
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index f5af2bd35e7f..15db6f83f53f 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -39,6 +39,12 @@ enum {
EVENT_DATA_ERROR,
};
+enum dw_mci_cookie {
+ COOKIE_UNMAPPED,
+ COOKIE_PRE_MAPPED, /* mapped by pre_req() of dwmmc */
+ COOKIE_MAPPED, /* mapped by prepare_data() of dwmmc */
+};
+
struct mmc_data;
enum {
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 0b2439441cc8..8bc884121465 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -93,8 +93,7 @@ struct mmc_host_ops {
*/
void (*post_req)(struct mmc_host *host, struct mmc_request *req,
int err);
- void (*pre_req)(struct mmc_host *host, struct mmc_request *req,
- bool is_first_req);
+ void (*pre_req)(struct mmc_host *host, struct mmc_request *req);
void (*request)(struct mmc_host *host, struct mmc_request *req);
/*
@@ -173,7 +172,7 @@ struct mmc_async_req {
* Check error status of completed mmc request.
* Returns 0 if success otherwise non zero.
*/
- int (*err_check) (struct mmc_card *, struct mmc_async_req *);
+ enum mmc_blk_status (*err_check)(struct mmc_card *, struct mmc_async_req *);
};
/**
@@ -198,14 +197,12 @@ struct mmc_slot {
* @is_new_req wake up reason was new request
* @is_waiting_last_req mmc context waiting for single running request
* @wait wait queue
- * @lock lock to protect data fields
*/
struct mmc_context_info {
bool is_done_rcv;
bool is_new_req;
bool is_waiting_last_req;
wait_queue_head_t wait;
- spinlock_t lock;
};
struct regulator;
@@ -495,11 +492,6 @@ static inline int mmc_host_uhs(struct mmc_host *host)
MMC_CAP_UHS_DDR50);
}
-static inline int mmc_host_packed_wr(struct mmc_host *host)
-{
- return host->caps2 & MMC_CAP2_PACKED_WR;
-}
-
static inline int mmc_card_hs(struct mmc_card *card)
{
return card->host->ios.timing == MMC_TIMING_SD_HS ||
@@ -546,6 +538,11 @@ static inline void mmc_retune_recheck(struct mmc_host *host)
host->retune_now = 1;
}
+static inline bool mmc_can_retune(struct mmc_host *host)
+{
+ return host->can_retune == 1;
+}
+
void mmc_retune_pause(struct mmc_host *host);
void mmc_retune_unpause(struct mmc_host *host);
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index c376209c70ef..672730acc705 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -84,6 +84,13 @@
#define MMC_APP_CMD 55 /* ac [31:16] RCA R1 */
#define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */
+ /* class 11 */
+#define MMC_QUE_TASK_PARAMS 44 /* ac [20:16] task id R1 */
+#define MMC_QUE_TASK_ADDR 45 /* ac [31:0] data addr R1 */
+#define MMC_EXECUTE_READ_TASK 46 /* adtc [20:16] task id R1 */
+#define MMC_EXECUTE_WRITE_TASK 47 /* adtc [20:16] task id R1 */
+#define MMC_CMDQ_TASK_MGMT 48 /* ac [20:16] task id R1b */
+
static inline bool mmc_op_multi(u32 opcode)
{
return opcode == MMC_WRITE_MULTIPLE_BLOCK ||
@@ -272,6 +279,7 @@ struct _mmc_csd {
* EXT_CSD fields
*/
+#define EXT_CSD_CMDQ_MODE_EN 15 /* R/W */
#define EXT_CSD_FLUSH_CACHE 32 /* W */
#define EXT_CSD_CACHE_CTRL 33 /* R/W */
#define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */
@@ -331,6 +339,8 @@ struct _mmc_csd {
#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */
#define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */
#define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */
+#define EXT_CSD_CMDQ_DEPTH 307 /* RO */
+#define EXT_CSD_CMDQ_SUPPORT 308 /* RO */
#define EXT_CSD_SUPPORTED_MODE 493 /* RO */
#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */
#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */
@@ -438,6 +448,13 @@ struct _mmc_csd {
#define EXT_CSD_MANUAL_BKOPS_MASK 0x01
/*
+ * Command Queue
+ */
+#define EXT_CSD_CMDQ_MODE_ENABLED BIT(0)
+#define EXT_CSD_CMDQ_DEPTH_MASK GENMASK(4, 0)
+#define EXT_CSD_CMDQ_SUPPORTED BIT(0)
+
+/*
* MMC_SWITCH access modes
*/
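A minimal sketch of how the new CMDQ bytes could be decoded from a raw EXT_CSD buffer into the fields added to struct mmc_ext_csd; the function name is hypothetical, and the +1 follows the eMMC 5.1 encoding of the depth field.

static void decode_cmdq_caps(struct mmc_card *card, const u8 *ext_csd)
{
	card->ext_csd.cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT] &
				     EXT_CSD_CMDQ_SUPPORTED;
	if (card->ext_csd.cmdq_support)
		/* The field stores N, the usable queue depth is N + 1 */
		card->ext_csd.cmdq_depth = (ext_csd[EXT_CSD_CMDQ_DEPTH] &
					    EXT_CSD_CMDQ_DEPTH_MASK) + 1;
}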
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 3945a8c9d3cb..a7972cd3bc14 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -29,5 +29,6 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
void mmc_gpio_set_cd_isr(struct mmc_host *host,
irqreturn_t (*isr)(int irq, void *dev_id));
void mmc_gpiod_request_cd_irq(struct mmc_host *host);
+bool mmc_can_gpio_cd(struct mmc_host *host);
#endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0f088f3a2fed..36d9896fbc1e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -246,7 +246,7 @@ struct lruvec {
#define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8)
/* LRU Isolation modes. */
-typedef unsigned __bitwise__ isolate_mode_t;
+typedef unsigned __bitwise isolate_mode_t;
enum zone_watermarks {
WMARK_MIN,
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index ed84c07f6a51..8a57f0b1242d 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -175,7 +175,8 @@ struct ap_device_id {
kernel_ulong_t driver_info;
};
-#define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01
+#define AP_DEVICE_ID_MATCH_CARD_TYPE 0x01
+#define AP_DEVICE_ID_MATCH_QUEUE_TYPE 0x02
/* s390 css bus devices (subchannels) */
struct css_device_id {
diff --git a/include/linux/module.h b/include/linux/module.h
index 0c3207d26ac0..7c84273d60b9 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -399,7 +399,7 @@ struct module {
/* Arch-specific module values */
struct mod_arch_specific arch;
- unsigned int taints; /* same bits as kernel:tainted */
+ unsigned long taints; /* same bits as kernel:taint_flags */
#ifdef CONFIG_GENERIC_BUG
/* Support for BUG */
@@ -412,7 +412,7 @@ struct module {
/* Protected by RCU and/or module_mutex: use rcu_dereference() */
struct mod_kallsyms *kallsyms;
struct mod_kallsyms core_kallsyms;
-
+
/* Section attributes */
struct module_sect_attrs *sect_attrs;
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 1172cce949a4..c6f55158d5e5 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -79,12 +79,12 @@ extern void mnt_drop_write(struct vfsmount *mnt);
extern void mnt_drop_write_file(struct file *file);
extern void mntput(struct vfsmount *mnt);
extern struct vfsmount *mntget(struct vfsmount *mnt);
-extern struct vfsmount *mnt_clone_internal(struct path *path);
+extern struct vfsmount *mnt_clone_internal(const struct path *path);
extern int __mnt_is_readonly(struct vfsmount *mnt);
extern bool mnt_may_suid(struct vfsmount *mnt);
struct path;
-extern struct vfsmount *clone_private_mount(struct path *path);
+extern struct vfsmount *clone_private_mount(const struct path *path);
struct file_system_type;
extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
@@ -98,4 +98,6 @@ extern dev_t name_to_dev_t(const char *name);
extern unsigned int sysctl_mount_max;
+extern bool path_is_mountpoint(const struct path *path);
+
#endif /* _LINUX_MOUNT_H */
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index d8905a229f34..c5f3a012ae62 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -142,6 +142,12 @@ enum nand_ecc_algo {
*/
#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
#define NAND_ECC_MAXIMIZE BIT(1)
+/*
+ * If your controller already sends the required NAND commands when
+ * reading or writing a page, then the framework is not supposed to
+ * send READ0 and SEQIN/PAGEPROG respectively.
+ */
+#define NAND_ECC_CUSTOM_PAGE_ACCESS BIT(2)
/* Bit mask for flags passed to do_nand_read_ecc */
#define NAND_GET_DEVICE 0x80
@@ -186,6 +192,7 @@ enum nand_ecc_algo {
/* Macros to identify the above */
#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
+#define NAND_HAS_SUBPAGE_WRITE(chip) !((chip)->options & NAND_NO_SUBPAGE_WRITE)
/* Non chip related options */
/* This option skips the bbt scan during initialization. */
@@ -210,6 +217,16 @@ enum nand_ecc_algo {
*/
#define NAND_USE_BOUNCE_BUFFER 0x00100000
+/*
+ * In case your controller is implementing ->cmd_ctrl() and is relying on the
+ * default ->cmdfunc() implementation, you may want to let the core handle the
+ * tCCS delay which is required when a column change (RNDIN or RNDOUT) is
+ * requested.
+ * If your controller already takes care of this delay, you don't need to set
+ * this flag.
+ */
+#define NAND_WAIT_TCCS 0x00200000
+
/* Options set by nand scan */
/* Nand scan has allocated controller struct */
#define NAND_CONTROLLER_ALLOC 0x80000000
@@ -558,6 +575,11 @@ struct nand_ecc_ctrl {
int page);
};
+static inline int nand_standard_page_accessors(struct nand_ecc_ctrl *ecc)
+{
+ return !(ecc->options & NAND_ECC_CUSTOM_PAGE_ACCESS);
+}
+
/**
* struct nand_buffers - buffer structure for read/write
* @ecccalc: buffer pointer for calculated ECC, size is oobsize.
@@ -584,6 +606,10 @@ struct nand_buffers {
*
* All these timings are expressed in picoseconds.
*
+ * @tBERS_max: Block erase time
+ * @tCCS_min: Change column setup time
+ * @tPROG_max: Page program time
+ * @tR_max: Page read time
* @tALH_min: ALE hold time
* @tADL_min: ALE to data loading time
* @tALS_min: ALE setup time
@@ -621,6 +647,10 @@ struct nand_buffers {
* @tWW_min: WP# transition to WE# low
*/
struct nand_sdr_timings {
+ u32 tBERS_max;
+ u32 tCCS_min;
+ u32 tPROG_max;
+ u32 tR_max;
u32 tALH_min;
u32 tADL_min;
u32 tALS_min;
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
deleted file mode 100644
index 4ac8b1977b73..000000000000
--- a/include/linux/mutex-debug.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef __LINUX_MUTEX_DEBUG_H
-#define __LINUX_MUTEX_DEBUG_H
-
-#include <linux/linkage.h>
-#include <linux/lockdep.h>
-#include <linux/debug_locks.h>
-
-/*
- * Mutexes - debugging helpers:
- */
-
-#define __DEBUG_MUTEX_INITIALIZER(lockname) \
- , .magic = &lockname
-
-#define mutex_init(mutex) \
-do { \
- static struct lock_class_key __key; \
- \
- __mutex_init((mutex), #mutex, &__key); \
-} while (0)
-
-extern void mutex_destroy(struct mutex *lock);
-
-#endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 2cb7531e7d7a..b97870f2debd 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -18,6 +18,7 @@
#include <linux/atomic.h>
#include <asm/processor.h>
#include <linux/osq_lock.h>
+#include <linux/debug_locks.h>
/*
* Simple, straightforward mutexes with strict semantics:
@@ -48,16 +49,12 @@
* locks and tasks (and only those tasks)
*/
struct mutex {
- /* 1: unlocked, 0: locked, negative: locked, possible waiters */
- atomic_t count;
+ atomic_long_t owner;
spinlock_t wait_lock;
- struct list_head wait_list;
-#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
- struct task_struct *owner;
-#endif
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
struct optimistic_spin_queue osq; /* Spinner MCS lock */
#endif
+ struct list_head wait_list;
#ifdef CONFIG_DEBUG_MUTEXES
void *magic;
#endif
@@ -66,6 +63,11 @@ struct mutex {
#endif
};
+static inline struct task_struct *__mutex_owner(struct mutex *lock)
+{
+ return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x03);
+}
+
/*
* This is the control structure for tasks blocked on mutex,
* which resides on the blocked task's kernel stack:
@@ -79,9 +81,20 @@ struct mutex_waiter {
};
#ifdef CONFIG_DEBUG_MUTEXES
-# include <linux/mutex-debug.h>
+
+#define __DEBUG_MUTEX_INITIALIZER(lockname) \
+ , .magic = &lockname
+
+extern void mutex_destroy(struct mutex *lock);
+
#else
+
# define __DEBUG_MUTEX_INITIALIZER(lockname)
+
+static inline void mutex_destroy(struct mutex *lock) {}
+
+#endif
+
/**
* mutex_init - initialize the mutex
* @mutex: the mutex to be initialized
@@ -90,14 +103,12 @@ struct mutex_waiter {
*
* It is not allowed to initialize an already locked mutex.
*/
-# define mutex_init(mutex) \
-do { \
- static struct lock_class_key __key; \
- \
- __mutex_init((mutex), #mutex, &__key); \
+#define mutex_init(mutex) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __mutex_init((mutex), #mutex, &__key); \
} while (0)
-static inline void mutex_destroy(struct mutex *lock) {}
-#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
@@ -107,7 +118,7 @@ static inline void mutex_destroy(struct mutex *lock) {}
#endif
#define __MUTEX_INITIALIZER(lockname) \
- { .count = ATOMIC_INIT(1) \
+ { .owner = ATOMIC_LONG_INIT(0) \
, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
__DEBUG_MUTEX_INITIALIZER(lockname) \
@@ -127,7 +138,10 @@ extern void __mutex_init(struct mutex *lock, const char *name,
*/
static inline int mutex_is_locked(struct mutex *lock)
{
- return atomic_read(&lock->count) != 1;
+ /*
+ * XXX think about spin_is_locked
+ */
+ return __mutex_owner(lock) != NULL;
}
/*
@@ -175,4 +189,35 @@ extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+/*
+ * These values are chosen such that FAIL and SUCCESS match the
+ * values of the regular mutex_trylock().
+ */
+enum mutex_trylock_recursive_enum {
+ MUTEX_TRYLOCK_FAILED = 0,
+ MUTEX_TRYLOCK_SUCCESS = 1,
+ MUTEX_TRYLOCK_RECURSIVE,
+};
+
+/**
+ * mutex_trylock_recursive - trylock variant that allows recursive locking
+ * @lock: mutex to be locked
+ *
+ * This function should not be used, _ever_. It is purely for hysterical GEM
+ * raisins, and once those are gone this will be removed.
+ *
+ * Returns:
+ * MUTEX_TRYLOCK_FAILED - trylock failed,
+ * MUTEX_TRYLOCK_SUCCESS - lock acquired,
+ * MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
+ */
+static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
+mutex_trylock_recursive(struct mutex *lock)
+{
+ if (unlikely(__mutex_owner(lock) == current))
+ return MUTEX_TRYLOCK_RECURSIVE;
+
+ return mutex_trylock(lock);
+}
+
#endif /* __LINUX_MUTEX_H */
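The kernel-doc above already marks mutex_trylock_recursive() as legacy-only; the
sketch below merely illustrates how its three return values differ from a plain
trylock. All foo_* names are hypothetical.

#include <linux/errno.h>
#include <linux/mutex.h>

static int foo_try_work(struct mutex *lock)
{
        switch (mutex_trylock_recursive(lock)) {
        case MUTEX_TRYLOCK_FAILED:
                return -EBUSY;
        case MUTEX_TRYLOCK_RECURSIVE:
                /* Already held by this task: do the work, but don't unlock here. */
                return 0;
        case MUTEX_TRYLOCK_SUCCESS:
                /* Acquired here, so this path owns the unlock. */
                mutex_unlock(lock);
                return 0;
        }
        return -EINVAL;
}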
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e16a2a980ea8..994f7423a74b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -192,6 +192,7 @@ struct net_device_stats {
#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
+extern struct static_key rfs_needed;
#endif
struct neighbour;
@@ -316,7 +317,6 @@ struct napi_struct {
unsigned int gro_count;
int (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
- spinlock_t poll_lock;
int poll_owner;
#endif
struct net_device *dev;
@@ -334,6 +334,16 @@ enum {
NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
+ NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
+};
+
+enum {
+ NAPIF_STATE_SCHED = (1UL << NAPI_STATE_SCHED),
+ NAPIF_STATE_DISABLE = (1UL << NAPI_STATE_DISABLE),
+ NAPIF_STATE_NPSVC = (1UL << NAPI_STATE_NPSVC),
+ NAPIF_STATE_HASHED = (1UL << NAPI_STATE_HASHED),
+ NAPIF_STATE_NO_BUSY_POLL = (1UL << NAPI_STATE_NO_BUSY_POLL),
+ NAPIF_STATE_IN_BUSY_POLL = (1UL << NAPI_STATE_IN_BUSY_POLL),
};
enum gro_result {
@@ -453,32 +463,22 @@ static inline bool napi_reschedule(struct napi_struct *napi)
return false;
}
-void __napi_complete(struct napi_struct *n);
-void napi_complete_done(struct napi_struct *n, int work_done);
+bool __napi_complete(struct napi_struct *n);
+bool napi_complete_done(struct napi_struct *n, int work_done);
/**
* napi_complete - NAPI processing complete
* @n: NAPI context
*
* Mark NAPI processing as complete.
* Consider using napi_complete_done() instead.
+ * Return false if the device should avoid rearming interrupts.
*/
-static inline void napi_complete(struct napi_struct *n)
+static inline bool napi_complete(struct napi_struct *n)
{
return napi_complete_done(n, 0);
}
/**
- * napi_hash_add - add a NAPI to global hashtable
- * @napi: NAPI context
- *
- * Generate a new napi_id and store a @napi under it in napi_hash.
- * Used for busy polling (CONFIG_NET_RX_BUSY_POLL).
- * Note: This is normally automatically done from netif_napi_add(),
- * so might disappear in a future Linux version.
- */
-void napi_hash_add(struct napi_struct *napi);
-
-/**
* napi_hash_del - remove a NAPI from global table
* @napi: NAPI context
*
@@ -732,8 +732,8 @@ struct xps_dev_maps {
struct rcu_head rcu;
struct xps_map __rcu *cpu_map[0];
};
-#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
- (nr_cpu_ids * sizeof(struct xps_map *)))
+#define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
+ (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */
#define TC_MAX_QUEUE 16
@@ -803,6 +803,7 @@ struct tc_to_netdev {
struct tc_cls_matchall_offload *cls_mall;
struct tc_cls_bpf_offload *cls_bpf;
};
+ bool egress_dev;
};
/* These structures hold the attributes of xdp state that are being passed
@@ -926,7 +927,7 @@ struct netdev_xdp {
* 3. Update dev->stats asynchronously and atomically, and define
* neither operation.
*
- * bool (*ndo_has_offload_stats)(int attr_id)
+ * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
* Return true if this device supports offload stats of this attr_id.
*
* int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
@@ -1166,7 +1167,7 @@ struct net_device_ops {
struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
struct rtnl_link_stats64 *storage);
- bool (*ndo_has_offload_stats)(int attr_id);
+ bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
int (*ndo_get_offload_stats)(int attr_id,
const struct net_device *dev,
void *attr_data);
@@ -1456,7 +1457,6 @@ enum netdev_priv_flags {
* @ptype_specific: Device-specific, protocol-specific packet handlers
*
* @adj_list: Directly linked devices, like slaves for bonding
- * @all_adj_list: All linked devices, *including* neighbours
* @features: Currently active device features
* @hw_features: User-changeable features
*
@@ -1506,6 +1506,8 @@ enum netdev_priv_flags {
* @if_port: Selectable AUI, TP, ...
* @dma: DMA channel
* @mtu: Interface MTU value
+ * @min_mtu: Interface Minimum MTU value
+ * @max_mtu: Interface Maximum MTU value
* @type: Interface hardware type
* @hard_header_len: Maximum hardware header length.
*
@@ -1673,11 +1675,6 @@ struct net_device {
struct list_head lower;
} adj_list;
- struct {
- struct list_head upper;
- struct list_head lower;
- } all_adj_list;
-
netdev_features_t features;
netdev_features_t hw_features;
netdev_features_t wanted_features;
@@ -1726,6 +1723,8 @@ struct net_device {
unsigned char dma;
unsigned int mtu;
+ unsigned int min_mtu;
+ unsigned int max_mtu;
unsigned short type;
unsigned short hard_header_len;
@@ -1922,34 +1921,10 @@ int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
return 0;
}
-static inline
-void netdev_reset_tc(struct net_device *dev)
-{
- dev->num_tc = 0;
- memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
- memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
-}
-
-static inline
-int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
-{
- if (tc >= dev->num_tc)
- return -EINVAL;
-
- dev->tc_to_txq[tc].count = count;
- dev->tc_to_txq[tc].offset = offset;
- return 0;
-}
-
-static inline
-int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
-{
- if (num_tc > TC_MAX_QUEUE)
- return -EINVAL;
-
- dev->num_tc = num_tc;
- return 0;
-}
+int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
+void netdev_reset_tc(struct net_device *dev);
+int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
+int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
static inline
int netdev_get_num_tc(struct net_device *dev)
@@ -2686,71 +2661,6 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
-struct skb_csum_offl_spec {
- __u16 ipv4_okay:1,
- ipv6_okay:1,
- encap_okay:1,
- ip_options_okay:1,
- ext_hdrs_okay:1,
- tcp_okay:1,
- udp_okay:1,
- sctp_okay:1,
- vlan_okay:1,
- no_encapped_ipv6:1,
- no_not_encapped:1;
-};
-
-bool __skb_csum_offload_chk(struct sk_buff *skb,
- const struct skb_csum_offl_spec *spec,
- bool *csum_encapped,
- bool csum_help);
-
-static inline bool skb_csum_offload_chk(struct sk_buff *skb,
- const struct skb_csum_offl_spec *spec,
- bool *csum_encapped,
- bool csum_help)
-{
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- return false;
-
- return __skb_csum_offload_chk(skb, spec, csum_encapped, csum_help);
-}
-
-static inline bool skb_csum_offload_chk_help(struct sk_buff *skb,
- const struct skb_csum_offl_spec *spec)
-{
- bool csum_encapped;
-
- return skb_csum_offload_chk(skb, spec, &csum_encapped, true);
-}
-
-static inline bool skb_csum_off_chk_help_cmn(struct sk_buff *skb)
-{
- static const struct skb_csum_offl_spec csum_offl_spec = {
- .ipv4_okay = 1,
- .ip_options_okay = 1,
- .ipv6_okay = 1,
- .vlan_okay = 1,
- .tcp_okay = 1,
- .udp_okay = 1,
- };
-
- return skb_csum_offload_chk_help(skb, &csum_offl_spec);
-}
-
-static inline bool skb_csum_off_chk_help_cmn_v4_only(struct sk_buff *skb)
-{
- static const struct skb_csum_offl_spec csum_offl_spec = {
- .ipv4_okay = 1,
- .ip_options_okay = 1,
- .tcp_okay = 1,
- .udp_okay = 1,
- .vlan_okay = 1,
- };
-
- return skb_csum_offload_chk_help(skb, &csum_offl_spec);
-}
-
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
@@ -3345,7 +3255,7 @@ int dev_get_phys_port_id(struct net_device *dev,
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
-int dev_change_xdp_fd(struct net_device *dev, int fd);
+int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
@@ -3554,6 +3464,17 @@ static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
txq->xmit_lock_owner = cpu;
}
+static inline bool __netif_tx_acquire(struct netdev_queue *txq)
+{
+ __acquire(&txq->_xmit_lock);
+ return true;
+}
+
+static inline void __netif_tx_release(struct netdev_queue *txq)
+{
+ __release(&txq->_xmit_lock);
+}
+
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
@@ -3655,17 +3576,21 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
#define HARD_TX_LOCK(dev, txq, cpu) { \
if ((dev->features & NETIF_F_LLTX) == 0) { \
__netif_tx_lock(txq, cpu); \
+ } else { \
+ __netif_tx_acquire(txq); \
} \
}
#define HARD_TX_TRYLOCK(dev, txq) \
(((dev->features & NETIF_F_LLTX) == 0) ? \
__netif_tx_trylock(txq) : \
- true )
+ __netif_tx_acquire(txq))
#define HARD_TX_UNLOCK(dev, txq) { \
if ((dev->features & NETIF_F_LLTX) == 0) { \
__netif_tx_unlock(txq); \
+ } else { \
+ __netif_tx_release(txq); \
} \
}
@@ -3884,12 +3809,13 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
updev; \
updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
-/* iterate through upper list, must be called under RCU read lock */
-#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
- for (iter = &(dev)->all_adj_list.upper, \
- updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
- updev; \
- updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
+int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
+ int (*fn)(struct net_device *upper_dev,
+ void *data),
+ void *data);
+
+bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
+ struct net_device *upper_dev);
void *netdev_lower_get_next_private(struct net_device *dev,
struct list_head **iter);
@@ -3922,17 +3848,14 @@ struct net_device *netdev_all_lower_get_next(struct net_device *dev,
struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
struct list_head **iter);
-#define netdev_for_each_all_lower_dev(dev, ldev, iter) \
- for (iter = (dev)->all_adj_list.lower.next, \
- ldev = netdev_all_lower_get_next(dev, &(iter)); \
- ldev; \
- ldev = netdev_all_lower_get_next(dev, &(iter)))
-
-#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
- for (iter = &(dev)->all_adj_list.lower, \
- ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
- ldev; \
- ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
+int netdev_walk_all_lower_dev(struct net_device *dev,
+ int (*fn)(struct net_device *lower_dev,
+ void *data),
+ void *data);
+int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
+ int (*fn)(struct net_device *lower_dev,
+ void *data),
+ void *data);
void *netdev_adjacent_get_private(struct list_head *adj_list);
void *netdev_lower_get_first_private_rcu(struct net_device *dev);
@@ -4009,19 +3932,6 @@ static inline bool can_checksum_protocol(netdev_features_t features,
}
}
-/* Map an ethertype into IP protocol if possible */
-static inline int eproto_to_ipproto(int eproto)
-{
- switch (eproto) {
- case htons(ETH_P_IP):
- return IPPROTO_IP;
- case htons(ETH_P_IPV6):
- return IPPROTO_IPV6;
- default:
- return -1;
- }
-}
-
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev);
#else
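The removed netdev_for_each_all_upper_dev_rcu() iterator is replaced by a
callback-driven walker; a minimal sketch of the new style follows. Only
netdev_walk_all_upper_dev_rcu() comes from the patch, the counting callback and
its caller are hypothetical.

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static int foo_count_one_upper(struct net_device *upper_dev, void *data)
{
        unsigned int *count = data;

        (*count)++;
        return 0;       /* a non-zero return stops the walk */
}

static unsigned int foo_count_all_uppers(struct net_device *dev)
{
        unsigned int count = 0;

        rcu_read_lock();
        netdev_walk_all_upper_dev_rcu(dev, foo_count_one_upper, &count);
        rcu_read_unlock();

        return count;
}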
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index abc7fdcb9eb1..a4b97be30b28 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -49,13 +49,11 @@ struct sock;
struct nf_hook_state {
unsigned int hook;
- int thresh;
u_int8_t pf;
struct net_device *in;
struct net_device *out;
struct sock *sk;
struct net *net;
- struct nf_hook_entry __rcu *hook_entries;
int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};
@@ -77,14 +75,42 @@ struct nf_hook_ops {
struct nf_hook_entry {
struct nf_hook_entry __rcu *next;
- struct nf_hook_ops ops;
+ nf_hookfn *hook;
+ void *priv;
const struct nf_hook_ops *orig_ops;
};
+static inline void
+nf_hook_entry_init(struct nf_hook_entry *entry, const struct nf_hook_ops *ops)
+{
+ entry->next = NULL;
+ entry->hook = ops->hook;
+ entry->priv = ops->priv;
+ entry->orig_ops = ops;
+}
+
+static inline int
+nf_hook_entry_priority(const struct nf_hook_entry *entry)
+{
+ return entry->orig_ops->priority;
+}
+
+static inline int
+nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb,
+ struct nf_hook_state *state)
+{
+ return entry->hook(entry->priv, skb, state);
+}
+
+static inline const struct nf_hook_ops *
+nf_hook_entry_ops(const struct nf_hook_entry *entry)
+{
+ return entry->orig_ops;
+}
+
static inline void nf_hook_state_init(struct nf_hook_state *p,
- struct nf_hook_entry *hook_entry,
unsigned int hook,
- int thresh, u_int8_t pf,
+ u_int8_t pf,
struct net_device *indev,
struct net_device *outdev,
struct sock *sk,
@@ -92,13 +118,11 @@ static inline void nf_hook_state_init(struct nf_hook_state *p,
int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
p->hook = hook;
- p->thresh = thresh;
p->pf = pf;
p->in = indev;
p->out = outdev;
p->sk = sk;
p->net = net;
- RCU_INIT_POINTER(p->hook_entries, hook_entry);
p->okfn = okfn;
}
@@ -152,23 +176,20 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#endif
-int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
+int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
+ struct nf_hook_entry *entry);
/**
- * nf_hook_thresh - call a netfilter hook
+ * nf_hook - call a netfilter hook
*
* Returns 1 if the hook has allowed the packet to pass. The function
* okfn must be invoked by the caller in this case. Any other return
* value indicates the packet has been consumed by the hook.
*/
-static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
- struct net *net,
- struct sock *sk,
- struct sk_buff *skb,
- struct net_device *indev,
- struct net_device *outdev,
- int (*okfn)(struct net *, struct sock *, struct sk_buff *),
- int thresh)
+static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
+ struct sock *sk, struct sk_buff *skb,
+ struct net_device *indev, struct net_device *outdev,
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
struct nf_hook_entry *hook_head;
int ret = 1;
@@ -185,24 +206,16 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
if (hook_head) {
struct nf_hook_state state;
- nf_hook_state_init(&state, hook_head, hook, thresh,
- pf, indev, outdev, sk, net, okfn);
+ nf_hook_state_init(&state, hook, pf, indev, outdev,
+ sk, net, okfn);
- ret = nf_hook_slow(skb, &state);
+ ret = nf_hook_slow(skb, &state, hook_head);
}
rcu_read_unlock();
return ret;
}
-static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
- struct sock *sk, struct sk_buff *skb,
- struct net_device *indev, struct net_device *outdev,
- int (*okfn)(struct net *, struct sock *, struct sk_buff *))
-{
- return nf_hook_thresh(pf, hook, net, sk, skb, indev, outdev, okfn, INT_MIN);
-}
-
/* Activate hook; either okfn or kfree_skb called, unless a hook
returns NF_STOLEN (in which case, it's up to the hook to deal with
the consequences).
@@ -221,19 +234,6 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
*/
static inline int
-NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
- struct sk_buff *skb, struct net_device *in,
- struct net_device *out,
- int (*okfn)(struct net *, struct sock *, struct sk_buff *),
- int thresh)
-{
- int ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, thresh);
- if (ret == 1)
- ret = okfn(net, sk, skb);
- return ret;
-}
-
-static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
struct sk_buff *skb, struct net_device *in, struct net_device *out,
int (*okfn)(struct net *, struct sock *, struct sk_buff *),
@@ -242,7 +242,7 @@ NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
int ret;
if (!cond ||
- ((ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, INT_MIN)) == 1))
+ ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1))
ret = okfn(net, sk, skb);
return ret;
}
@@ -252,7 +252,10 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct
struct net_device *in, struct net_device *out,
int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
- return NF_HOOK_THRESH(pf, hook, net, sk, skb, in, out, okfn, INT_MIN);
+ int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn);
+ if (ret == 1)
+ ret = okfn(net, sk, skb);
+ return ret;
}
/* Call setsockopt() */
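A sketch of how the new nf_hook_entry accessors are meant to be used when
walking a hook chain; the walker below stands in for what nf_hook_slow() does
internally and is not the real implementation.

#include <linux/netfilter.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

/* Caller must hold rcu_read_lock(); any verdict other than NF_ACCEPT stops the walk. */
static int foo_run_hook_chain(struct nf_hook_entry *entry,
                              struct sk_buff *skb,
                              struct nf_hook_state *state)
{
        int verdict = NF_ACCEPT;

        for (; entry; entry = rcu_dereference(entry->next)) {
                verdict = nf_hook_entry_hookfn(entry, skb, state);
                if (verdict != NF_ACCEPT)
                        break;
        }
        return verdict;
}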
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 83b9a2e0d8d4..8e42253e5d4d 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -79,10 +79,12 @@ enum ip_set_ext_id {
IPSET_EXT_ID_MAX,
};
+struct ip_set;
+
/* Extension type */
struct ip_set_ext_type {
/* Destroy extension private data (can be NULL) */
- void (*destroy)(void *ext);
+ void (*destroy)(struct ip_set *set, void *ext);
enum ip_set_extension type;
enum ipset_cadt_flags flag;
/* Size and minimal alignment */
@@ -92,17 +94,6 @@ struct ip_set_ext_type {
extern const struct ip_set_ext_type ip_set_extensions[];
-struct ip_set_ext {
- u64 packets;
- u64 bytes;
- u32 timeout;
- u32 skbmark;
- u32 skbmarkmask;
- u32 skbprio;
- u16 skbqueue;
- char *comment;
-};
-
struct ip_set_counter {
atomic64_t bytes;
atomic64_t packets;
@@ -122,6 +113,15 @@ struct ip_set_skbinfo {
u32 skbmarkmask;
u32 skbprio;
u16 skbqueue;
+ u16 __pad;
+};
+
+struct ip_set_ext {
+ struct ip_set_skbinfo skbinfo;
+ u64 packets;
+ u64 bytes;
+ char *comment;
+ u32 timeout;
};
struct ip_set;
@@ -252,6 +252,10 @@ struct ip_set {
u8 flags;
/* Default timeout value, if enabled */
u32 timeout;
+ /* Number of elements (vs timeout) */
+ u32 elements;
+ /* Size of the dynamic extensions (vs timeout) */
+ size_t ext_size;
/* Element data size */
size_t dsize;
/* Offsets to extensions in elements */
@@ -268,7 +272,7 @@ ip_set_ext_destroy(struct ip_set *set, void *data)
*/
if (SET_WITH_COMMENT(set))
ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy(
- ext_comment(data, set));
+ set, ext_comment(data, set));
}
static inline int
@@ -294,104 +298,6 @@ ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
}
-static inline void
-ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
-{
- atomic64_add((long long)bytes, &(counter)->bytes);
-}
-
-static inline void
-ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
-{
- atomic64_add((long long)packets, &(counter)->packets);
-}
-
-static inline u64
-ip_set_get_bytes(const struct ip_set_counter *counter)
-{
- return (u64)atomic64_read(&(counter)->bytes);
-}
-
-static inline u64
-ip_set_get_packets(const struct ip_set_counter *counter)
-{
- return (u64)atomic64_read(&(counter)->packets);
-}
-
-static inline void
-ip_set_update_counter(struct ip_set_counter *counter,
- const struct ip_set_ext *ext,
- struct ip_set_ext *mext, u32 flags)
-{
- if (ext->packets != ULLONG_MAX &&
- !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
- ip_set_add_bytes(ext->bytes, counter);
- ip_set_add_packets(ext->packets, counter);
- }
- if (flags & IPSET_FLAG_MATCH_COUNTERS) {
- mext->packets = ip_set_get_packets(counter);
- mext->bytes = ip_set_get_bytes(counter);
- }
-}
-
-static inline void
-ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
- const struct ip_set_ext *ext,
- struct ip_set_ext *mext, u32 flags)
-{
- mext->skbmark = skbinfo->skbmark;
- mext->skbmarkmask = skbinfo->skbmarkmask;
- mext->skbprio = skbinfo->skbprio;
- mext->skbqueue = skbinfo->skbqueue;
-}
-static inline bool
-ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo)
-{
- /* Send nonzero parameters only */
- return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
- nla_put_net64(skb, IPSET_ATTR_SKBMARK,
- cpu_to_be64((u64)skbinfo->skbmark << 32 |
- skbinfo->skbmarkmask),
- IPSET_ATTR_PAD)) ||
- (skbinfo->skbprio &&
- nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
- cpu_to_be32(skbinfo->skbprio))) ||
- (skbinfo->skbqueue &&
- nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
- cpu_to_be16(skbinfo->skbqueue)));
-}
-
-static inline void
-ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
- const struct ip_set_ext *ext)
-{
- skbinfo->skbmark = ext->skbmark;
- skbinfo->skbmarkmask = ext->skbmarkmask;
- skbinfo->skbprio = ext->skbprio;
- skbinfo->skbqueue = ext->skbqueue;
-}
-
-static inline bool
-ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter)
-{
- return nla_put_net64(skb, IPSET_ATTR_BYTES,
- cpu_to_be64(ip_set_get_bytes(counter)),
- IPSET_ATTR_PAD) ||
- nla_put_net64(skb, IPSET_ATTR_PACKETS,
- cpu_to_be64(ip_set_get_packets(counter)),
- IPSET_ATTR_PAD);
-}
-
-static inline void
-ip_set_init_counter(struct ip_set_counter *counter,
- const struct ip_set_ext *ext)
-{
- if (ext->bytes != ULLONG_MAX)
- atomic64_set(&(counter)->bytes, (long long)(ext->bytes));
- if (ext->packets != ULLONG_MAX)
- atomic64_set(&(counter)->packets, (long long)(ext->packets));
-}
-
/* Netlink CB args */
enum {
IPSET_CB_NET = 0, /* net namespace */
@@ -431,6 +337,8 @@ extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
size_t len, size_t align);
extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext *ext);
+extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
+ const void *e, bool active);
static inline int
ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr)
@@ -546,10 +454,8 @@ bitmap_bytes(u32 a, u32 b)
#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_comment.h>
-
-int
-ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
- const void *e, bool active);
+#include <linux/netfilter/ipset/ip_set_counter.h>
+#include <linux/netfilter/ipset/ip_set_skbinfo.h>
#define IP_SET_INIT_KEXT(skb, opt, set) \
{ .bytes = (skb)->len, .packets = 1, \
diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h
index 5e4662a71e01..366d6c0ea04f 100644
--- a/include/linux/netfilter/ipset/ip_set_bitmap.h
+++ b/include/linux/netfilter/ipset/ip_set_bitmap.h
@@ -6,8 +6,8 @@
#define IPSET_BITMAP_MAX_RANGE 0x0000FFFF
enum {
+ IPSET_ADD_STORE_PLAIN_TIMEOUT = -1,
IPSET_ADD_FAILED = 1,
- IPSET_ADD_STORE_PLAIN_TIMEOUT,
IPSET_ADD_START_STORED_TIMEOUT,
};
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
index 8d0248525957..8e2bab1e8e90 100644
--- a/include/linux/netfilter/ipset/ip_set_comment.h
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -20,13 +20,14 @@ ip_set_comment_uget(struct nlattr *tb)
* The kadt functions don't use the comment extensions in any way.
*/
static inline void
-ip_set_init_comment(struct ip_set_comment *comment,
+ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
const struct ip_set_ext *ext)
{
struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
size_t len = ext->comment ? strlen(ext->comment) : 0;
if (unlikely(c)) {
+ set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
kfree_rcu(c, rcu);
rcu_assign_pointer(comment->c, NULL);
}
@@ -34,16 +35,17 @@ ip_set_init_comment(struct ip_set_comment *comment,
return;
if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
len = IPSET_MAX_COMMENT_SIZE;
- c = kzalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
+ c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
if (unlikely(!c))
return;
strlcpy(c->str, ext->comment, len + 1);
+ set->ext_size += sizeof(*c) + strlen(c->str) + 1;
rcu_assign_pointer(comment->c, c);
}
/* Used only when dumping a set, protected by rcu_read_lock_bh() */
static inline int
-ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
+ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
{
struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
@@ -58,13 +60,14 @@ ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
* of the set data anymore.
*/
static inline void
-ip_set_comment_free(struct ip_set_comment *comment)
+ip_set_comment_free(struct ip_set *set, struct ip_set_comment *comment)
{
struct ip_set_comment_rcu *c;
c = rcu_dereference_protected(comment->c, 1);
if (unlikely(!c))
return;
+ set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
kfree_rcu(c, rcu);
rcu_assign_pointer(comment->c, NULL);
}
diff --git a/include/linux/netfilter/ipset/ip_set_counter.h b/include/linux/netfilter/ipset/ip_set_counter.h
new file mode 100644
index 000000000000..bb6fba480118
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_counter.h
@@ -0,0 +1,75 @@
+#ifndef _IP_SET_COUNTER_H
+#define _IP_SET_COUNTER_H
+
+/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef __KERNEL__
+
+static inline void
+ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
+{
+ atomic64_add((long long)bytes, &(counter)->bytes);
+}
+
+static inline void
+ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
+{
+ atomic64_add((long long)packets, &(counter)->packets);
+}
+
+static inline u64
+ip_set_get_bytes(const struct ip_set_counter *counter)
+{
+ return (u64)atomic64_read(&(counter)->bytes);
+}
+
+static inline u64
+ip_set_get_packets(const struct ip_set_counter *counter)
+{
+ return (u64)atomic64_read(&(counter)->packets);
+}
+
+static inline void
+ip_set_update_counter(struct ip_set_counter *counter,
+ const struct ip_set_ext *ext,
+ struct ip_set_ext *mext, u32 flags)
+{
+ if (ext->packets != ULLONG_MAX &&
+ !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
+ ip_set_add_bytes(ext->bytes, counter);
+ ip_set_add_packets(ext->packets, counter);
+ }
+ if (flags & IPSET_FLAG_MATCH_COUNTERS) {
+ mext->packets = ip_set_get_packets(counter);
+ mext->bytes = ip_set_get_bytes(counter);
+ }
+}
+
+static inline bool
+ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter)
+{
+ return nla_put_net64(skb, IPSET_ATTR_BYTES,
+ cpu_to_be64(ip_set_get_bytes(counter)),
+ IPSET_ATTR_PAD) ||
+ nla_put_net64(skb, IPSET_ATTR_PACKETS,
+ cpu_to_be64(ip_set_get_packets(counter)),
+ IPSET_ATTR_PAD);
+}
+
+static inline void
+ip_set_init_counter(struct ip_set_counter *counter,
+ const struct ip_set_ext *ext)
+{
+ if (ext->bytes != ULLONG_MAX)
+ atomic64_set(&(counter)->bytes, (long long)(ext->bytes));
+ if (ext->packets != ULLONG_MAX)
+ atomic64_set(&(counter)->packets, (long long)(ext->packets));
+}
+
+#endif /* __KERNEL__ */
+#endif /* _IP_SET_COUNTER_H */
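Usage of the relocated helpers is unchanged, only their home moved; a minimal
sketch of a set type updating a per-element counter during a match, with
foo_elem being hypothetical.

#include <linux/netfilter/ipset/ip_set.h>

struct foo_elem {
        struct ip_set_counter counter;
};

static void foo_elem_matched(struct foo_elem *e, const struct ip_set_ext *ext,
                             struct ip_set_ext *mext, u32 flags)
{
        /* Bump bytes/packets and optionally report them back via mext. */
        ip_set_update_counter(&e->counter, ext, mext, flags);
}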
diff --git a/include/linux/netfilter/ipset/ip_set_skbinfo.h b/include/linux/netfilter/ipset/ip_set_skbinfo.h
new file mode 100644
index 000000000000..29d7ef2bc3fa
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_skbinfo.h
@@ -0,0 +1,46 @@
+#ifndef _IP_SET_SKBINFO_H
+#define _IP_SET_SKBINFO_H
+
+/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef __KERNEL__
+
+static inline void
+ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
+ const struct ip_set_ext *ext,
+ struct ip_set_ext *mext, u32 flags)
+{
+ mext->skbinfo = *skbinfo;
+}
+
+static inline bool
+ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo)
+{
+ /* Send nonzero parameters only */
+ return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
+ nla_put_net64(skb, IPSET_ATTR_SKBMARK,
+ cpu_to_be64((u64)skbinfo->skbmark << 32 |
+ skbinfo->skbmarkmask),
+ IPSET_ATTR_PAD)) ||
+ (skbinfo->skbprio &&
+ nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
+ cpu_to_be32(skbinfo->skbprio))) ||
+ (skbinfo->skbqueue &&
+ nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
+ cpu_to_be16(skbinfo->skbqueue)));
+}
+
+static inline void
+ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
+ const struct ip_set_ext *ext)
+{
+ *skbinfo = ext->skbinfo;
+}
+
+#endif /* __KERNEL__ */
+#endif /* _IP_SET_SKBINFO_H */
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index 1d6a935c1ac5..bfb3531fd88a 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -40,7 +40,7 @@ ip_set_timeout_uget(struct nlattr *tb)
}
static inline bool
-ip_set_timeout_expired(unsigned long *t)
+ip_set_timeout_expired(const unsigned long *t)
{
return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t);
}
@@ -63,7 +63,7 @@ ip_set_timeout_set(unsigned long *timeout, u32 value)
}
static inline u32
-ip_set_timeout_get(unsigned long *timeout)
+ip_set_timeout_get(const unsigned long *timeout)
{
return *timeout == IPSET_ELEM_PERMANENT ? 0 :
jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
diff --git a/include/linux/netfilter/nf_conntrack_dccp.h b/include/linux/netfilter/nf_conntrack_dccp.h
index 40dcc82058d1..ff721d7325cf 100644
--- a/include/linux/netfilter/nf_conntrack_dccp.h
+++ b/include/linux/netfilter/nf_conntrack_dccp.h
@@ -25,7 +25,7 @@ enum ct_dccp_roles {
#define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1)
#ifdef __KERNEL__
-#include <net/netfilter/nf_conntrack_tuple.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
struct nf_ct_dccp {
u_int8_t role[IP_CT_DIR_MAX];
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 2ad1a2b289b5..5117e4d2ddfa 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -4,6 +4,7 @@
#include <linux/netdevice.h>
#include <linux/static_key.h>
+#include <linux/netfilter.h>
#include <uapi/linux/netfilter/x_tables.h>
/* Test a struct->invflags and a boolean for inequality */
@@ -17,14 +18,9 @@
* @target: the target extension
* @matchinfo: per-match data
* @targetinfo: per-target data
- * @net network namespace through which the action was invoked
- * @in: input netdevice
- * @out: output netdevice
+ * @state: pointer to hook state this packet came from
* @fragoff: packet is a fragment, this is the data offset
* @thoff: position of transport header relative to skb->data
- * @hook: hook number given packet came from
- * @family: Actual NFPROTO_* through which the function is invoked
- * (helpful when match->family == NFPROTO_UNSPEC)
*
* Fields written to by extensions:
*
@@ -38,15 +34,47 @@ struct xt_action_param {
union {
const void *matchinfo, *targinfo;
};
- struct net *net;
- const struct net_device *in, *out;
+ const struct nf_hook_state *state;
int fragoff;
unsigned int thoff;
- unsigned int hooknum;
- u_int8_t family;
bool hotdrop;
};
+static inline struct net *xt_net(const struct xt_action_param *par)
+{
+ return par->state->net;
+}
+
+static inline struct net_device *xt_in(const struct xt_action_param *par)
+{
+ return par->state->in;
+}
+
+static inline const char *xt_inname(const struct xt_action_param *par)
+{
+ return par->state->in->name;
+}
+
+static inline struct net_device *xt_out(const struct xt_action_param *par)
+{
+ return par->state->out;
+}
+
+static inline const char *xt_outname(const struct xt_action_param *par)
+{
+ return par->state->out->name;
+}
+
+static inline unsigned int xt_hooknum(const struct xt_action_param *par)
+{
+ return par->state->hook;
+}
+
+static inline u_int8_t xt_family(const struct xt_action_param *par)
+{
+ return par->state->pf;
+}
+
/**
* struct xt_mtchk_param - parameters for match extensions'
* checkentry functions
@@ -375,38 +403,14 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
return ret;
}
+struct xt_percpu_counter_alloc_state {
+ unsigned int off;
+ const char __percpu *mem;
+};
-/* On SMP, ip(6)t_entry->counters.pcnt holds address of the
- * real (percpu) counter. On !SMP, its just the packet count,
- * so nothing needs to be done there.
- *
- * xt_percpu_counter_alloc returns the address of the percpu
- * counter, or 0 on !SMP. We force an alignment of 16 bytes
- * so that bytes/packets share a common cache line.
- *
- * Hence caller must use IS_ERR_VALUE to check for error, this
- * allows us to return 0 for single core systems without forcing
- * callers to deal with SMP vs. NONSMP issues.
- */
-static inline unsigned long xt_percpu_counter_alloc(void)
-{
- if (nr_cpu_ids > 1) {
- void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
- sizeof(struct xt_counters));
-
- if (res == NULL)
- return -ENOMEM;
-
- return (__force unsigned long) res;
- }
-
- return 0;
-}
-static inline void xt_percpu_counter_free(u64 pcnt)
-{
- if (nr_cpu_ids > 1)
- free_percpu((void __percpu *) (unsigned long) pcnt);
-}
+bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
+ struct xt_counters *counter);
+void xt_percpu_counter_free(struct xt_counters *cnt);
static inline struct xt_counters *
xt_get_this_cpu_counter(struct xt_counters *cnt)
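A sketch of a match extension converted to the new accessors: the removed
xt_action_param fields are now reached through par->state via xt_net(), xt_in(),
xt_hooknum() and xt_family(). The match logic itself is hypothetical.

#include <linux/netfilter/x_tables.h>
#include <linux/skbuff.h>

static bool foo_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
        if (xt_family(par) != NFPROTO_IPV4)
                return false;

        /* e.g. only match packets that arrived on an input device in PRE_ROUTING */
        return xt_in(par) && xt_hooknum(par) == NF_INET_PRE_ROUTING;
}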
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
index 33e37fb41d5d..59476061de86 100644
--- a/include/linux/netfilter_ingress.h
+++ b/include/linux/netfilter_ingress.h
@@ -19,6 +19,7 @@ static inline int nf_hook_ingress(struct sk_buff *skb)
{
struct nf_hook_entry *e = rcu_dereference(skb->dev->nf_hooks_ingress);
struct nf_hook_state state;
+ int ret;
/* Must recheck the ingress hook head, in the event it became NULL
* after the check in nf_hook_ingress_active evaluated to true.
@@ -26,10 +27,14 @@ static inline int nf_hook_ingress(struct sk_buff *skb)
if (unlikely(!e))
return 0;
- nf_hook_state_init(&state, e, NF_NETDEV_INGRESS, INT_MIN,
+ nf_hook_state_init(&state, NF_NETDEV_INGRESS,
NFPROTO_NETDEV, skb->dev, NULL, NULL,
dev_net(skb->dev), NULL);
- return nf_hook_slow(skb, &state);
+ ret = nf_hook_slow(skb, &state, e);
+ if (ret == 0)
+ return -1;
+
+ return ret;
}
static inline void nf_hook_ingress_init(struct net_device *dev)
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index b25ee9ffdbe6..1828900c9411 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -78,8 +78,11 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
struct net_device *dev = napi->dev;
if (dev && dev->npinfo) {
- spin_lock(&napi->poll_lock);
- napi->poll_owner = smp_processor_id();
+ int owner = smp_processor_id();
+
+ while (cmpxchg(&napi->poll_owner, -1, owner) != -1)
+ cpu_relax();
+
return napi;
}
return NULL;
@@ -89,10 +92,8 @@ static inline void netpoll_poll_unlock(void *have)
{
struct napi_struct *napi = have;
- if (napi) {
- napi->poll_owner = -1;
- spin_unlock(&napi->poll_lock);
- }
+ if (napi)
+ smp_store_release(&napi->poll_owner, -1);
}
static inline bool netpoll_tx_running(struct net_device *dev)
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 9094faf0699d..bca536341d1a 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -440,6 +440,7 @@ enum lock_type4 {
#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
#define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13)
#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
+#define FATTR4_WORD2_MODE_UMASK (1UL << 17)
/* MDS threshold bitmap bits */
#define THRESHOLD_RD (1UL << 0)
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 810124b33327..f1da8c8dd473 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -55,22 +55,18 @@ struct nfs_access_entry {
struct rcu_head rcu_head;
};
-struct nfs_lockowner {
- fl_owner_t l_owner;
- pid_t l_pid;
-};
-
struct nfs_lock_context {
atomic_t count;
struct list_head list;
struct nfs_open_context *open_context;
- struct nfs_lockowner lockowner;
+ fl_owner_t lockowner;
atomic_t io_count;
};
struct nfs4_state;
struct nfs_open_context {
struct nfs_lock_context lock_context;
+ fl_owner_t flock_owner;
struct dentry *dentry;
struct rpc_cred *cred;
struct nfs4_state *state;
@@ -344,11 +340,10 @@ extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
extern int nfs_permission(struct inode *, int);
extern int nfs_open(struct inode *, struct file *);
-extern int nfs_attribute_timeout(struct inode *inode);
extern int nfs_attribute_cache_expired(struct inode *inode);
extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
-extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode);
extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
+extern bool nfs_mapping_need_revalidate_inode(struct inode *inode);
extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
extern int nfs_revalidate_mapping_rcu(struct inode *inode);
extern int nfs_setattr(struct dentry *, struct iattr *);
@@ -358,7 +353,7 @@ extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
extern void put_nfs_open_context(struct nfs_open_context *ctx);
extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode);
-extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode);
+extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode, struct file *filp);
extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx);
extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx);
extern void nfs_file_clear_open_context(struct file *flip);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index beb1e10f446e..348f7c158084 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -216,6 +216,20 @@ struct nfs4_get_lease_time_res {
struct nfs_fsinfo *lr_fsinfo;
};
+struct xdr_stream;
+struct nfs4_xdr_opaque_data;
+
+struct nfs4_xdr_opaque_ops {
+ void (*encode)(struct xdr_stream *, const void *args,
+ const struct nfs4_xdr_opaque_data *);
+ void (*free)(struct nfs4_xdr_opaque_data *);
+};
+
+struct nfs4_xdr_opaque_data {
+ const struct nfs4_xdr_opaque_ops *ops;
+ void *data;
+};
+
#define PNFS_LAYOUT_MAXSIZE 4096
struct nfs4_layoutdriver_data {
@@ -306,6 +320,7 @@ struct nfs4_layoutreturn_args {
struct pnfs_layout_range range;
nfs4_stateid stateid;
__u32 layout_type;
+ struct nfs4_xdr_opaque_data *ld_private;
};
struct nfs4_layoutreturn_res {
@@ -321,6 +336,7 @@ struct nfs4_layoutreturn {
struct nfs_client *clp;
struct inode *inode;
int rpc_status;
+ struct nfs4_xdr_opaque_data ld_private;
};
#define PNFS_LAYOUTSTATS_MAXSIZE 256
@@ -341,8 +357,7 @@ struct nfs42_layoutstat_devinfo {
__u64 write_count;
__u64 write_bytes;
__u32 layout_type;
- layoutstats_encode_t layoutstats_encode;
- void *layout_private;
+ struct nfs4_xdr_opaque_data ld_private;
};
struct nfs42_layoutstat_args {
@@ -418,6 +433,7 @@ struct nfs_openargs {
enum open_claim_type4 claim;
enum createmode4 createmode;
const struct nfs4_label *label;
+ umode_t umask;
};
struct nfs_openres {
@@ -469,6 +485,7 @@ struct nfs_closeargs {
fmode_t fmode;
u32 share_access;
const u32 * bitmask;
+ struct nfs4_layoutreturn_args *lr_args;
};
struct nfs_closeres {
@@ -477,6 +494,8 @@ struct nfs_closeres {
struct nfs_fattr * fattr;
struct nfs_seqid * seqid;
const struct nfs_server *server;
+ struct nfs4_layoutreturn_res *lr_res;
+ int lr_ret;
};
/*
* * Arguments to the lock,lockt, and locku call.
@@ -549,12 +568,15 @@ struct nfs4_delegreturnargs {
const struct nfs_fh *fhandle;
const nfs4_stateid *stateid;
const u32 * bitmask;
+ struct nfs4_layoutreturn_args *lr_args;
};
struct nfs4_delegreturnres {
struct nfs4_sequence_res seq_res;
struct nfs_fattr * fattr;
struct nfs_server *server;
+ struct nfs4_layoutreturn_res *lr_res;
+ int lr_ret;
};
/*
@@ -937,6 +959,7 @@ struct nfs4_create_arg {
const struct nfs_fh * dir_fh;
const u32 * bitmask;
const struct nfs4_label *label;
+ umode_t umask;
};
struct nfs4_create_res {
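A sketch of how a pNFS layout driver might populate the new
nfs4_xdr_opaque_data/ops pair on a layoutreturn; all foo_* names are
hypothetical, only the structures and the ld_private field come from the patch.

#include <linux/nfs_xdr.h>
#include <linux/slab.h>

static void foo_ld_encode(struct xdr_stream *xdr, const void *args,
                          const struct nfs4_xdr_opaque_data *opaque)
{
        /* emit the driver-specific layoutreturn payload from opaque->data */
}

static void foo_ld_free(struct nfs4_xdr_opaque_data *opaque)
{
        kfree(opaque->data);
}

static const struct nfs4_xdr_opaque_ops foo_ld_ops = {
        .encode = foo_ld_encode,
        .free   = foo_ld_free,
};

static void foo_prepare_layoutreturn(struct nfs4_layoutreturn_args *args,
                                     struct nfs4_xdr_opaque_data *priv,
                                     void *driver_blob)
{
        priv->ops  = &foo_ld_ops;
        priv->data = driver_blob;
        args->ld_private = priv;
}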
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index a78c35cff1ae..aacca824a6ae 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -7,6 +7,23 @@
#include <linux/sched.h>
#include <asm/irq.h>
+/*
+ * The run state of the lockup detectors is controlled by the content of the
+ * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
+ * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
+ *
+ * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
+ * are variables that are only used as an 'interface' between the parameters
+ * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
+ * 'watchdog_thresh' variable is handled differently because its value is not
+ * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
+ * is equal to zero.
+ */
+#define NMI_WATCHDOG_ENABLED_BIT 0
+#define SOFT_WATCHDOG_ENABLED_BIT 1
+#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT)
+#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT)
+
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.
*
@@ -91,9 +108,16 @@ extern int nmi_watchdog_enabled;
extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
+extern unsigned long watchdog_enabled;
extern unsigned long *watchdog_cpumask_bits;
+#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
+#else
+#define sysctl_softlockup_all_cpu_backtrace 0
+#define sysctl_hardlockup_all_cpu_backtrace 0
+#endif
+extern bool is_hardlockup(void);
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int ,
void __user *, size_t *, loff_t *);
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index 6f47562d477b..de87ceac110e 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -896,7 +896,7 @@ static inline int ntb_spad_is_unsafe(struct ntb_dev *ntb)
}
/**
- * ntb_mw_count() - get the number of scratchpads
+ * ntb_spad_count() - get the number of scratchpads
* @ntb: NTB device context.
*
* Hardware and topology may support a different number of scratchpads.
@@ -968,6 +968,9 @@ static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
*/
static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
{
+ if (!ntb->ops->peer_spad_read)
+ return 0;
+
return ntb->ops->peer_spad_read(ntb, idx);
}
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
new file mode 100644
index 000000000000..f21471f7ee40
--- /dev/null
+++ b/include/linux/nvme-fc-driver.h
@@ -0,0 +1,851 @@
+/*
+ * Copyright (c) 2016, Avago Technologies
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _NVME_FC_DRIVER_H
+#define _NVME_FC_DRIVER_H 1
+
+
+/*
+ * ********************** LLDD FC-NVME Host API ********************
+ *
+ * For FC LLDD's that are the NVME Host role.
+ *
+ * ******************************************************************
+ */
+
+
+
+/* FC Port role bitmask - can merge with FC Port Roles in fc transport */
+#define FC_PORT_ROLE_NVME_INITIATOR 0x10
+#define FC_PORT_ROLE_NVME_TARGET 0x11
+#define FC_PORT_ROLE_NVME_DISCOVERY 0x12
+
+
+/**
+ * struct nvme_fc_port_info - port-specific ids and FC connection-specific
+ * data element used during NVME Host role
+ * registrations
+ *
+ * Static fields describing the port being registered:
+ * @node_name: FC WWNN for the port
+ * @port_name: FC WWPN for the port
+ * @port_role: What NVME roles are supported (see FC_PORT_ROLE_xxx)
+ *
+ * Initialization values for dynamic port fields:
+ * @port_id: FC N_Port_ID currently assigned to the port. Upper 8 bits must
+ * be set to 0.
+ */
+struct nvme_fc_port_info {
+ u64 node_name;
+ u64 port_name;
+ u32 port_role;
+ u32 port_id;
+};
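A sketch of an LLDD filling in the registration data described above before
handing it to the transport's localport registration (nvme_fc_register_localport(),
referenced later in this header); everything prefixed foo_ is hypothetical.

static void foo_fill_port_info(struct nvme_fc_port_info *pinfo,
                               u64 wwnn, u64 wwpn, u32 n_port_id)
{
        pinfo->node_name = wwnn;
        pinfo->port_name = wwpn;
        pinfo->port_role = FC_PORT_ROLE_NVME_INITIATOR;
        pinfo->port_id   = n_port_id;   /* upper 8 bits must be 0 */
}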
+
+
+/**
+ * struct nvmefc_ls_req - Request structure passed from NVME-FC transport
+ * to LLDD in order to perform a NVME FC-4 LS
+ * request and obtain a response.
+ *
+ * Values set by the NVME-FC layer prior to calling the LLDD ls_req
+ * entrypoint.
+ * @rqstaddr: pointer to request buffer
+ * @rqstdma: PCI DMA address of request buffer
+ * @rqstlen: Length, in bytes, of request buffer
+ * @rspaddr: pointer to response buffer
+ * @rspdma: PCI DMA address of response buffer
+ * @rsplen: Length, in bytes, of response buffer
+ * @timeout: Maximum amount of time, in seconds, to wait for the LS response.
+ * If timeout exceeded, LLDD to abort LS exchange and complete
+ * LS request with error status.
+ * @private: pointer to memory allocated alongside the ls request structure
+ * that is specifically for the LLDD to use while processing the
+ * request. The length of the buffer corresponds to the
+ * lsrqst_priv_sz value specified in the nvme_fc_port_template
+ * supplied by the LLDD.
+ * @done: The callback routine the LLDD is to invoke upon completion of
+ * the LS request. req argument is the pointer to the original LS
+ * request structure. Status argument must be 0 upon success, a
+ * negative errno on failure (example: -ENXIO).
+ */
+struct nvmefc_ls_req {
+ void *rqstaddr;
+ dma_addr_t rqstdma;
+ u32 rqstlen;
+ void *rspaddr;
+ dma_addr_t rspdma;
+ u32 rsplen;
+ u32 timeout;
+
+ void *private;
+
+ void (*done)(struct nvmefc_ls_req *req, int status);
+
+} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+
+
+enum nvmefc_fcp_datadir {
+ NVMEFC_FCP_NODATA, /* payload_length and sg_cnt will be zero */
+ NVMEFC_FCP_WRITE,
+ NVMEFC_FCP_READ,
+};
+
+
+#define NVME_FC_MAX_SEGMENTS 256
+
+/**
+ * struct nvmefc_fcp_req - Request structure passed from NVME-FC transport
+ * to LLDD in order to perform a NVME FCP IO operation.
+ *
+ * Values set by the NVME-FC layer prior to calling the LLDD fcp_io
+ * entrypoint.
+ * @cmdaddr: pointer to the FCP CMD IU buffer
+ * @rspaddr: pointer to the FCP RSP IU buffer
+ * @cmddma: PCI DMA address of the FCP CMD IU buffer
+ * @rspdma: PCI DMA address of the FCP RSP IU buffer
+ * @cmdlen: Length, in bytes, of the FCP CMD IU buffer
+ * @rsplen: Length, in bytes, of the FCP RSP IU buffer
+ * @payload_length: Length of DATA_IN or DATA_OUT payload data to transfer
+ * @sg_table: scatter/gather structure for payload data
+ * @first_sgl: memory for 1st scatter/gather list segment for payload data
+ * @sg_cnt: number of elements in the scatter/gather list
+ * @io_dir: direction of the FCP request (see NVMEFC_FCP_xxx)
+ * @sqid: The nvme SQID the command is being issued on
+ * @done: The callback routine the LLDD is to invoke upon completion of
+ * the FCP operation. req argument is the pointer to the original
+ * FCP IO operation.
+ * @private: pointer to memory allocated alongside the FCP operation
+ * request structure that is specifically for the LLDD to use
+ * while processing the operation. The length of the buffer
+ * corresponds to the fcprqst_priv_sz value specified in the
+ * nvme_fc_port_template supplied by the LLDD.
+ *
+ * Values set by the LLDD indicating completion status of the FCP operation.
+ * Must be set prior to calling the done() callback.
+ * @transferred_length: amount of payload data, in bytes, that was
+ * transferred. Should equal payload_length on success.
+ * @rcv_rsplen: length, in bytes, of the FCP RSP IU received.
+ * @status: Completion status of the FCP operation. Must be 0 upon success,
+ * NVME_SC_FC_xxx value upon failure. Note: this is NOT a
+ * reflection of the NVME CQE completion status; it is only the
+ * status of the FCP operation at the NVME-FC level.
+ */
+struct nvmefc_fcp_req {
+ void *cmdaddr;
+ void *rspaddr;
+ dma_addr_t cmddma;
+ dma_addr_t rspdma;
+ u16 cmdlen;
+ u16 rsplen;
+
+ u32 payload_length;
+ struct sg_table sg_table;
+ struct scatterlist *first_sgl;
+ int sg_cnt;
+ enum nvmefc_fcp_datadir io_dir;
+
+ __le16 sqid;
+
+ void (*done)(struct nvmefc_fcp_req *req);
+
+ void *private;
+
+ u32 transferred_length;
+ u16 rcv_rsplen;
+ u32 status;
+} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+
+
+/*
+ * Direct copy of fc_port_state enum. For later merging
+ */
+enum nvme_fc_obj_state {
+ FC_OBJSTATE_UNKNOWN,
+ FC_OBJSTATE_NOTPRESENT,
+ FC_OBJSTATE_ONLINE,
+ FC_OBJSTATE_OFFLINE, /* User has taken Port Offline */
+ FC_OBJSTATE_BLOCKED,
+ FC_OBJSTATE_BYPASSED,
+ FC_OBJSTATE_DIAGNOSTICS,
+ FC_OBJSTATE_LINKDOWN,
+ FC_OBJSTATE_ERROR,
+ FC_OBJSTATE_LOOPBACK,
+ FC_OBJSTATE_DELETED,
+};
+
+
+/**
+ * struct nvme_fc_local_port - structure used between NVME-FC transport and
+ * a LLDD to reference a local NVME host port.
+ * Allocated/created by the nvme_fc_register_localport()
+ * transport interface.
+ *
+ * Fields with static values for the port. Initialized by the
+ * port_info struct supplied to the registration call.
+ * @port_num: NVME-FC transport host port number
+ * @port_role: NVME roles supported on the port (see FC_PORT_ROLE_xxx)
+ * @node_name: FC WWNN for the port
+ * @port_name: FC WWPN for the port
+ * @private: pointer to memory allocated alongside the local port
+ * structure that is specifically for the LLDD to use.
+ * The length of the buffer corresponds to the local_priv_sz
+ * value specified in the nvme_fc_port_template supplied by
+ * the LLDD.
+ *
+ * Fields with dynamic values. Values may change based on link state. LLDD
+ * may reference fields directly to change them. Initialized by the
+ * port_info struct supplied to the registration call.
+ * @port_id: FC N_Port_ID currently assigned to the port. Upper 8 bits must
+ * be set to 0.
+ * @port_state: Operational state of the port.
+ */
+struct nvme_fc_local_port {
+ /* static/read-only fields */
+ u32 port_num;
+ u32 port_role;
+ u64 node_name;
+ u64 port_name;
+
+ void *private;
+
+ /* dynamic fields */
+ u32 port_id;
+ enum nvme_fc_obj_state port_state;
+} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+
+
+/**
+ * struct nvme_fc_remote_port - structure used between NVME-FC transport and
+ * a LLDD to reference a remote NVME subsystem port.
+ * Allocated/created by the nvme_fc_register_remoteport()
+ * transport interface.
+ *
+ * Fields with static values for the port. Initialized by the
+ * port_info struct supplied to the registration call.
+ * @port_num: NVME-FC transport remote subsystem port number
+ * @port_role: NVME roles supported on the port (see FC_PORT_ROLE_xxx)
+ * @node_name: FC WWNN for the port
+ * @port_name: FC WWPN for the port
+ * @localport: pointer to the NVME-FC local host port the subsystem is
+ * connected to.
+ * @private: pointer to memory allocated alongside the remote port
+ * structure that is specifically for the LLDD to use.
+ * The length of the buffer corresponds to the remote_priv_sz
+ * value specified in the nvme_fc_port_template supplied by
+ * the LLDD.
+ *
+ * Fields with dynamic values. Values may change based on link or login
+ * state. LLDD may reference fields directly to change them. Initialized by
+ * the port_info struct supplied to the registration call.
+ * @port_id: FC N_Port_ID currently assigned to the port. Upper 8 bits must
+ * be set to 0.
+ * @port_state: Operational state of the remote port. Valid values are
+ * ONLINE or UNKNOWN.
+ */
+struct nvme_fc_remote_port {
+ /* static fields */
+ u32 port_num;
+ u32 port_role;
+ u64 node_name;
+ u64 port_name;
+
+ struct nvme_fc_local_port *localport;
+
+ void *private;
+
+ /* dynamic fields */
+ u32 port_id;
+ enum nvme_fc_obj_state port_state;
+} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+
+
+/**
+ * struct nvme_fc_port_template - structure containing static entrypoints and
+ * operational parameters for an LLDD that supports NVME host
+ * behavior. Passed by reference in port registrations.
+ * NVME-FC transport remembers template reference and may
+ * access it during runtime operation.
+ *
+ * Host/Initiator Transport Entrypoints/Parameters:
+ *
+ * @localport_delete: The LLDD initiates deletion of a localport via
+ * nvme_fc_deregister_localport(). However, the teardown is
+ * asynchronous. This routine is called upon the completion of the
+ * teardown to inform the LLDD that the localport has been deleted.
+ * Entrypoint is Mandatory.
+ *
+ * @remoteport_delete: The LLDD initiates deletion of a remoteport via
+ * nvme_fc_deregister_remoteport(). However, the teardown is
+ * asynchronous. This routine is called upon the completion of the
+ * teardown to inform the LLDD that the remoteport has been deleted.
+ * Entrypoint is Mandatory.
+ *
+ * @create_queue: Upon creating a host<->controller association, queues are
+ * created such that they can be affinitized to cpus/cores. This
+ * callback notifies the LLDD that a controller queue is being
+ * created. The LLDD may choose to allocate an associated hw queue
+ * or map it onto a shared hw queue. Upon return from the call, the
+ * LLDD specifies a handle that will be given back to it for any
+ * command that is posted to the controller queue. The handle can
+ * be used by the LLDD to map quickly to the proper hw queue for
+ * command execution. The mask of CPUs that will map to this queue
+ * at the block-level is also passed in. The LLDD should use the
+ * queue id and/or cpu masks to ensure proper affinitization of the
+ * controller queue to the hw queue.
+ * Entrypoint is Optional.
+ *
+ * @delete_queue: This is the inverse of the create_queue. During
+ * host<->controller association teardown, this routine is called
+ * when a controller queue is being terminated. Any association with
+ * a hw queue should be terminated. If there is a unique hw queue, the
+ * hw queue should be torn down.
+ * Entrypoint is Optional.
+ *
+ * @poll_queue: Called to poll for the completion of an io on a blk queue.
+ * Entrypoint is Optional.
+ *
+ * @ls_req: Called to issue a FC-NVME FC-4 LS service request.
+ * The nvme_fc_ls_req structure will fully describe the buffers for
+ * the request payload and where to place the response payload. The
+ * LLDD is to allocate an exchange, issue the LS request, obtain the
+ * LS response, and call the "done" routine specified in the request
+ * structure (argument to done is the ls request structure itself).
+ * Entrypoint is Mandatory.
+ *
+ * @fcp_io: called to issue a FC-NVME I/O request. The I/O may be for
+ * an admin queue or an i/o queue. The nvmefc_fcp_req structure will
+ * fully describe the io: the buffer containing the FC-NVME CMD IU
+ * (which contains the SQE), the sg list for the payload if applicable,
+ * and the buffer to place the FC-NVME RSP IU into. The LLDD will
+ * complete the i/o, indicating the amount of data transferred or
+ * any transport error, and call the "done" routine specified in the
+ * request structure (argument to done is the fcp request structure
+ * itself).
+ * Entrypoint is Mandatory.
+ *
+ * @ls_abort: called to request the LLDD to abort the indicated ls request.
+ * The call may return before the abort has completed. After aborting
+ * the request, the LLDD must still call the ls request done routine
+ * indicating an FC transport Aborted status.
+ * Entrypoint is Mandatory.
+ *
+ * @fcp_abort: called to request the LLDD to abort the indicated fcp request.
+ * The call may return before the abort has completed. After aborting
+ * the request, the LLDD must still call the fcp request done routine
+ * indicating an FC transport Aborted status.
+ * Entrypoint is Mandatory.
+ *
+ * @max_hw_queues: indicates the maximum number of hw queues the LLDD
+ * supports for cpu affinitization.
+ * Value is Mandatory. Must be at least 1.
+ *
+ * @max_sgl_segments: indicates the maximum number of sgl segments supported
+ * by the LLDD
+ * Value is Mandatory. Must be at least 1. Recommend at least 256.
+ *
+ * @max_dif_sgl_segments: indicates the maximum number of sgl segments
+ * supported by the LLDD for DIF operations.
+ * Value is Mandatory. Must be at least 1. Recommend at least 256.
+ *
+ * @dma_boundary: indicates the dma address boundary where dma mappings
+ * will be split across.
+ * Value is Mandatory. Typical value is 0xFFFFFFFF to split across
+ * 4Gig address boundaries
+ *
+ * @local_priv_sz: The LLDD sets this field to the amount of additional
+ * memory that it would like the fc nvme layer to allocate on the LLDD's
+ * behalf whenever a localport is allocated. The additional memory
+ * area is solely for use by the LLDD and its location is specified by
+ * the localport->private pointer.
+ * Value is Mandatory. Allowed to be zero.
+ *
+ * @remote_priv_sz: The LLDD sets this field to the amount of additional
+ * memory that it would like the fc nvme layer to allocate on the LLDD's
+ * behalf whenever a remoteport is allocated. The additional memory
+ * area is solely for use by the LLDD and its location is specified by
+ * the remoteport->private pointer.
+ * Value is Mandatory. Allowed to be zero.
+ *
+ * @lsrqst_priv_sz: The LLDD sets this field to the amount of additional
+ * memory that it would like the fc nvme layer to allocate on the LLDD's
+ * behalf whenever a ls request structure is allocated. The additional
+ * memory area is solely for use by the LLDD and its location is
+ * specified by the ls_request->private pointer.
+ * Value is Mandatory. Allowed to be zero.
+ *
+ * @fcprqst_priv_sz: The LLDD sets this field to the amount of additional
+ * memory that it would like the fc nvme layer to allocate on the LLDD's
+ * behalf whenever a fcp request structure is allocated. The additional
+ * memory area is solely for use by the LLDD and its location is
+ * specified by the fcp_request->private pointer.
+ * Value is Mandatory. Allowed to be zero.
+ */
+struct nvme_fc_port_template {
+ /* initiator-based functions */
+ void (*localport_delete)(struct nvme_fc_local_port *);
+ void (*remoteport_delete)(struct nvme_fc_remote_port *);
+ int (*create_queue)(struct nvme_fc_local_port *,
+ unsigned int qidx, u16 qsize,
+ void **handle);
+ void (*delete_queue)(struct nvme_fc_local_port *,
+ unsigned int qidx, void *handle);
+ void (*poll_queue)(struct nvme_fc_local_port *, void *handle);
+ int (*ls_req)(struct nvme_fc_local_port *,
+ struct nvme_fc_remote_port *,
+ struct nvmefc_ls_req *);
+ int (*fcp_io)(struct nvme_fc_local_port *,
+ struct nvme_fc_remote_port *,
+ void *hw_queue_handle,
+ struct nvmefc_fcp_req *);
+ void (*ls_abort)(struct nvme_fc_local_port *,
+ struct nvme_fc_remote_port *,
+ struct nvmefc_ls_req *);
+ void (*fcp_abort)(struct nvme_fc_local_port *,
+ struct nvme_fc_remote_port *,
+ void *hw_queue_handle,
+ struct nvmefc_fcp_req *);
+
+ u32 max_hw_queues;
+ u16 max_sgl_segments;
+ u16 max_dif_sgl_segments;
+ u64 dma_boundary;
+
+ /* sizes of additional private data for data structures */
+ u32 local_priv_sz;
+ u32 remote_priv_sz;
+ u32 lsrqst_priv_sz;
+ u32 fcprqst_priv_sz;
+};
+
+
+/*
+ * Initiator/Host functions
+ */
+
+int nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
+ struct nvme_fc_port_template *template,
+ struct device *dev,
+ struct nvme_fc_local_port **lport_p);
+
+int nvme_fc_unregister_localport(struct nvme_fc_local_port *localport);
+
+int nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
+ struct nvme_fc_port_info *pinfo,
+ struct nvme_fc_remote_port **rport_p);
+
+int nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *remoteport);
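
As a rough usage sketch of the host-side API above: an LLDD fills in a port
template and registers a local port for each FC host port, then a remote port
for each discovered NVME subsystem port. The my_* names below are hypothetical,
the nvme_fc_port_info initializers assume the node_name/port_name fields
described in the kerneldoc, and only a subset of the mandatory template
entrypoints is stubbed out.

#include <linux/nvme-fc-driver.h>

struct my_lport_priv {
	void *hba;			/* hypothetical HBA context */
};

static void my_localport_delete(struct nvme_fc_local_port *lport)
{
	/* localport teardown has completed; release LLDD state */
}

static void my_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	/* remoteport teardown has completed */
}

static struct nvme_fc_port_template my_fc_template = {
	.localport_delete	= my_localport_delete,
	.remoteport_delete	= my_remoteport_delete,
	/* .ls_req, .fcp_io, .ls_abort and .fcp_abort are also mandatory */
	.max_hw_queues		= 4,
	.max_sgl_segments	= 256,
	.max_dif_sgl_segments	= 256,
	.dma_boundary		= 0xFFFFFFFF,
	.local_priv_sz		= sizeof(struct my_lport_priv),
};

static int my_register_host_port(struct device *dev, u64 wwnn, u64 wwpn,
				 struct nvme_fc_local_port **lport)
{
	struct nvme_fc_port_info pinfo = {
		.node_name = wwnn,
		.port_name = wwpn,
		/* .port_role / .port_id set as described above */
	};

	return nvme_fc_register_localport(&pinfo, &my_fc_template, dev, lport);
}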
+
+
+
+/*
+ * *************** LLDD FC-NVME Target/Subsystem API ***************
+ *
+ * For FC LLDD's that are the NVME Subsystem role
+ *
+ * ******************************************************************
+ */
+
+/**
+ * struct nvmet_fc_port_info - port-specific ids and FC connection-specific
+ * data element used during NVME Subsystem role
+ * registrations
+ *
+ * Static fields describing the port being registered:
+ * @node_name: FC WWNN for the port
+ * @port_name: FC WWPN for the port
+ *
+ * Initialization values for dynamic port fields:
+ * @port_id: FC N_Port_ID currently assigned to the port. Upper 8 bits must
+ * be set to 0.
+ */
+struct nvmet_fc_port_info {
+ u64 node_name;
+ u64 port_name;
+ u32 port_id;
+};
+
+
+/**
+ * struct nvmefc_tgt_ls_req - Structure used between LLDD and NVMET-FC
+ * layer to represent the exchange context for
+ * a FC-NVME Link Service (LS).
+ *
+ * The structure is allocated by the LLDD whenever a LS Request is received
+ * from the FC link. The address of the structure is passed to the nvmet-fc
+ * layer via the nvmet_fc_rcv_ls_req() call. The address of the structure
+ * will be passed back to the LLDD when the response is to be transmitted.
+ * The LLDD is to use the address to map back to the LLDD exchange structure
+ * which maintains information such as the targetport the LS was received
+ * on, the remote FC NVME initiator that sent the LS, and any FC exchange
+ * context. Upon completion of the LS response transmit, the address of the
+ * structure will be passed back to the LS rsp done() routine, allowing the
+ * nvmet-fc layer to release dma resources. Upon completion of the done()
+ * routine, no further access will be made by the nvmet-fc layer and the
+ * LLDD can de-allocate the structure.
+ *
+ * Field initialization:
+ * At the time of the nvmet_fc_rcv_ls_req() call, there is no content that
+ * is valid in the structure.
+ *
+ * When the structure is used for the LLDD->xmt_ls_rsp() call, the nvmet-fc
+ * layer will fully set the fields in order to specify the response
+ * payload buffer and its length as well as the done routine to be called
+ * upon completion of the transmit. The nvmet-fc layer will also set a
+ * private pointer for its own use in the done routine.
+ *
+ * Values set by the NVMET-FC layer prior to calling the LLDD xmt_ls_rsp
+ * entrypoint.
+ * @rspbuf: pointer to the LS response buffer
+ * @rspdma: PCI DMA address of the LS response buffer
+ * @rsplen: Length, in bytes, of the LS response buffer
+ * @done: The callback routine the LLDD is to invoke upon completion of
+ * transmitting the LS response. req argument is the pointer to
+ * the original ls request.
+ * @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used
+ * as part of the NVMET-FC processing. The LLDD is not to access
+ * this pointer.
+ */
+struct nvmefc_tgt_ls_req {
+ void *rspbuf;
+ dma_addr_t rspdma;
+ u16 rsplen;
+
+ void (*done)(struct nvmefc_tgt_ls_req *req);
+ void *nvmet_fc_private; /* LLDD is not to access !! */
+};
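
As an illustration of the ownership model described above, a subsystem-side
LLDD would typically embed this structure in its own exchange context, hand a
received LS payload to nvmet_fc_rcv_ls_req() (declared further down in this
header), and map back with container_of() when the transport later asks for the
response to be sent. Names prefixed my_ are hypothetical.

#include <linux/nvme-fc-driver.h>

/* Hypothetical LLDD exchange context wrapping the transport's LS request */
struct my_ls_exchange {
	void				*hw_xchg;	/* illustrative HW exchange handle */
	struct nvmefc_tgt_ls_req	ls_req;
};

static void my_ls_frame_received(struct nvmet_fc_target_port *tgtport,
				 struct my_ls_exchange *xchg,
				 void *lsbuf, u32 lslen)
{
	int ret;

	/* none of xchg->ls_req is valid yet; the transport fills it in */
	ret = nvmet_fc_rcv_ls_req(tgtport, &xchg->ls_req, lsbuf, lslen);
	if (ret)
		pr_warn("LS rejected by transport: %d\n", ret);	/* illustrative */
}

/* In the xmt_ls_rsp() entrypoint, map back to the LLDD exchange: */
static struct my_ls_exchange *to_my_ls_exchange(struct nvmefc_tgt_ls_req *req)
{
	return container_of(req, struct my_ls_exchange, ls_req);
}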
+
+/* Operations that NVME-FC layer may request the LLDD to perform for FCP */
+enum {
+ NVMET_FCOP_READDATA = 1, /* xmt data to initiator */
+ NVMET_FCOP_WRITEDATA = 2, /* xmt data from initiator */
+ NVMET_FCOP_READDATA_RSP = 3, /* xmt data to initiator and send
+ * rsp as well
+ */
+ NVMET_FCOP_RSP = 4, /* send rsp frame */
+ NVMET_FCOP_ABORT = 5, /* abort exchange via ABTS */
+ NVMET_FCOP_BA_ACC = 6, /* send BA_ACC */
+ NVMET_FCOP_BA_RJT = 7, /* send BA_RJT */
+};
+
+/**
+ * struct nvmefc_tgt_fcp_req - Structure used between LLDD and NVMET-FC
+ * layer to represent the exchange context and
+ * the specific FC-NVME IU operation(s) to perform
+ * for a FC-NVME FCP IO.
+ *
+ * Structure used between LLDD and nvmet-fc layer to represent the exchange
+ * context for a FC-NVME FCP I/O operation (e.g. a nvme sqe, the sqe-related
+ * memory transfers, and its associated cqe transfer).
+ *
+ * The structure is allocated by the LLDD whenever a FCP CMD IU is received
+ * from the FC link. The address of the structure is passed to the nvmet-fc
+ * layer via the nvmet_fc_rcv_fcp_req() call. The address of the structure
+ * will be passed back to the LLDD for the data operations and transmit of
+ * the response. The LLDD is to use the address to map back to the LLDD
+ * exchange structure which maintains information such as the targetport
+ * the FCP I/O was received on, the remote FC NVME initiator that sent the
+ * FCP I/O, and any FC exchange context. Upon completion of the FCP target
+ * operation, the address of the structure will be passed back to the FCP
+ * op done() routine, allowing the nvmet-fc layer to release dma resources.
+ * Upon completion of the done() routine for either RSP or ABORT ops, no
+ * further access will be made by the nvmet-fc layer and the LLDD can
+ * de-allocate the structure.
+ *
+ * Field initialization:
+ * At the time of the nvmet_fc_rcv_fcp_req() call, there is no content that
+ * is valid in the structure.
+ *
+ * When the structure is used for an FCP target operation, the nvmet-fc
+ * layer will fully set the fields in order to specify the scattergather
+ * list, the transfer length, as well as the done routine to be called
+ * upon completion of the operation. The nvmet-fc layer will also set a
+ * private pointer for its own use in the done routine.
+ *
+ * Note: the LLDD must never fail a NVMET_FCOP_ABORT request !!
+ *
+ * Values set by the NVMET-FC layer prior to calling the LLDD fcp_op
+ * entrypoint.
+ * @op: Indicates the FCP IU operation to perform (see NVMET_FCOP_xxx)
+ * @hwqid: Specifies the hw queue index (0..N-1, where N is the
+ * max_hw_queues value from the LLDD's nvmet_fc_target_template)
+ * that the operation is to use.
+ * @offset: Indicates the DATA_OUT/DATA_IN payload offset to be transferred.
+ * Field is only valid on WRITEDATA, READDATA, or READDATA_RSP ops.
+ * @timeout: amount of time, in seconds, to wait for a response from the NVME
+ * host. A value of 0 is an infinite wait.
+ * Valid only for the following ops:
+ * WRITEDATA: caps the wait for data reception
+ * READDATA_RSP & RSP: caps wait for FCP_CONF reception (if used)
+ * @transfer_length: the length, in bytes, of the DATA_OUT or DATA_IN payload
+ * that is to be transferred.
+ * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops.
+ * @ba_rjt: Contains the BA_RJT payload that is to be transferred.
+ * Valid only for the NVMET_FCOP_BA_RJT op.
+ * @sg: Scatter/gather list for the DATA_OUT/DATA_IN payload data.
+ * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops.
+ * @sg_cnt: Number of valid entries in the scatter/gather list.
+ * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops.
+ * @rspaddr: pointer to the FCP RSP IU buffer to be transmitted
+ * Used by RSP and READDATA_RSP ops
+ * @rspdma: PCI DMA address of the FCP RSP IU buffer
+ * Used by RSP and READDATA_RSP ops
+ * @rsplen: Length, in bytes, of the FCP RSP IU buffer
+ * Used by RSP and READDATA_RSP ops
+ * @done: The callback routine the LLDD is to invoke upon completion of
+ * the operation. req argument is the pointer to the original
+ * FCP subsystem op request.
+ * @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used
+ * as part of the NVMET-FC processing. The LLDD is not to
+ * reference this field.
+ *
+ * Values set by the LLDD indicating completion status of the FCP operation.
+ * Must be set prior to calling the done() callback.
+ * @transferred_length: amount of DATA_OUT payload data received by a
+ * WRITEDATA operation. If not a WRITEDATA operation, value must
+ * be set to 0. Should equal transfer_length on success.
+ * @fcp_error: status of the FCP operation. Must be 0 on success; on failure
+ * must be a NVME_SC_FC_xxxx value.
+ */
+struct nvmefc_tgt_fcp_req {
+ u8 op;
+ u16 hwqid;
+ u32 offset;
+ u32 timeout;
+ u32 transfer_length;
+ struct fc_ba_rjt ba_rjt;
+ struct scatterlist sg[NVME_FC_MAX_SEGMENTS];
+ int sg_cnt;
+ void *rspaddr;
+ dma_addr_t rspdma;
+ u16 rsplen;
+
+ void (*done)(struct nvmefc_tgt_fcp_req *);
+
+ void *nvmet_fc_private; /* LLDD is not to access !! */
+
+ u32 transferred_length;
+ int fcp_error;
+};
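
To make the completion contract concrete, a hypothetical LLDD finishing an
NVMET_FCOP_WRITEDATA operation might do something like the following; only the
status fields and the done() call come from this structure, the surrounding
names are placeholders.

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>

static void my_writedata_complete(struct nvmefc_tgt_fcp_req *fcpreq,
				  u32 bytes_received, bool hw_error)
{
	if (hw_error) {
		fcpreq->transferred_length = bytes_received;
		fcpreq->fcp_error = NVME_SC_FC_TRANSPORT_ERROR;
	} else {
		/* on success this must equal the requested transfer_length */
		fcpreq->transferred_length = fcpreq->transfer_length;
		fcpreq->fcp_error = 0;
	}

	/*
	 * done() may immediately re-enter the LLDD's fcp_op() with the next
	 * operation for this exchange, so fields of fcpreq must not be
	 * relied upon after this call.
	 */
	fcpreq->done(fcpreq);
}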
+
+
+/* Target Features (Bit fields) LLDD supports */
+enum {
+ NVMET_FCTGTFEAT_READDATA_RSP = (1 << 0),
+ /* Bit 0: supports the NVMET_FCOP_READDATA_RSP op, which
+ * sends (the last) Read Data sequence followed by the RSP
+ * sequence in one LLDD operation. Errors during Data
+ * sequence transmit must not allow RSP sequence to be sent.
+ */
+ NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED = (1 << 1),
+ /* Bit 1: When 0, the LLDD will deliver the FCP CMD
+ * on the CPU it should be affinitized to, so work will
+ * be scheduled on the cpu it was received on. When 1, the
+ * LLDD may not deliver the CMD on the CPU it should be
+ * worked on; the transport should pick a cpu to schedule
+ * the work on.
+ */
+};
+
+
+/**
+ * struct nvmet_fc_target_port - structure used between NVME-FC transport and
+ * a LLDD to reference a local NVME subsystem port.
+ * Allocated/created by the nvmet_fc_register_targetport()
+ * transport interface.
+ *
+ * Fields with static values for the port. Initialized by the
+ * port_info struct supplied to the registration call.
+ * @port_num: NVME-FC transport subsystem port number
+ * @node_name: FC WWNN for the port
+ * @port_name: FC WWPN for the port
+ * @private: pointer to memory allocated alongside the local port
+ * structure that is specifically for the LLDD to use.
+ * The length of the buffer corresponds to the target_priv_sz
+ * value specified in the nvmet_fc_target_template supplied by
+ * the LLDD.
+ *
+ * Fields with dynamic values. Values may change based on link state. LLDD
+ * may reference fields directly to change them. Initialized by the
+ * port_info struct supplied to the registration call.
+ * @port_id: FC N_Port_ID currently assigned to the port. Upper 8 bits must
+ * be set to 0.
+ * @port_state: Operational state of the port.
+ */
+struct nvmet_fc_target_port {
+ /* static/read-only fields */
+ u32 port_num;
+ u64 node_name;
+ u64 port_name;
+
+ void *private;
+
+ /* dynamic fields */
+ u32 port_id;
+ enum nvme_fc_obj_state port_state;
+} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+
+
+/**
+ * struct nvmet_fc_target_template - structure containing static entrypoints
+ * and operational parameters for an LLDD that supports NVME
+ * subsystem behavior. Passed by reference in port
+ * registrations. NVME-FC transport remembers template
+ * reference and may access it during runtime operation.
+ *
+ * Subsystem/Target Transport Entrypoints/Parameters:
+ *
+ * @targetport_delete: The LLDD initiates deletion of a targetport via
+ * nvmet_fc_unregister_targetport(). However, the teardown is
+ * asynchronous. This routine is called upon the completion of the
+ * teardown to inform the LLDD that the targetport has been deleted.
+ * Entrypoint is Mandatory.
+ *
+ * @xmt_ls_rsp: Called to transmit the response to a FC-NVME FC-4 LS service.
+ * The nvmefc_tgt_ls_req structure is the same LLDD-supplied exchange
+ * structure specified in the nvmet_fc_rcv_ls_req() call made when
+ * the LS request was received. The structure will fully describe
+ * the buffers for the response payload and the dma address of the
+ * payload. The LLDD is to transmit the response (or return a non-zero
+ * errno status), and upon completion of the transmit, call the
+ * "done" routine specified in the nvmefc_tgt_ls_req structure
+ * (argument to done is the ls request structure itself).
+ * After calling the done routine, the LLDD shall consider the
+ * LS handling complete and the nvmefc_tgt_ls_req structure may
+ * be freed/released.
+ * Entrypoint is Mandatory.
+ *
+ * @fcp_op: Called to perform a data transfer, transmit a response, or
+ * abort an FCP operation. The nvmefc_tgt_fcp_req structure is the same
+ * LLDD-supplied exchange structure specified in the
+ * nvmet_fc_rcv_fcp_req() call made when the FCP CMD IU was received.
+ * The op field in the structure shall indicate the operation for
+ * the LLDD to perform relative to the io.
+ * NVMET_FCOP_READDATA operation: the LLDD is to send the
+ * payload data (described by sglist) to the host in 1 or
+ * more FC sequences (preferably 1). Note: the fc-nvme layer
+ * may call the READDATA operation multiple times for longer
+ * payloads.
+ * NVMET_FCOP_WRITEDATA operation: the LLDD is to receive the
+ * payload data (described by sglist) from the host via 1 or
+ * more FC sequences (preferably 1). The LLDD is to generate
+ * the XFER_RDY IU(s) corresponding to the data being requested.
+ * Note: the FC-NVME layer may call the WRITEDATA operation
+ * multiple times for longer payloads.
+ * NVMET_FCOP_READDATA_RSP operation: the LLDD is to send the
+ * payload data (described by sglist) to the host in 1 or
+ * more FC sequences (preferably 1). If an error occurs during
+ * payload data transmission, the LLDD is to set the
+ * nvmefc_tgt_fcp_req fcp_error and transferred_length fields, then
+ * consider the operation complete. On error, the LLDD is to not
+ * transmit the FCP_RSP iu. If all payload data is transferred
+ * successfully, the LLDD is to update the nvmefc_tgt_fcp_req
+ * transferred_length field and may subsequently transmit the
+ * FCP_RSP iu payload (described by rspbuf, rspdma, rsplen).
+ * The LLDD is to await FCP_CONF reception to confirm the RSP
+ * reception by the host. The LLDD may retransmit the FCP_RSP iu
+ * if necessary per FC-NVME. Upon reception of FCP_CONF, or upon
+ * FCP_CONF failure, the LLDD is to set the nvmefc_tgt_fcp_req
+ * fcp_error field and consider the operation complete.
+ * NVMET_FCOP_RSP: the LLDD is to transmit the FCP_RSP iu payload
+ * (described by rspbuf, rspdma, rsplen). The LLDD is to await
+ * FCP_CONF reception to confirm the RSP reception by the host.
+ * The LLDD may retransmit the FCP_RSP iu if necessary per FC-NVME.
+ * Upon reception of FCP_CONF, or upon FCP_CONF failure, the
+ * LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and
+ * consider the operation complete.
+ * NVMET_FCOP_ABORT: the LLDD is to terminate the exchange
+ * corresponding to the fcp operation. The LLDD shall send
+ * ABTS and follow FC exchange abort-multi rules, including
+ * ABTS retries and possible logout.
+ * Upon completing the indicated operation, the LLDD is to set the
+ * status fields for the operation (transferred_length and fcp_error
+ * status) in the request, then call the "done" routine
+ * indicated in the fcp request. Upon return from the "done"
+ * routine for either a NVMET_FCOP_RSP or NVMET_FCOP_ABORT operation
+ * the fc-nvme layer will no longer reference the fcp request,
+ * allowing the LLDD to free/release the fcp request.
+ * Note: when calling the done routine for READDATA or WRITEDATA
+ * operations, the fc-nvme layer may immediately convert, in the same
+ * thread and before returning to the LLDD, the fcp operation to
+ * the next operation for the fcp io and call the LLDDs fcp_op
+ * call again. If fields in the fcp request are to be accessed post
+ * the done call, the LLDD should save their values prior to calling
+ * the done routine, and inspect the saved values after the done
+ * routine.
+ * Returns 0 on success, -<errno> on failure (Ex: -EIO)
+ * Entrypoint is Mandatory.
+ *
+ * @max_hw_queues: indicates the maximum number of hw queues the LLDD
+ * supports for cpu affinitization.
+ * Value is Mandatory. Must be at least 1.
+ *
+ * @max_sgl_segments: indicates the maximum number of sgl segments supported
+ * by the LLDD
+ * Value is Mandatory. Must be at least 1. Recommend at least 256.
+ *
+ * @max_dif_sgl_segments: indicates the maximum number of sgl segments
+ * supported by the LLDD for DIF operations.
+ * Value is Mandatory. Must be at least 1. Recommend at least 256.
+ *
+ * @dma_boundary: indicates the dma address boundary where dma mappings
+ * will be split across.
+ * Value is Mandatory. Typical value is 0xFFFFFFFF to split across
+ * 4Gig address boundaries
+ *
+ * @target_features: The LLDD sets bits in this field to correspond to
+ * optional features that are supported by the LLDD.
+ * Refer to the NVMET_FCTGTFEAT_xxx values.
+ * Value is Mandatory. Allowed to be zero.
+ *
+ * @target_priv_sz: The LLDD sets this field to the amount of additional
+ * memory that it would like the fc nvme layer to allocate on the LLDD's
+ * behalf whenever a targetport is allocated. The additional memory
+ * area is solely for use by the LLDD and its location is specified by
+ * the targetport->private pointer.
+ * Value is Mandatory. Allowed to be zero.
+ */
+struct nvmet_fc_target_template {
+ void (*targetport_delete)(struct nvmet_fc_target_port *tgtport);
+ int (*xmt_ls_rsp)(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_ls_req *tls_req);
+ int (*fcp_op)(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *);
+
+ u32 max_hw_queues;
+ u16 max_sgl_segments;
+ u16 max_dif_sgl_segments;
+ u64 dma_boundary;
+
+ u32 target_features;
+
+ u32 target_priv_sz;
+};
+
+
+int nvmet_fc_register_targetport(struct nvmet_fc_port_info *portinfo,
+ struct nvmet_fc_target_template *template,
+ struct device *dev,
+ struct nvmet_fc_target_port **tgtport_p);
+
+int nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *tgtport);
+
+int nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_ls_req *lsreq,
+ void *lsreqbuf, u32 lsreqbuf_len);
+
+int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq,
+ void *cmdiubuf, u32 cmdiubuf_len);
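
Putting the target-side pieces together, a registration sketch might look as
follows; the my_* entrypoints are stubs standing in for a real LLDD
implementation of the contract described in the template kerneldoc above, and
the template values are placeholders.

#include <linux/nvme-fc-driver.h>

static void my_targetport_delete(struct nvmet_fc_target_port *tgtport)
{
	/* targetport teardown has completed */
}

static int my_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
			 struct nvmefc_tgt_ls_req *tls_req)
{
	/* transmit tls_req->rspbuf/rsplen, then call tls_req->done(tls_req) */
	return 0;
}

static int my_fcp_op(struct nvmet_fc_target_port *tgtport,
		     struct nvmefc_tgt_fcp_req *fcpreq)
{
	/* act on fcpreq->op (NVMET_FCOP_xxx), then call fcpreq->done() */
	return 0;
}

static struct nvmet_fc_target_template my_tgt_template = {
	.targetport_delete	= my_targetport_delete,
	.xmt_ls_rsp		= my_xmt_ls_rsp,
	.fcp_op			= my_fcp_op,
	.max_hw_queues		= 4,
	.max_sgl_segments	= 256,
	.max_dif_sgl_segments	= 256,
	.dma_boundary		= 0xFFFFFFFF,
	.target_features	= NVMET_FCTGTFEAT_READDATA_RSP,
	.target_priv_sz		= 0,
};

static int my_register_subsys_port(struct device *dev, u64 wwnn, u64 wwpn,
				   struct nvmet_fc_target_port **tgtport)
{
	struct nvmet_fc_port_info pinfo = {
		.node_name = wwnn,
		.port_name = wwpn,
	};

	return nvmet_fc_register_targetport(&pinfo, &my_tgt_template,
					    dev, tgtport);
}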
+
+#endif /* _NVME_FC_DRIVER_H */
diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h
new file mode 100644
index 000000000000..4b45226bd604
--- /dev/null
+++ b/include/linux/nvme-fc.h
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2016 Avago Technologies. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful.
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
+ * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
+ * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package
+ *
+ */
+
+/*
+ * This file contains definitions relative to FC-NVME r1.11 and a few
+ * newer items
+ */
+
+#ifndef _NVME_FC_H
+#define _NVME_FC_H 1
+
+
+#define NVME_CMD_SCSI_ID 0xFD
+#define NVME_CMD_FC_ID FC_TYPE_NVME
+
+/* FC-NVME Cmd IU Flags */
+#define FCNVME_CMD_FLAGS_DIRMASK 0x03
+#define FCNVME_CMD_FLAGS_WRITE 0x01
+#define FCNVME_CMD_FLAGS_READ 0x02
+
+struct nvme_fc_cmd_iu {
+ __u8 scsi_id;
+ __u8 fc_id;
+ __be16 iu_len;
+ __u8 rsvd4[3];
+ __u8 flags;
+ __be64 connection_id;
+ __be32 csn;
+ __be32 data_len;
+ struct nvme_command sqe;
+ __be32 rsvd88[2];
+};
+
+#define NVME_FC_SIZEOF_ZEROS_RSP 12
+
+struct nvme_fc_ersp_iu {
+ __u8 rsvd0[2];
+ __be16 iu_len;
+ __be32 rsn;
+ __be32 xfrd_len;
+ __be32 rsvd12;
+ struct nvme_completion cqe;
+ /* for now - no additional payload */
+};
+
+
+/* FC-NVME r1.03/16-119v0 NVME Link Services */
+enum {
+ FCNVME_LS_RSVD = 0,
+ FCNVME_LS_RJT = 1,
+ FCNVME_LS_ACC = 2,
+ FCNVME_LS_CREATE_ASSOCIATION = 3,
+ FCNVME_LS_CREATE_CONNECTION = 4,
+ FCNVME_LS_DISCONNECT = 5,
+};
+
+/* FC-NVME r1.03/16-119v0 NVME Link Service Descriptors */
+enum {
+ FCNVME_LSDESC_RSVD = 0x0,
+ FCNVME_LSDESC_RQST = 0x1,
+ FCNVME_LSDESC_RJT = 0x2,
+ FCNVME_LSDESC_CREATE_ASSOC_CMD = 0x3,
+ FCNVME_LSDESC_CREATE_CONN_CMD = 0x4,
+ FCNVME_LSDESC_DISCONN_CMD = 0x5,
+ FCNVME_LSDESC_CONN_ID = 0x6,
+ FCNVME_LSDESC_ASSOC_ID = 0x7,
+};
+
+
+/* ********** start of Link Service Descriptors ********** */
+
+
+/*
+ * fills in length of a descriptor. Structure minus descriptor header
+ */
+static inline __be32 fcnvme_lsdesc_len(size_t sz)
+{
+ return cpu_to_be32(sz - (2 * sizeof(u32)));
+}
+
+
+struct fcnvme_ls_rqst_w0 {
+ u8 ls_cmd; /* FCNVME_LS_xxx */
+ u8 zeros[3];
+};
+
+/* FCNVME_LSDESC_RQST */
+struct fcnvme_lsdesc_rqst {
+ __be32 desc_tag; /* FCNVME_LSDESC_xxx */
+ __be32 desc_len;
+ struct fcnvme_ls_rqst_w0 w0;
+ __be32 rsvd12;
+};
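
As a small usage example, the fcnvme_lsdesc_len() helper above fills desc_len
from the full descriptor size; a caller building the common request descriptor
might do something like the following (my_init_rqst_desc is illustrative only).

#include <linux/string.h>
#include <linux/nvme-fc.h>

static void my_init_rqst_desc(struct fcnvme_lsdesc_rqst *rqst, u8 ls_cmd)
{
	memset(rqst, 0, sizeof(*rqst));
	rqst->desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
	rqst->desc_len = fcnvme_lsdesc_len(sizeof(*rqst));
	rqst->w0.ls_cmd = ls_cmd;	/* e.g. FCNVME_LS_CREATE_ASSOCIATION */
}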
+
+
+
+
+/* FCNVME_LSDESC_RJT */
+struct fcnvme_lsdesc_rjt {
+ __be32 desc_tag; /* FCNVME_LSDESC_xxx */
+ __be32 desc_len;
+ u8 rsvd8;
+
+ /*
+ * Reject reason and explanation codes are generic
+ * to ELS's from LS-3.
+ */
+ u8 reason_code;
+ u8 reason_explanation;
+
+ u8 vendor;
+ __be32 rsvd12;
+};
+
+
+#define FCNVME_ASSOC_HOSTID_LEN 64
+#define FCNVME_ASSOC_HOSTNQN_LEN 256
+#define FCNVME_ASSOC_SUBNQN_LEN 256
+
+/* FCNVME_LSDESC_CREATE_ASSOC_CMD */
+struct fcnvme_lsdesc_cr_assoc_cmd {
+ __be32 desc_tag; /* FCNVME_LSDESC_xxx */
+ __be32 desc_len;
+ __be16 ersp_ratio;
+ __be16 rsvd10;
+ __be32 rsvd12[9];
+ __be16 cntlid;
+ __be16 sqsize;
+ __be32 rsvd52;
+ u8 hostid[FCNVME_ASSOC_HOSTID_LEN];
+ u8 hostnqn[FCNVME_ASSOC_HOSTNQN_LEN];
+ u8 subnqn[FCNVME_ASSOC_SUBNQN_LEN];
+ u8 rsvd632[384];
+};
+
+/* FCNVME_LSDESC_CREATE_CONN_CMD */
+struct fcnvme_lsdesc_cr_conn_cmd {
+ __be32 desc_tag; /* FCNVME_LSDESC_xxx */
+ __be32 desc_len;
+ __be16 ersp_ratio;
+ __be16 rsvd10;
+ __be32 rsvd12[9];
+ __be16 qid;
+ __be16 sqsize;
+ __be32 rsvd52;
+};
+
+/* Disconnect Scope Values */
+enum {
+ FCNVME_DISCONN_ASSOCIATION = 0,
+ FCNVME_DISCONN_CONNECTION = 1,
+};
+
+/* FCNVME_LSDESC_DISCONN_CMD */
+struct fcnvme_lsdesc_disconn_cmd {
+ __be32 desc_tag; /* FCNVME_LSDESC_xxx */
+ __be32 desc_len;
+ u8 rsvd8[3];
+ /* note: scope is really a 1 bit field */
+ u8 scope; /* FCNVME_DISCONN_xxx */
+ __be32 rsvd12;
+ __be64 id;
+};
+
+/* FCNVME_LSDESC_CONN_ID */
+struct fcnvme_lsdesc_conn_id {
+ __be32 desc_tag; /* FCNVME_LSDESC_xxx */
+ __be32 desc_len;
+ __be64 connection_id;
+};
+
+/* FCNVME_LSDESC_ASSOC_ID */
+struct fcnvme_lsdesc_assoc_id {
+ __be32 desc_tag; /* FCNVME_LSDESC_xxx */
+ __be32 desc_len;
+ __be64 association_id;
+};
+
+/* r_ctl values */
+enum {
+ FCNVME_RS_RCTL_DATA = 1,
+ FCNVME_RS_RCTL_XFER_RDY = 5,
+ FCNVME_RS_RCTL_RSP = 8,
+};
+
+
+/* ********** start of Link Services ********** */
+
+
+/* FCNVME_LS_RJT */
+struct fcnvme_ls_rjt {
+ struct fcnvme_ls_rqst_w0 w0;
+ __be32 desc_list_len;
+ struct fcnvme_lsdesc_rqst rqst;
+ struct fcnvme_lsdesc_rjt rjt;
+};
+
+/* FCNVME_LS_ACC */
+struct fcnvme_ls_acc_hdr {
+ struct fcnvme_ls_rqst_w0 w0;
+ __be32 desc_list_len;
+ struct fcnvme_lsdesc_rqst rqst;
+ /* Followed by cmd-specific ACC descriptors, see next definitions */
+};
+
+/* FCNVME_LS_CREATE_ASSOCIATION */
+struct fcnvme_ls_cr_assoc_rqst {
+ struct fcnvme_ls_rqst_w0 w0;
+ __be32 desc_list_len;
+ struct fcnvme_lsdesc_cr_assoc_cmd assoc_cmd;
+};
+
+struct fcnvme_ls_cr_assoc_acc {
+ struct fcnvme_ls_acc_hdr hdr;
+ struct fcnvme_lsdesc_assoc_id associd;
+ struct fcnvme_lsdesc_conn_id connectid;
+};
+
+
+/* FCNVME_LS_CREATE_CONNECTION */
+struct fcnvme_ls_cr_conn_rqst {
+ struct fcnvme_ls_rqst_w0 w0;
+ __be32 desc_list_len;
+ struct fcnvme_lsdesc_assoc_id associd;
+ struct fcnvme_lsdesc_cr_conn_cmd connect_cmd;
+};
+
+struct fcnvme_ls_cr_conn_acc {
+ struct fcnvme_ls_acc_hdr hdr;
+ struct fcnvme_lsdesc_conn_id connectid;
+};
+
+/* FCNVME_LS_DISCONNECT */
+struct fcnvme_ls_disconnect_rqst {
+ struct fcnvme_ls_rqst_w0 w0;
+ __be32 desc_list_len;
+ struct fcnvme_lsdesc_assoc_id associd;
+ struct fcnvme_lsdesc_disconn_cmd discon_cmd;
+};
+
+struct fcnvme_ls_disconnect_acc {
+ struct fcnvme_ls_acc_hdr hdr;
+};
+
+
+/*
+ * Yet to be defined in FC-NVME:
+ */
+#define NVME_FC_CONNECT_TIMEOUT_SEC 2 /* 2 seconds */
+#define NVME_FC_LS_TIMEOUT_SEC 2 /* 2 seconds */
+#define NVME_FC_TGTOP_TIMEOUT_SEC 2 /* 2 seconds */
+
+
+#endif /* _NVME_FC_H */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index fc3c24206593..3d1c6f1b15c9 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -242,6 +242,7 @@ enum {
NVME_CTRL_ONCS_COMPARE = 1 << 0,
NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1,
NVME_CTRL_ONCS_DSM = 1 << 2,
+ NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
NVME_CTRL_VWC_PRESENT = 1 << 0,
};
@@ -558,6 +559,23 @@ struct nvme_dsm_range {
__le64 slba;
};
+struct nvme_write_zeroes_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __u64 rsvd2;
+ __le64 metadata;
+ union nvme_data_ptr dptr;
+ __le64 slba;
+ __le16 length;
+ __le16 control;
+ __le32 dsmgmt;
+ __le32 reftag;
+ __le16 apptag;
+ __le16 appmask;
+};
+
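
For context, a host driver would build the new command along the lines below;
the 0x08 opcode value and the 0's-based length field follow the NVMe
specification's Write Zeroes definition and are stated here as assumptions,
since the opcode enum is not part of this hunk.

#include <linux/string.h>
#include <linux/nvme.h>

static void my_setup_write_zeroes(struct nvme_command *cmd, u32 nsid,
				  u64 slba, u16 nr_blocks)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->write_zeroes.opcode = 0x08;	/* NVMe spec: Write Zeroes */
	cmd->write_zeroes.nsid = cpu_to_le32(nsid);
	cmd->write_zeroes.slba = cpu_to_le64(slba);
	cmd->write_zeroes.length = cpu_to_le16(nr_blocks - 1);	/* 0's based */
}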
/* Admin commands */
enum nvme_admin_opcode {
@@ -857,6 +875,7 @@ struct nvme_command {
struct nvme_download_firmware dlfw;
struct nvme_format_cmd format;
struct nvme_dsm_cmd dsm;
+ struct nvme_write_zeroes_cmd write_zeroes;
struct nvme_abort_cmd abort;
struct nvme_get_log_page_command get_log_page;
struct nvmf_common_command fabrics;
@@ -947,6 +966,7 @@ enum {
NVME_SC_BAD_ATTRIBUTES = 0x180,
NVME_SC_INVALID_PI = 0x181,
NVME_SC_READ_ONLY = 0x182,
+ NVME_SC_ONCS_NOT_SUPPORTED = 0x183,
/*
* I/O Command Set Specific - Fabrics commands:
@@ -973,17 +993,30 @@ enum {
NVME_SC_UNWRITTEN_BLOCK = 0x287,
NVME_SC_DNR = 0x4000,
+
+
+ /*
+ * FC Transport-specific error status values for NVME commands
+ *
+ * Transport-specific status code values must be in the range 0xB0..0xBF
+ */
+
+ /* Generic FC failure - catchall */
+ NVME_SC_FC_TRANSPORT_ERROR = 0x00B0,
+
+ /* I/O failure due to FC ABTS'd */
+ NVME_SC_FC_TRANSPORT_ABORTED = 0x00B1,
};
struct nvme_completion {
/*
* Used by Admin and Fabrics commands to return data:
*/
- union {
- __le16 result16;
- __le32 result;
- __le64 result64;
- };
+ union nvme_result {
+ __le16 u16;
+ __le32 u32;
+ __le64 u64;
+ } result;
__le16 sq_head; /* how much of this queue may be reclaimed */
__le16 sq_id; /* submission queue that generated this entry */
__u16 command_id; /* of the command which completed */
diff --git a/include/linux/of.h b/include/linux/of.h
index 299aeb192727..d72f01009297 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -1266,6 +1266,18 @@ static inline bool of_device_is_system_power_controller(const struct device_node
* Overlay support
*/
+enum of_overlay_notify_action {
+ OF_OVERLAY_PRE_APPLY,
+ OF_OVERLAY_POST_APPLY,
+ OF_OVERLAY_PRE_REMOVE,
+ OF_OVERLAY_POST_REMOVE,
+};
+
+struct of_overlay_notify_data {
+ struct device_node *overlay;
+ struct device_node *target;
+};
+
#ifdef CONFIG_OF_OVERLAY
/* ID based overlays; the API for external users */
@@ -1273,6 +1285,9 @@ int of_overlay_create(struct device_node *tree);
int of_overlay_destroy(int id);
int of_overlay_destroy_all(void);
+int of_overlay_notifier_register(struct notifier_block *nb);
+int of_overlay_notifier_unregister(struct notifier_block *nb);
+
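
A consumer of the new overlay notifier hooks might register a standard
notifier_block along these lines; the action/data pairing is inferred from the
enum and struct added above, and the my_ names are placeholders.

#include <linux/notifier.h>
#include <linux/of.h>

static int my_overlay_notify(struct notifier_block *nb,
			     unsigned long action, void *arg)
{
	struct of_overlay_notify_data *nd = arg;

	if (action == OF_OVERLAY_PRE_APPLY)
		pr_info("overlay about to be applied to %s\n",
			nd->target->full_name);

	return NOTIFY_OK;
}

static struct notifier_block my_overlay_nb = {
	.notifier_call = my_overlay_notify,
};

/* at init time: of_overlay_notifier_register(&my_overlay_nb); */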
#else
static inline int of_overlay_create(struct device_node *tree)
@@ -1290,6 +1305,16 @@ static inline int of_overlay_destroy_all(void)
return -ENOTSUPP;
}
+static inline int of_overlay_notifier_register(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int of_overlay_notifier_unregister(struct notifier_block *nb)
+{
+ return 0;
+}
+
#endif
#endif /* _LINUX_OF_H */
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 4341f32516d8..271b3fdf0070 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -71,6 +71,7 @@ extern int early_init_dt_scan_chosen_stdout(void);
extern void early_init_fdt_scan_reserved_mem(void);
extern void early_init_fdt_reserve_self(void);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
+extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size);
extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
bool no_map);
extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align);
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index e80b9c762a03..6a7fc5051099 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -31,8 +31,16 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
#endif /* CONFIG_OF_IOMMU */
-void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops);
-const struct iommu_ops *of_iommu_get_ops(struct device_node *np);
+static inline void of_iommu_set_ops(struct device_node *np,
+ const struct iommu_ops *ops)
+{
+ iommu_register_instance(&np->fwnode, ops);
+}
+
+static inline const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
+{
+ return iommu_get_instance(&np->fwnode);
+}
extern struct of_device_id __iommu_of_table;
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 7fd5cfce9140..0e0974eceb80 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -16,6 +16,7 @@ int of_pci_get_devfn(struct device_node *np);
int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
int of_get_pci_domain_nr(struct device_node *node);
+int of_pci_get_max_link_speed(struct device_node *node);
void of_pci_check_probe_only(void);
int of_pci_map_rid(struct device_node *np, u32 rid,
const char *map_name, const char *map_mask_name,
@@ -62,6 +63,12 @@ static inline int of_pci_map_rid(struct device_node *np, u32 rid,
return -EINVAL;
}
+static inline int
+of_pci_get_max_link_speed(struct device_node *node)
+{
+ return -EINVAL;
+}
+
static inline void of_pci_check_probe_only(void) { }
#endif
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 74e4dda91238..6b5818d6de32 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -79,6 +79,7 @@ enum pageflags {
PG_dirty,
PG_lru,
PG_active,
+ PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
PG_slab,
PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
PG_arch_1,
@@ -87,7 +88,6 @@ enum pageflags {
PG_private_2, /* If pagecache, has fs aux data */
PG_writeback, /* Page is under writeback */
PG_head, /* A head page */
- PG_swapcache, /* Swap page: swp_entry_t in private */
PG_mappedtodisk, /* Has blocks allocated on-disk */
PG_reclaim, /* To be reclaimed asap */
PG_swapbacked, /* Page is backed by RAM/swap */
@@ -110,6 +110,9 @@ enum pageflags {
/* Filesystems */
PG_checked = PG_owner_priv_1,
+ /* SwapBacked */
+ PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
+
/* Two page bits are conscripted by FS-Cache to maintain local caching
* state. These bits are set on pages belonging to the netfs's inodes
* when those inodes are being locally cached.
@@ -167,6 +170,9 @@ static __always_inline int PageCompound(struct page *page)
* for compound page all operations related to the page flag applied to
* head page.
*
+ * PF_ONLY_HEAD:
+ * for compound page, callers only ever operate on the head page.
+ *
* PF_NO_TAIL:
* modifications of the page flag must be done on small or head pages,
* checks can be done on tail pages too.
@@ -176,6 +182,9 @@ static __always_inline int PageCompound(struct page *page)
*/
#define PF_ANY(page, enforce) page
#define PF_HEAD(page, enforce) compound_head(page)
+#define PF_ONLY_HEAD(page, enforce) ({ \
+ VM_BUG_ON_PGFLAGS(PageTail(page), page); \
+ page;})
#define PF_NO_TAIL(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \
compound_head(page);})
@@ -253,6 +262,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; }
TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
+PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
PAGEFLAG(Referenced, referenced, PF_HEAD)
TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
@@ -314,7 +324,13 @@ PAGEFLAG_FALSE(HighMem)
#endif
#ifdef CONFIG_SWAP
-PAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND)
+static __always_inline int PageSwapCache(struct page *page)
+{
+ return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
+
+}
+SETPAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND)
+CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(SwapCache)
#endif
@@ -701,12 +717,12 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
* Flags checked when a page is freed. Pages being freed should not have
* these flags set. It they are, there is a problem.
*/
-#define PAGE_FLAGS_CHECK_AT_FREE \
- (1UL << PG_lru | 1UL << PG_locked | \
- 1UL << PG_private | 1UL << PG_private_2 | \
- 1UL << PG_writeback | 1UL << PG_reserved | \
- 1UL << PG_slab | 1UL << PG_swapcache | 1UL << PG_active | \
- 1UL << PG_unevictable | __PG_MLOCKED)
+#define PAGE_FLAGS_CHECK_AT_FREE \
+ (1UL << PG_lru | 1UL << PG_locked | \
+ 1UL << PG_private | 1UL << PG_private_2 | \
+ 1UL << PG_writeback | 1UL << PG_reserved | \
+ 1UL << PG_slab | 1UL << PG_active | \
+ 1UL << PG_unevictable | __PG_MLOCKED)
/*
* Flags checked when a page is prepped for return by the page allocator.
@@ -735,6 +751,7 @@ static inline int page_has_private(struct page *page)
#undef PF_ANY
#undef PF_HEAD
+#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#endif /* !__GENERATING_BOUNDS_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7dbe9148b2f8..324c8dbad1e1 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -9,7 +9,7 @@
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
@@ -486,22 +486,14 @@ static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
* and for filesystems which need to wait on PG_private.
*/
extern void wait_on_page_bit(struct page *page, int bit_nr);
-
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
-extern int wait_on_page_bit_killable_timeout(struct page *page,
- int bit_nr, unsigned long timeout);
-
-static inline int wait_on_page_locked_killable(struct page *page)
-{
- if (!PageLocked(page))
- return 0;
- return wait_on_page_bit_killable(compound_head(page), PG_locked);
-}
+extern void wake_up_page_bit(struct page *page, int bit_nr);
-extern wait_queue_head_t *page_waitqueue(struct page *page);
static inline void wake_up_page(struct page *page, int bit)
{
- __wake_up_bit(page_waitqueue(page), &page->flags, bit);
+ if (!PageWaiters(page))
+ return;
+ wake_up_page_bit(page, bit);
}
/*
@@ -517,6 +509,13 @@ static inline void wait_on_page_locked(struct page *page)
wait_on_page_bit(compound_head(page), PG_locked);
}
+static inline int wait_on_page_locked_killable(struct page *page)
+{
+ if (!PageLocked(page))
+ return 0;
+ return wait_on_page_bit_killable(compound_head(page), PG_locked);
+}
+
/*
* Wait for a page to complete writeback
*/
diff --git a/include/linux/parser.h b/include/linux/parser.h
index 39d5b7955b23..884c1e6eb3fe 100644
--- a/include/linux/parser.h
+++ b/include/linux/parser.h
@@ -27,6 +27,7 @@ typedef struct {
int match_token(char *, const match_table_t table, substring_t args[]);
int match_int(substring_t *, int *result);
+int match_u64(substring_t *, u64 *result);
int match_octal(substring_t *, int *result);
int match_hex(substring_t *, int *result);
bool match_wildcard(const char *pattern, const char *str);
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 7d63a66e8ed4..7a4e83a8c89c 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -24,7 +24,9 @@ static inline acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
}
extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle);
-extern phys_addr_t pci_mcfg_lookup(u16 domain, struct resource *bus_res);
+struct pci_ecam_ops;
+extern int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres,
+ struct pci_ecam_ops **ecam_ops);
static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
{
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index 7adad206b1f4..f0d2b9451270 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -59,6 +59,15 @@ void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn,
/* default ECAM ops */
extern struct pci_ecam_ops pci_generic_ecam_ops;
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+extern struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */
+extern struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */
+extern struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */
+extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
+extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
+extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
+#endif
+
#ifdef CONFIG_PCI_HOST_GENERIC
/* for DT-based PCI controllers that support ECAM */
int pci_host_common_probe(struct platform_device *pdev,
diff --git a/include/linux/pci.h b/include/linux/pci.h
index a38772a85588..e2d1a124216a 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -244,6 +244,7 @@ struct pci_cap_saved_state {
struct pci_cap_saved_data cap;
};
+struct irq_affinity;
struct pcie_link_state;
struct pci_vpd;
struct pci_sriov;
@@ -332,7 +333,6 @@ struct pci_dev {
* directly, use the values stored here. They might be different!
*/
unsigned int irq;
- struct cpumask *irq_affinity;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
bool match_driver; /* Skip attaching driver */
@@ -420,9 +420,13 @@ static inline int pci_channel_offline(struct pci_dev *pdev)
struct pci_host_bridge {
struct device dev;
struct pci_bus *bus; /* root bus */
+ struct pci_ops *ops;
+ void *sysdata;
+ int busnr;
struct list_head windows; /* resource_entry */
void (*release_fn)(struct pci_host_bridge *);
void *release_data;
+ struct msi_controller *msi;
unsigned int ignore_reset_delay:1; /* for entire hierarchy */
/* Resource alignment requirements */
resource_size_t (*align_resource)(struct pci_dev *dev,
@@ -430,10 +434,23 @@ struct pci_host_bridge {
resource_size_t start,
resource_size_t size,
resource_size_t align);
+ unsigned long private[0] ____cacheline_aligned;
};
#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
+static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
+{
+ return (void *)bridge->private;
+}
+
+static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
+{
+ return container_of(priv, struct pci_host_bridge, private);
+}
+
+struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
+int pci_register_host_bridge(struct pci_host_bridge *bridge);
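
A rough sketch of the intended use of the new allocation and private-data
helpers, inferred from the declarations above; the foo_* names are placeholders
and resource/window setup is elided.

#include <linux/pci.h>

struct foo_pcie {
	void __iomem *regs;		/* hypothetical controller registers */
};

static int foo_pcie_register(struct pci_ops *ops, void *sysdata, int busnr)
{
	struct pci_host_bridge *bridge;
	struct foo_pcie *foo;

	bridge = pci_alloc_host_bridge(sizeof(*foo));
	if (!bridge)
		return -ENOMEM;

	foo = pci_host_bridge_priv(bridge);	/* driver-private area */
	/* ... map foo->regs, add resources to bridge->windows ... */

	bridge->ops = ops;
	bridge->sysdata = sysdata;
	bridge->busnr = busnr;

	return pci_register_host_bridge(bridge);
}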
struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
@@ -1310,8 +1327,10 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev,
return rc;
return 0;
}
-int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
- unsigned int max_vecs, unsigned int flags);
+int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags,
+ const struct irq_affinity *affd);
+
void pci_free_irq_vectors(struct pci_dev *dev);
int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
@@ -1339,14 +1358,17 @@ static inline int pci_enable_msix_range(struct pci_dev *dev,
static inline int pci_enable_msix_exact(struct pci_dev *dev,
struct msix_entry *entries, int nvec)
{ return -ENOSYS; }
-static inline int pci_alloc_irq_vectors(struct pci_dev *dev,
- unsigned int min_vecs, unsigned int max_vecs,
- unsigned int flags)
+
+static inline int
+pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags,
+ const struct irq_affinity *aff_desc)
{
if (min_vecs > 1)
return -EINVAL;
return 1;
}
+
static inline void pci_free_irq_vectors(struct pci_dev *dev)
{
}
@@ -1364,6 +1386,14 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
}
#endif
+static inline int
+pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags)
+{
+ return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
+ NULL);
+}
+
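
For drivers that do not need an affinity descriptor, the plain wrapper above
keeps working unchanged; a typical call might look like this, where foo_isr is
a placeholder handler and PCI_IRQ_ALL_TYPES is assumed to be the usual
legacy/MSI/MSI-X shorthand.

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t foo_isr(int irq, void *data)
{
	return IRQ_HANDLED;		/* placeholder interrupt handler */
}

static int foo_setup_irq(struct pci_dev *pdev)
{
	int nvec;

	/* any of MSI-X, MSI or legacy INTx is acceptable here */
	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
	if (nvec < 0)
		return nvec;

	/* pci_irq_vector() maps a vector index to its Linux IRQ number */
	return request_irq(pci_irq_vector(pdev, 0), foo_isr, 0, "foo", pdev);
}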
#ifdef CONFIG_PCIEPORTBUS
extern bool pcie_ports_disabled;
extern bool pcie_ports_auto;
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 8c7895061121..2e855afa0212 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -176,6 +176,7 @@ struct hotplug_params {
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp);
+bool pciehp_is_native(struct pci_dev *pdev);
int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags);
int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle);
int acpi_pci_detect_ejectable(acpi_handle handle);
@@ -185,5 +186,6 @@ static inline int pci_get_hp_params(struct pci_dev *dev,
{
return -ENODEV;
}
+static inline bool pciehp_is_native(struct pci_dev *pdev) { return true; }
#endif
#endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index c58752fe16c4..73dda0edcb97 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -23,8 +23,10 @@
#define PCI_CLASS_STORAGE_SATA 0x0106
#define PCI_CLASS_STORAGE_SATA_AHCI 0x010601
#define PCI_CLASS_STORAGE_SAS 0x0107
+#define PCI_CLASS_STORAGE_EXPRESS 0x010802
#define PCI_CLASS_STORAGE_OTHER 0x0180
+
#define PCI_BASE_CLASS_NETWORK 0x02
#define PCI_CLASS_NETWORK_ETHERNET 0x0200
#define PCI_CLASS_NETWORK_TOKEN_RING 0x0201
@@ -2251,17 +2253,35 @@
#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000
#define PCI_VENDOR_ID_VMWARE 0x15ad
+#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07b0
#define PCI_VENDOR_ID_ZOLTRIX 0x15b0
#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0
#define PCI_VENDOR_ID_MELLANOX 0x15b3
-#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44
+#define PCI_DEVICE_ID_MELLANOX_CONNECTX3 0x1003
+#define PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO 0x1007
+#define PCI_DEVICE_ID_MELLANOX_CONNECTIB 0x1011
+#define PCI_DEVICE_ID_MELLANOX_CONNECTX4 0x1013
+#define PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX 0x1015
+#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44
#define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE 0x5a46
-#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278
-#define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282
-#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c
-#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274
+#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c
+#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274
+#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278
+#define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282
+#define PCI_DEVICE_ID_MELLANOX_HERMON_SDR 0x6340
+#define PCI_DEVICE_ID_MELLANOX_HERMON_DDR 0x634a
+#define PCI_DEVICE_ID_MELLANOX_HERMON_QDR 0x6354
+#define PCI_DEVICE_ID_MELLANOX_HERMON_EN 0x6368
+#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN 0x6372
+#define PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2 0x6732
+#define PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2 0x673c
+#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2 0x6746
+#define PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2 0x6750
+#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2 0x675a
+#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2 0x6764
+#define PCI_DEVICE_ID_MELLANOX_CONNECTX2 0x676e
#define PCI_VENDOR_ID_DFI 0x15bd
diff --git a/include/linux/phy.h b/include/linux/phy.h
index e25f1830fbcf..f7d95f644eed 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -25,6 +25,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mod_devicetable.h>
+#include <linux/phy_led_triggers.h>
#include <linux/atomic.h>
@@ -85,6 +86,21 @@ typedef enum {
} phy_interface_t;
/**
+ * phy_supported_speeds - return all speeds currently supported by a phy device
+ * @phy: The phy device to return supported speeds of.
+ * @speeds: buffer to store supported speeds in.
+ * @size: size of speeds buffer.
+ *
+ * Description: Returns the number of supported speeds, and
+ * fills the speeds buffer with the supported speeds. If the speeds buffer is
+ * too small to contain all currently supported speeds, it will return as
+ * many speeds as can fit.
+ */
+unsigned int phy_supported_speeds(struct phy_device *phy,
+ unsigned int *speeds,
+ unsigned int size);
+
+/**
* It maps 'enum phy_interface_t' found in include/linux/phy.h
* into the device tree binding of 'phy-mode', so that Ethernet
* device driver can get phy interface from device tree.
@@ -343,7 +359,7 @@ struct phy_c45_device_ids {
* giving up on the current attempt at acquiring a link
* irq: IRQ number of the PHY's interrupt (-1 if none)
* phy_timer: The timer for handling the state machine
- * phy_queue: A work_queue for the interrupt
+ * phy_queue: A work_queue for the phy_mac_interrupt
* attached_dev: The attached enet driver's device instance ptr
* adjust_link: Callback for the enet controller to respond to
* changes in the link state.
@@ -401,10 +417,19 @@ struct phy_device {
u32 advertising;
u32 lp_advertising;
+ /* Energy efficient ethernet modes which should be prohibited */
+ u32 eee_broken_modes;
+
int autoneg;
int link_timeout;
+#ifdef CONFIG_LED_TRIGGER_PHY
+ struct phy_led_trigger *phy_led_triggers;
+ unsigned int phy_num_led_triggers;
+ struct phy_led_trigger *last_triggered;
+#endif
+
/*
* Interrupt number for this PHY
* -1 means no interrupt
@@ -425,6 +450,7 @@ struct phy_device {
struct net_device *attached_dev;
u8 mdix;
+ u8 mdix_ctrl;
void (*adjust_link)(struct net_device *dev);
};
@@ -589,6 +615,13 @@ struct phy_driver {
void (*get_strings)(struct phy_device *dev, u8 *data);
void (*get_stats)(struct phy_device *dev,
struct ethtool_stats *stats, u64 *data);
+
+ /* Get and Set PHY tunables */
+ int (*get_tunable)(struct phy_device *dev,
+ struct ethtool_tunable *tuna, void *data);
+ int (*set_tunable)(struct phy_device *dev,
+ struct ethtool_tunable *tuna,
+ const void *data);
};
#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \
struct phy_driver, mdiodrv)
@@ -764,6 +797,7 @@ void phy_detach(struct phy_device *phydev);
void phy_start(struct phy_device *phydev);
void phy_stop(struct phy_device *phydev);
int phy_start_aneg(struct phy_device *phydev);
+int phy_aneg_done(struct phy_device *phydev);
int phy_stop_interrupts(struct phy_device *phydev);
@@ -802,7 +836,8 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner);
void phy_state_machine(struct work_struct *work);
-void phy_change(struct work_struct *work);
+void phy_change(struct phy_device *phydev);
+void phy_change_work(struct work_struct *work);
void phy_mac_interrupt(struct phy_device *phydev, int new_link);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
@@ -825,6 +860,10 @@ int phy_register_fixup_for_id(const char *bus_id,
int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
int (*run)(struct phy_device *));
+int phy_unregister_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask);
+int phy_unregister_fixup_for_id(const char *bus_id);
+int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask);
+
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable);
int phy_get_eee_err(struct phy_device *phydev);
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data);
@@ -836,6 +875,7 @@ int phy_ethtool_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *cmd);
int phy_ethtool_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd);
+int phy_ethtool_nway_reset(struct net_device *ndev);
int __init mdio_bus_init(void);
void mdio_bus_exit(void);
diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h
index 9d18e9f948e9..35c070ea6ea3 100644
--- a/include/linux/phy/phy-qcom-ufs.h
+++ b/include/linux/phy/phy-qcom-ufs.h
@@ -18,22 +18,6 @@
#include "phy.h"
/**
- * ufs_qcom_phy_enable_ref_clk() - Enable the phy
- * ref clock.
- * @phy: reference to a generic phy
- *
- * returns 0 for success, and non-zero for error.
- */
-int ufs_qcom_phy_enable_ref_clk(struct phy *phy);
-
-/**
- * ufs_qcom_phy_disable_ref_clk() - Disable the phy
- * ref clock.
- * @phy: reference to a generic phy.
- */
-void ufs_qcom_phy_disable_ref_clk(struct phy *phy);
-
-/**
* ufs_qcom_phy_enable_dev_ref_clk() - Enable the device
* ref clock.
* @phy: reference to a generic phy.
@@ -47,8 +31,6 @@ void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy);
*/
void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy);
-int ufs_qcom_phy_enable_iface_clk(struct phy *phy);
-void ufs_qcom_phy_disable_iface_clk(struct phy *phy);
int ufs_qcom_phy_start_serdes(struct phy *phy);
int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes);
int ufs_qcom_phy_calibrate_phy(struct phy *phy, bool is_rate_B);
diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h
new file mode 100644
index 000000000000..a2daea0a37d2
--- /dev/null
+++ b/include/linux/phy_led_triggers.h
@@ -0,0 +1,51 @@
+/* Copyright (C) 2016 National Instruments Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __PHY_LED_TRIGGERS
+#define __PHY_LED_TRIGGERS
+
+struct phy_device;
+
+#ifdef CONFIG_LED_TRIGGER_PHY
+
+#include <linux/leds.h>
+
+#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10
+#define PHY_MII_BUS_ID_SIZE (20 - 3)
+
+#define PHY_LINK_LED_TRIGGER_NAME_SIZE (PHY_MII_BUS_ID_SIZE + \
+ FIELD_SIZEOF(struct mdio_device, addr)+\
+ PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE)
+
+struct phy_led_trigger {
+ struct led_trigger trigger;
+ char name[PHY_LINK_LED_TRIGGER_NAME_SIZE];
+ unsigned int speed;
+};
+
+
+extern int phy_led_triggers_register(struct phy_device *phy);
+extern void phy_led_triggers_unregister(struct phy_device *phy);
+extern void phy_led_trigger_change_speed(struct phy_device *phy);
+
+#else
+
+static inline int phy_led_triggers_register(struct phy_device *phy)
+{
+ return 0;
+}
+static inline void phy_led_triggers_unregister(struct phy_device *phy) { }
+static inline void phy_led_trigger_change_speed(struct phy_device *phy) { }
+
+#endif
+
+#endif
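
A minimal usage sketch (not part of the patch): how MAC/PHY handling code could drive the LED trigger API declared above. The example_* function names are hypothetical; only the three phy_led_trigger* calls come from the new header, and the !CONFIG_LED_TRIGGER_PHY stubs turn them into no-ops.

#include <linux/phy.h>
#include <linux/phy_led_triggers.h>

/* Create one LED trigger per supported link speed when the PHY is bound. */
static int example_phy_bind(struct phy_device *phydev)
{
	return phy_led_triggers_register(phydev);
}

/* Re-point the active trigger whenever the negotiated speed changes. */
static void example_phy_link_change(struct phy_device *phydev)
{
	phy_led_trigger_change_speed(phydev);
}

/* Tear the triggers down again on unbind. */
static void example_phy_unbind(struct phy_device *phydev)
{
	phy_led_triggers_unregister(phydev);
}
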
diff --git a/include/linux/pim.h b/include/linux/pim.h
index e1d756f81348..0e81b2778ae0 100644
--- a/include/linux/pim.h
+++ b/include/linux/pim.h
@@ -1,6 +1,7 @@
#ifndef __LINUX_PIM_H
#define __LINUX_PIM_H
+#include <linux/skbuff.h>
#include <asm/byteorder.h>
/* Message types - V1 */
@@ -9,24 +10,86 @@
/* Message types - V2 */
#define PIM_VERSION 2
-#define PIM_REGISTER 1
+
+/* RFC7761, sec 4.9:
+ * Type
+ * Types for specific PIM messages. PIM Types are:
+ *
+ * Message Type Destination
+ * ---------------------------------------------------------------------
+ * 0 = Hello Multicast to ALL-PIM-ROUTERS
+ * 1 = Register Unicast to RP
+ * 2 = Register-Stop Unicast to source of Register
+ * packet
+ * 3 = Join/Prune Multicast to ALL-PIM-ROUTERS
+ * 4 = Bootstrap Multicast to ALL-PIM-ROUTERS
+ * 5 = Assert Multicast to ALL-PIM-ROUTERS
+ * 6 = Graft (used in PIM-DM only) Unicast to RPF'(S)
+ * 7 = Graft-Ack (used in PIM-DM only) Unicast to source of Graft
+ * packet
+ * 8 = Candidate-RP-Advertisement Unicast to Domain's BSR
+ */
+enum {
+ PIM_TYPE_HELLO,
+ PIM_TYPE_REGISTER,
+ PIM_TYPE_REGISTER_STOP,
+ PIM_TYPE_JOIN_PRUNE,
+ PIM_TYPE_BOOTSTRAP,
+ PIM_TYPE_ASSERT,
+ PIM_TYPE_GRAFT,
+ PIM_TYPE_GRAFT_ACK,
+ PIM_TYPE_CANDIDATE_RP_ADV
+};
#define PIM_NULL_REGISTER cpu_to_be32(0x40000000)
-static inline bool ipmr_pimsm_enabled(void)
-{
- return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2);
-}
+/* RFC7761, sec 4.9:
+ * The PIM header common to all PIM messages is:
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |PIM Ver| Type | Reserved | Checksum |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct pimhdr {
+ __u8 type;
+ __u8 reserved;
+ __be16 csum;
+};
 /* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps) */
-struct pimreghdr
-{
+struct pimreghdr {
__u8 type;
__u8 reserved;
__be16 csum;
__be32 flags;
};
-struct sk_buff;
-extern int pim_rcv_v1(struct sk_buff *);
+int pim_rcv_v1(struct sk_buff *skb);
+
+static inline bool ipmr_pimsm_enabled(void)
+{
+ return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2);
+}
+
+static inline struct pimhdr *pim_hdr(const struct sk_buff *skb)
+{
+ return (struct pimhdr *)skb_transport_header(skb);
+}
+
+static inline u8 pim_hdr_version(const struct pimhdr *pimhdr)
+{
+ return pimhdr->type >> 4;
+}
+
+static inline u8 pim_hdr_type(const struct pimhdr *pimhdr)
+{
+ return pimhdr->type & 0xf;
+}
+
+/* check if the address is 224.0.0.13, RFC7761 sec 4.3.1 */
+static inline bool pim_ipv4_all_pim_routers(__be32 addr)
+{
+ return addr == htonl(0xE000000D);
+}
#endif
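
A hedged sketch of how the new accessors could be used to sanity-check a received PIMv2 packet; example_pim_packet_ok() is hypothetical, only the pim_* helpers and constants come from the header above.

#include <linux/ip.h>
#include <linux/pim.h>

static bool example_pim_packet_ok(const struct sk_buff *skb)
{
	const struct pimhdr *pim = pim_hdr(skb);

	if (pim_hdr_version(pim) != PIM_VERSION)
		return false;

	/* Per the RFC 7761 table above, Hello must go to ALL-PIM-ROUTERS. */
	if (pim_hdr_type(pim) == PIM_TYPE_HELLO &&
	    !pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr))
		return false;

	return true;
}
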
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index 5f0e11e7354c..e69e415d0d98 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -14,6 +14,7 @@
#include <linux/device.h>
#define DW_DMA_MAX_NR_MASTERS 4
+#define DW_DMA_MAX_NR_CHANNELS 8
/**
* struct dw_dma_slave - Controller-specific information about a slave
@@ -40,19 +41,18 @@ struct dw_dma_slave {
 * @is_private: The device channels should be marked as private and not for use
 * by the general purpose DMA channel allocator.
* @is_memcpy: The device channels do support memory-to-memory transfers.
- * @is_nollp: The device channels does not support multi block transfers.
* @chan_allocation_order: Allocate channels starting from 0 or 7
* @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
* @block_size: Maximum block size supported by the controller
* @nr_masters: Number of AHB masters supported by the controller
* @data_width: Maximum data width supported by hardware per AHB master
* (in bytes, power of 2)
+ * @multi_block: Multi block transfers supported by hardware per channel.
*/
struct dw_dma_platform_data {
unsigned int nr_channels;
bool is_private;
bool is_memcpy;
- bool is_nollp;
#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
unsigned char chan_allocation_order;
@@ -62,6 +62,7 @@ struct dw_dma_platform_data {
unsigned int block_size;
unsigned char nr_masters;
unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
+ unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS];
};
#endif /* _PLATFORM_DATA_DMA_DW_H */
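
A hedged example of board platform data using the new per-channel multi_block array in place of the removed is_nollp flag; every value below is illustrative, only the field names come from the header.

#include <linux/platform_data/dma-dw.h>

static struct dw_dma_platform_data example_dw_pdata = {
	.nr_channels		= 8,
	.is_memcpy		= true,
	.chan_allocation_order	= CHAN_ALLOCATION_ASCENDING,
	.block_size		= 4095,
	.nr_masters		= 2,
	.data_width		= { 4, 4 },
	/* One entry per channel; non-zero means multi block is supported. */
	.multi_block		= { 1, 1, 1, 1, 1, 1, 1, 1 },
};
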
diff --git a/include/linux/platform_data/drv260x-pdata.h b/include/linux/platform_data/drv260x-pdata.h
deleted file mode 100644
index 0a03b0944411..000000000000
--- a/include/linux/platform_data/drv260x-pdata.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Platform data for DRV260X haptics driver family
- *
- * Author: Dan Murphy <dmurphy@ti.com>
- *
- * Copyright: (C) 2014 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#ifndef _LINUX_DRV260X_PDATA_H
-#define _LINUX_DRV260X_PDATA_H
-
-struct drv260x_platform_data {
- u32 library_selection;
- u32 mode;
- u32 vib_rated_voltage;
- u32 vib_overdrive_voltage;
-};
-
-#endif
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h
index 21b15f6fee25..7815d50c26ff 100644
--- a/include/linux/platform_data/macb.h
+++ b/include/linux/platform_data/macb.h
@@ -8,6 +8,8 @@
#ifndef __MACB_PDATA_H__
#define __MACB_PDATA_H__
+#include <linux/clk.h>
+
/**
* struct macb_platform_data - platform data for MACB Ethernet
* @phy_mask: phy mask passed when register the MDIO bus
@@ -15,12 +17,16 @@
* @phy_irq_pin: PHY IRQ
* @is_rmii: using RMII interface?
* @rev_eth_addr: reverse Ethernet address byte order
+ * @pclk: platform clock
+ * @hclk: AHB clock
*/
struct macb_platform_data {
u32 phy_mask;
int phy_irq_pin;
u8 is_rmii;
u8 rev_eth_addr;
+ struct clk *pclk;
+ struct clk *hclk;
};
#endif /* __MACB_PDATA_H__ */
diff --git a/include/linux/platform_data/mlxcpld-hotplug.h b/include/linux/platform_data/mlxcpld-hotplug.h
new file mode 100644
index 000000000000..e4cfcffaa6f4
--- /dev/null
+++ b/include/linux/platform_data/mlxcpld-hotplug.h
@@ -0,0 +1,99 @@
+/*
+ * include/linux/platform_data/mlxcpld-hotplug.h
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H
+#define __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H
+
+/**
+ * struct mlxcpld_hotplug_device - I2C device data:
+ * @adapter: I2C device adapter;
+ * @client: I2C device client;
+ * @brdinfo: device board information;
+ * @bus: I2C bus, where device is attached;
+ *
+ * Structure represents I2C hotplug device static data (board topology) and
+ * dynamic data (related kernel object handles).
+ */
+struct mlxcpld_hotplug_device {
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
+ struct i2c_board_info brdinfo;
+ u16 bus;
+};
+
+/**
+ * struct mlxcpld_hotplug_platform_data - device platform data:
+ * @top_aggr_offset: offset of top aggregation interrupt register;
+ * @top_aggr_mask: top aggregation interrupt common mask;
+ * @top_aggr_psu_mask: top aggregation interrupt PSU mask;
+ * @psu_reg_offset: offset of PSU interrupt register;
+ * @psu_mask: PSU interrupt mask;
+ * @psu_count: number of equipped replaceable PSUs;
+ * @psu: pointer to PSU devices data array;
+ * @top_aggr_pwr_mask: top aggregation interrupt power mask;
+ * @pwr_reg_offset: offset of power interrupt register
+ * @pwr_mask: power interrupt mask;
+ * @pwr_count: number of power sources;
+ * @pwr: pointer to power devices data array;
+ * @top_aggr_fan_mask: top aggregation interrupt FAN mask;
+ * @fan_reg_offset: offset of FAN interrupt register;
+ * @fan_mask: FAN interrupt mask;
+ * @fan_count: number of equipped replaceable FANs;
+ * @fan: pointer to FAN devices data array;
+ *
+ * Structure represents board platform data related to system hotplug events
+ * such as FAN, PSU and power cable insertion and removal. This data provides
+ * the number of hot-pluggable devices and hardware description for event handling.
+ */
+struct mlxcpld_hotplug_platform_data {
+ u16 top_aggr_offset;
+ u8 top_aggr_mask;
+ u8 top_aggr_psu_mask;
+ u16 psu_reg_offset;
+ u8 psu_mask;
+ u8 psu_count;
+ struct mlxcpld_hotplug_device *psu;
+ u8 top_aggr_pwr_mask;
+ u16 pwr_reg_offset;
+ u8 pwr_mask;
+ u8 pwr_count;
+ struct mlxcpld_hotplug_device *pwr;
+ u8 top_aggr_fan_mask;
+ u16 fan_reg_offset;
+ u8 fan_mask;
+ u8 fan_count;
+ struct mlxcpld_hotplug_device *fan;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H */
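
A hedged sketch of static board topology data for the new header; the register offsets, masks, I2C addresses and bus numbers below are made up for illustration, only the structure layout comes from the file above.

#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/platform_data/mlxcpld-hotplug.h>

static struct mlxcpld_hotplug_device example_psu_devs[] = {
	{
		.brdinfo = { I2C_BOARD_INFO("24c02", 0x51) },
		.bus = 4,
	},
	{
		.brdinfo = { I2C_BOARD_INFO("24c02", 0x50) },
		.bus = 4,
	},
};

static struct mlxcpld_hotplug_platform_data example_hotplug_pdata = {
	.top_aggr_offset	= 0x3a,
	.top_aggr_mask		= 0x48,
	.top_aggr_psu_mask	= 0x08,
	.psu_reg_offset		= 0x58,
	.psu_mask		= 0x03,
	.psu_count		= ARRAY_SIZE(example_psu_devs),
	.psu			= example_psu_devs,
};
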
diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h
index c55e42ee57fa..f01659026b26 100644
--- a/include/linux/platform_data/mtd-nand-s3c2410.h
+++ b/include/linux/platform_data/mtd-nand-s3c2410.h
@@ -12,9 +12,10 @@
#ifndef __MTD_NAND_S3C2410_H
#define __MTD_NAND_S3C2410_H
+#include <linux/mtd/nand.h>
+
/**
* struct s3c2410_nand_set - define a set of one or more nand chips
- * @disable_ecc: Entirely disable ECC - Dangerous
* @flash_bbt: Openmoko u-boot can create a Bad Block Table
* Setting this flag will allow the kernel to
* look for it at boot time and also skip the NAND
@@ -31,7 +32,6 @@
* a warning at boot time.
*/
struct s3c2410_nand_set {
- unsigned int disable_ecc:1;
unsigned int flash_bbt:1;
unsigned int options;
@@ -40,6 +40,7 @@ struct s3c2410_nand_set {
char *name;
int *nr_map;
struct mtd_partition *partitions;
+ struct device_node *of_node;
};
struct s3c2410_platform_nand {
@@ -51,6 +52,8 @@ struct s3c2410_platform_nand {
unsigned int ignore_unset_ecc:1;
+ nand_ecc_modes_t ecc_mode;
+
int nr_sets;
struct s3c2410_nand_set *sets;
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h
index 5c1e21c87270..da79774078a7 100644
--- a/include/linux/platform_data/spi-s3c64xx.h
+++ b/include/linux/platform_data/spi-s3c64xx.h
@@ -40,9 +40,6 @@ struct s3c64xx_spi_info {
int num_cs;
bool no_cs;
int (*cfg_gpio)(void);
- dma_filter_fn filter;
- void *dma_tx;
- void *dma_rx;
};
/**
diff --git a/include/linux/platform_data/usb-davinci.h b/include/linux/platform_data/usb-davinci.h
index e0bc4abe69c2..0926e99f2e8f 100644
--- a/include/linux/platform_data/usb-davinci.h
+++ b/include/linux/platform_data/usb-davinci.h
@@ -11,29 +11,6 @@
#ifndef __ASM_ARCH_USB_H
#define __ASM_ARCH_USB_H
-/* DA8xx CFGCHIP2 (USB 2.0 PHY Control) register bits */
-#define CFGCHIP2_PHYCLKGD (1 << 17)
-#define CFGCHIP2_VBUSSENSE (1 << 16)
-#define CFGCHIP2_RESET (1 << 15)
-#define CFGCHIP2_OTGMODE (3 << 13)
-#define CFGCHIP2_NO_OVERRIDE (0 << 13)
-#define CFGCHIP2_FORCE_HOST (1 << 13)
-#define CFGCHIP2_FORCE_DEVICE (2 << 13)
-#define CFGCHIP2_FORCE_HOST_VBUS_LOW (3 << 13)
-#define CFGCHIP2_USB1PHYCLKMUX (1 << 12)
-#define CFGCHIP2_USB2PHYCLKMUX (1 << 11)
-#define CFGCHIP2_PHYPWRDN (1 << 10)
-#define CFGCHIP2_OTGPWRDN (1 << 9)
-#define CFGCHIP2_DATPOL (1 << 8)
-#define CFGCHIP2_USB1SUSPENDM (1 << 7)
-#define CFGCHIP2_PHY_PLLON (1 << 6) /* override PLL suspend */
-#define CFGCHIP2_SESENDEN (1 << 5) /* Vsess_end comparator */
-#define CFGCHIP2_VBDTCTEN (1 << 4) /* Vbus comparator */
-#define CFGCHIP2_REFFREQ (0xf << 0)
-#define CFGCHIP2_REFFREQ_12MHZ (1 << 0)
-#define CFGCHIP2_REFFREQ_24MHZ (2 << 0)
-#define CFGCHIP2_REFFREQ_48MHZ (3 << 0)
-
struct da8xx_ohci_root_hub;
typedef void (*da8xx_ocic_handler_t)(struct da8xx_ohci_root_hub *hub,
diff --git a/include/linux/pm-trace.h b/include/linux/pm-trace.h
index ecbde7a5548e..7b78793f07d7 100644
--- a/include/linux/pm-trace.h
+++ b/include/linux/pm-trace.h
@@ -1,11 +1,17 @@
#ifndef PM_TRACE_H
#define PM_TRACE_H
+#include <linux/types.h>
#ifdef CONFIG_PM_TRACE
#include <asm/pm-trace.h>
-#include <linux/types.h>
extern int pm_trace_enabled;
+extern bool pm_trace_rtc_abused;
+
+static inline bool pm_trace_rtc_valid(void)
+{
+ return !pm_trace_rtc_abused;
+}
static inline int pm_trace_is_enabled(void)
{
@@ -24,6 +30,7 @@ extern int show_trace_dev_match(char *buf, size_t size);
#else
+static inline bool pm_trace_rtc_valid(void) { return true; }
static inline int pm_trace_is_enabled(void) { return 0; }
#define TRACE_DEVICE(dev) do { } while (0)
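
A hedged sketch of the intended consumer of pm_trace_rtc_valid(): an RTC read path refusing to report a time that the PM trace machinery may have overwritten with a hash. The function name is hypothetical.

#include <linux/errno.h>
#include <linux/pm-trace.h>
#include <linux/rtc.h>

static int example_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	/* The CMOS RTC may currently hold a PM trace hash, not the time. */
	if (!pm_trace_rtc_valid())
		return -EIO;

	/* ... read the hardware into *tm ... */
	return 0;
}
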
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 06eb353182ab..f926af41e122 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -258,7 +258,7 @@ typedef struct pm_message {
* example, if it detects that a child was unplugged while the system was
* asleep).
*
- * Refer to Documentation/power/devices.txt for more information about the role
+ * Refer to Documentation/power/admin-guide/devices.rst for more information about the role
* of the above callbacks in the system suspend process.
*
* There also are callbacks related to runtime power management of devices.
@@ -559,6 +559,7 @@ struct dev_pm_info {
pm_message_t power_state;
unsigned int can_wakeup:1;
unsigned int async_suspend:1;
+ bool in_dpm_list:1; /* Owned by the PM core */
bool is_prepared:1; /* Owned by the PM core */
bool is_suspended:1; /* Ditto */
bool is_noirq_suspended:1;
@@ -596,6 +597,7 @@ struct dev_pm_info {
unsigned int use_autosuspend:1;
unsigned int timer_autosuspends:1;
unsigned int memalloc_noio:1;
+ unsigned int links_count;
enum rpm_request request;
enum rpm_status runtime_status;
int runtime_error;
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index a09fe5c009c8..81ece61075df 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -15,11 +15,11 @@
#include <linux/err.h>
#include <linux/of.h>
#include <linux/notifier.h>
+#include <linux/spinlock.h>
/* Defines used for the flags field in the struct generic_pm_domain */
#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
-
-#define GENPD_MAX_NUM_STATES 8 /* Number of possible low power states */
+#define GENPD_FLAG_IRQ_SAFE (1U << 1) /* PM domain operates in atomic */
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
@@ -40,15 +40,18 @@ struct gpd_dev_ops {
struct genpd_power_state {
s64 power_off_latency_ns;
s64 power_on_latency_ns;
+ s64 residency_ns;
+ struct fwnode_handle *fwnode;
};
+struct genpd_lock_ops;
+
struct generic_pm_domain {
struct dev_pm_domain domain; /* PM domain operations */
struct list_head gpd_list_node; /* Node in the global PM domains list */
struct list_head master_links; /* Links with PM domain as a master */
struct list_head slave_links; /* Links with PM domain as a slave */
struct list_head dev_list; /* List of devices */
- struct mutex lock;
struct dev_power_governor *gov;
struct work_struct power_off_work;
struct fwnode_handle *provider; /* Identity of the domain provider */
@@ -70,9 +73,18 @@ struct generic_pm_domain {
void (*detach_dev)(struct generic_pm_domain *domain,
struct device *dev);
unsigned int flags; /* Bit field of configs for genpd */
- struct genpd_power_state states[GENPD_MAX_NUM_STATES];
+ struct genpd_power_state *states;
unsigned int state_count; /* number of states */
unsigned int state_idx; /* state that genpd will go to when off */
+ void *free; /* Free the state that was allocated for default */
+ const struct genpd_lock_ops *lock_ops;
+ union {
+ struct mutex mlock;
+ struct {
+ spinlock_t slock;
+ unsigned long lock_flags;
+ };
+ };
};
@@ -205,6 +217,8 @@ extern int of_genpd_add_device(struct of_phandle_args *args,
extern int of_genpd_add_subdomain(struct of_phandle_args *parent,
struct of_phandle_args *new_subdomain);
extern struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
+extern int of_genpd_parse_idle_states(struct device_node *dn,
+ struct genpd_power_state **states, int *n);
int genpd_dev_pm_attach(struct device *dev);
#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
@@ -234,6 +248,12 @@ static inline int of_genpd_add_subdomain(struct of_phandle_args *parent,
return -ENODEV;
}
+static inline int of_genpd_parse_idle_states(struct device_node *dn,
+ struct genpd_power_state **states, int *n)
+{
+ return -ENODEV;
+}
+
static inline int genpd_dev_pm_attach(struct device *dev)
{
return -ENODEV;
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index bca26157f5b6..0edd88f93904 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -17,13 +17,65 @@
#include <linux/err.h>
#include <linux/notifier.h>
+struct clk;
+struct regulator;
struct dev_pm_opp;
struct device;
+struct opp_table;
enum dev_pm_opp_event {
OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
};
+/**
+ * struct dev_pm_opp_supply - Power supply voltage/current values
+ * @u_volt: Target voltage in microvolts corresponding to this OPP
+ * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
+ * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP
+ * @u_amp: Maximum current drawn by the device in microamperes
+ *
+ * This structure stores the voltage/current values for a single power supply.
+ */
+struct dev_pm_opp_supply {
+ unsigned long u_volt;
+ unsigned long u_volt_min;
+ unsigned long u_volt_max;
+ unsigned long u_amp;
+};
+
+/**
+ * struct dev_pm_opp_info - OPP freq/voltage/current values
+ * @rate: Target clk rate in hz
+ * @supplies: Array of voltage/current values for all power supplies
+ *
+ * This structure stores the freq/voltage/current values for a single OPP.
+ */
+struct dev_pm_opp_info {
+ unsigned long rate;
+ struct dev_pm_opp_supply *supplies;
+};
+
+/**
+ * struct dev_pm_set_opp_data - Set OPP data
+ * @old_opp: Old OPP info
+ * @new_opp: New OPP info
+ * @regulators: Array of regulator pointers
+ * @regulator_count: Number of regulators
+ * @clk: Pointer to clk
+ * @dev: Pointer to the struct device
+ *
+ * This structure contains all information required for setting an OPP.
+ */
+struct dev_pm_set_opp_data {
+ struct dev_pm_opp_info old_opp;
+ struct dev_pm_opp_info new_opp;
+
+ struct regulator **regulators;
+ unsigned int regulator_count;
+ struct clk *clk;
+ struct device *dev;
+};
+
#if defined(CONFIG_PM_OPP)
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
@@ -62,8 +114,10 @@ int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
void dev_pm_opp_put_supported_hw(struct device *dev);
int dev_pm_opp_set_prop_name(struct device *dev, const char *name);
void dev_pm_opp_put_prop_name(struct device *dev);
-int dev_pm_opp_set_regulator(struct device *dev, const char *name);
-void dev_pm_opp_put_regulator(struct device *dev);
+struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count);
+void dev_pm_opp_put_regulators(struct opp_table *opp_table);
+int dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
+void dev_pm_opp_register_put_opp_helper(struct device *dev);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
@@ -163,6 +217,14 @@ static inline int dev_pm_opp_set_supported_hw(struct device *dev,
static inline void dev_pm_opp_put_supported_hw(struct device *dev) {}
+static inline int dev_pm_opp_register_set_opp_helper(struct device *dev,
+ int (*set_opp)(struct dev_pm_set_opp_data *data))
+{
+ return -ENOTSUPP;
+}
+
+static inline void dev_pm_opp_register_put_opp_helper(struct device *dev) {}
+
static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
return -ENOTSUPP;
@@ -170,12 +232,12 @@ static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
static inline void dev_pm_opp_put_prop_name(struct device *dev) {}
-static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count)
{
- return -ENOTSUPP;
+ return ERR_PTR(-ENOTSUPP);
}
-static inline void dev_pm_opp_put_regulator(struct device *dev) {}
+static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {}
static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
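
A hedged sketch of a platform-specific handler that the new dev_pm_opp_register_set_opp_helper() hook could accept. The regulator/clock sequencing is deliberately simplified (a real handler would order the voltage and rate changes depending on whether the frequency goes up or down); only the dev_pm_set_opp_data layout comes from the header above.

#include <linux/clk.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>

static int example_set_opp(struct dev_pm_set_opp_data *data)
{
	struct dev_pm_opp_supply *new_supply = &data->new_opp.supplies[0];
	int ret;

	/* Move the first supply to the target voltage window of the new OPP. */
	ret = regulator_set_voltage_triplet(data->regulators[0],
					    new_supply->u_volt_min,
					    new_supply->u_volt,
					    new_supply->u_volt_max);
	if (ret)
		return ret;

	/* Then switch the device clock to the new rate. */
	return clk_set_rate(data->clk, data->new_opp.rate);
}

/* Hypothetical probe snippet:
 *	ret = dev_pm_opp_register_set_opp_helper(dev, example_set_opp);
 */
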
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 2e14d2667b6c..ca4823e675e2 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -55,18 +55,17 @@ extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
+extern void pm_runtime_clean_up_links(struct device *dev);
+extern void pm_runtime_get_suppliers(struct device *dev);
+extern void pm_runtime_put_suppliers(struct device *dev);
+extern void pm_runtime_new_link(struct device *dev);
+extern void pm_runtime_drop_link(struct device *dev);
static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
dev->power.ignore_children = enable;
}
-static inline bool pm_children_suspended(struct device *dev)
-{
- return dev->power.ignore_children
- || !atomic_read(&dev->power.child_count);
-}
-
static inline void pm_runtime_get_noresume(struct device *dev)
{
atomic_inc(&dev->power.usage_count);
@@ -162,7 +161,6 @@ static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}
static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
-static inline bool pm_children_suspended(struct device *dev) { return false; }
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
static inline bool device_run_wake(struct device *dev) { return false; }
@@ -186,6 +184,11 @@ static inline unsigned long pm_runtime_autosuspend_expiration(
struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
bool enable){}
+static inline void pm_runtime_clean_up_links(struct device *dev) {}
+static inline void pm_runtime_get_suppliers(struct device *dev) {}
+static inline void pm_runtime_put_suppliers(struct device *dev) {}
+static inline void pm_runtime_new_link(struct device *dev) {}
+static inline void pm_runtime_drop_link(struct device *dev) {}
#endif /* !CONFIG_PM */
@@ -265,9 +268,9 @@ static inline int pm_runtime_set_active(struct device *dev)
return __pm_runtime_set_status(dev, RPM_ACTIVE);
}
-static inline void pm_runtime_set_suspended(struct device *dev)
+static inline int pm_runtime_set_suspended(struct device *dev)
{
- __pm_runtime_set_status(dev, RPM_SUSPENDED);
+ return __pm_runtime_set_status(dev, RPM_SUSPENDED);
}
static inline void pm_runtime_disable(struct device *dev)
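
A hedged probe-path sketch reflecting the signature change above: pm_runtime_set_suspended() now returns a status, so callers can propagate it instead of assuming success. The surrounding function is hypothetical.

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_probe_pm_setup(struct device *dev)
{
	int ret;

	ret = pm_runtime_set_suspended(dev);
	if (ret) {
		dev_warn(dev, "cannot mark device suspended: %d\n", ret);
		return ret;
	}

	pm_runtime_enable(dev);
	return 0;
}
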
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 37b057b63b46..a46d6755035e 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -8,7 +8,7 @@
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/sysctl.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <uapi/linux/poll.h>
extern struct ctl_table epoll_table[]; /* for sysctl */
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index e30deb046156..bed9557b69e7 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -4,7 +4,8 @@
enum bq27xxx_chip {
BQ27000 = 1, /* bq27000, bq27200 */
BQ27010, /* bq27010, bq27210 */
- BQ27500, /* bq27500, bq27510, bq27520 */
+ BQ27500, /* bq27500 */
+ BQ27510, /* bq27510, bq27520 */
BQ27530, /* bq27530, bq27531 */
BQ27541, /* bq27541, bq27542, bq27546, bq27742 */
BQ27545, /* bq27545 */
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 75e4e30677f1..7eeceac52dea 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -65,19 +65,24 @@
/*
* Are we doing bottom half or hardware interrupt processing?
- * Are we in a softirq context? Interrupt context?
- * in_softirq - Are we currently processing softirq or have bh disabled?
- * in_serving_softirq - Are we currently processing softirq?
+ *
+ * in_irq() - We're in (hard) IRQ context
+ * in_softirq() - We have BH disabled, or are processing softirqs
+ * in_interrupt()	- We're in NMI, IRQ or SoftIRQ context, or have BH disabled
+ * in_serving_softirq() - We're in softirq context
+ * in_nmi() - We're in NMI context
+ * in_task() - We're in task context
+ *
+ * Note: due to the BH-disabled confusion, in_softirq() and in_interrupt()
+ *       really should not be used in new code.
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-
-/*
- * Are we in NMI context?
- */
-#define in_nmi() (preempt_count() & NMI_MASK)
+#define in_nmi() (preempt_count() & NMI_MASK)
+#define in_task() (!(preempt_count() & \
+ (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
/*
* The preempt_count offset after preempt_disable();
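
A small hedged example of the new in_task() helper replacing open-coded preempt_count() checks when picking an allocation mode; the function name is made up.

#include <linux/gfp.h>
#include <linux/preempt.h>

static inline gfp_t example_gfp_flags(void)
{
	/* Sleeping allocations are only safe in plain task context. */
	return in_task() ? GFP_KERNEL : GFP_ATOMIC;
}
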
diff --git a/include/linux/printk.h b/include/linux/printk.h
index eac1af8502bb..3472cc6b7a60 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -10,6 +10,8 @@
extern const char linux_banner[];
extern const char linux_proc_banner[];
+#define PRINTK_MAX_SINGLE_HEADER_LEN 2
+
static inline int printk_get_level(const char *buffer)
{
if (buffer[0] == KERN_SOH_ASCII && buffer[1]) {
@@ -31,6 +33,14 @@ static inline const char *printk_skip_level(const char *buffer)
return buffer;
}
+static inline const char *printk_skip_headers(const char *buffer)
+{
+ while (printk_get_level(buffer))
+ buffer = printk_skip_level(buffer);
+
+ return buffer;
+}
+
#define CONSOLE_EXT_LOG_MAX 8192
/* printk's without a loglevel use this.. */
@@ -40,10 +50,15 @@ static inline const char *printk_skip_level(const char *buffer)
#define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */
#define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */
#define CONSOLE_LOGLEVEL_QUIET 4 /* Shhh ..., when booted with "quiet" */
-#define CONSOLE_LOGLEVEL_DEFAULT 7 /* anything MORE serious than KERN_DEBUG */
#define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */
#define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */
+/*
+ * Default used to be hard-coded at 7, we're now allowing it to be set from
+ * kernel config.
+ */
+#define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT
+
extern int console_printk[];
#define console_loglevel (console_printk[0])
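
A hedged illustration of the new printk_skip_headers() helper, which loops over printk_skip_level() until every leading KERN_SOH header has been consumed; the wrapper function is hypothetical.

#include <linux/printk.h>

static void example_print_body(const char *msg)
{
	/* For "\001" "4" "\001" "c" "text", this prints just "text". */
	pr_info("body: %s\n", printk_skip_headers(msg));
}
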
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index b97bf2ef996e..2d2bf592d9db 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -21,6 +21,7 @@ extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
struct proc_dir_entry *, void *);
extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
struct proc_dir_entry *);
+struct proc_dir_entry *proc_create_mount_point(const char *name);
extern struct proc_dir_entry *proc_create_data(const char *, umode_t,
struct proc_dir_entry *,
@@ -56,6 +57,7 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
struct proc_dir_entry *parent,const char *dest) { return NULL;}
static inline struct proc_dir_entry *proc_mkdir(const char *name,
struct proc_dir_entry *parent) {return NULL;}
+static inline struct proc_dir_entry *proc_create_mount_point(const char *name) { return NULL; }
static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
@@ -82,4 +84,8 @@ static inline struct proc_dir_entry *proc_net_mkdir(
return proc_mkdir_data(name, 0, parent, net);
}
+struct ns_common;
+int open_related_ns(struct ns_common *ns,
+ struct ns_common *(*get_ns)(struct ns_common *ns));
+
#endif /* _LINUX_PROC_FS_H */
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 92013cc9cc8c..0da29cae009b 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -89,4 +89,80 @@ extern int pstore_register(struct pstore_info *);
extern void pstore_unregister(struct pstore_info *);
extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
+struct pstore_ftrace_record {
+ unsigned long ip;
+ unsigned long parent_ip;
+ u64 ts;
+};
+
+/*
+ * ftrace related stuff: Both backends and frontends need these so expose
+ * them here.
+ */
+
+#if NR_CPUS <= 2 && defined(CONFIG_ARM_THUMB)
+#define PSTORE_CPU_IN_IP 0x1
+#elif NR_CPUS <= 4 && defined(CONFIG_ARM)
+#define PSTORE_CPU_IN_IP 0x3
+#endif
+
+#define TS_CPU_SHIFT 8
+#define TS_CPU_MASK (BIT(TS_CPU_SHIFT) - 1)
+
+/*
+ * If CPU number can be stored in IP, store it there, otherwise store it in
+ * the time stamp. This means more timestamp resolution is available when
+ * the CPU can be stored in the IP.
+ */
+#ifdef PSTORE_CPU_IN_IP
+static inline void
+pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu)
+{
+ rec->ip |= cpu;
+}
+
+static inline unsigned int
+pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec)
+{
+ return rec->ip & PSTORE_CPU_IN_IP;
+}
+
+static inline u64
+pstore_ftrace_read_timestamp(struct pstore_ftrace_record *rec)
+{
+ return rec->ts;
+}
+
+static inline void
+pstore_ftrace_write_timestamp(struct pstore_ftrace_record *rec, u64 val)
+{
+ rec->ts = val;
+}
+#else
+static inline void
+pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu)
+{
+ rec->ts &= ~(TS_CPU_MASK);
+ rec->ts |= cpu;
+}
+
+static inline unsigned int
+pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec)
+{
+ return rec->ts & TS_CPU_MASK;
+}
+
+static inline u64
+pstore_ftrace_read_timestamp(struct pstore_ftrace_record *rec)
+{
+ return rec->ts >> TS_CPU_SHIFT;
+}
+
+static inline void
+pstore_ftrace_write_timestamp(struct pstore_ftrace_record *rec, u64 val)
+{
+ rec->ts = (rec->ts & TS_CPU_MASK) | (val << TS_CPU_SHIFT);
+}
+#endif
+
#endif /*_LINUX_PSTORE_H*/
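
A hedged sketch of how a record could be stamped and later decoded with the helpers above; the functions are hypothetical, and the same code works whether the CPU number ends up in the IP or in the timestamp.

#include <linux/printk.h>
#include <linux/pstore.h>

static void example_fill_record(struct pstore_ftrace_record *rec,
				unsigned long ip, unsigned long parent_ip,
				u64 now_ns, unsigned int cpu)
{
	rec->ip = ip;
	rec->parent_ip = parent_ip;
	pstore_ftrace_write_timestamp(rec, now_ns);
	pstore_ftrace_encode_cpu(rec, cpu);
}

static void example_dump_record(struct pstore_ftrace_record *rec)
{
	pr_info("cpu=%u ts=%llu ip=%pS\n",
		pstore_ftrace_decode_cpu(rec),
		(unsigned long long)pstore_ftrace_read_timestamp(rec),
		(void *)rec->ip);
}
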
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index c668c861c96c..9395f06e8372 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -24,6 +24,13 @@
#include <linux/list.h>
#include <linux/types.h>
+/*
+ * Choose whether access to the RAM zone requires locking or not. If a zone
+ * can be written to from different CPUs like with ftrace for example, then
+ * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required.
+ */
+#define PRZ_FLAG_NO_LOCK BIT(0)
+
struct persistent_ram_buffer;
struct rs_control;
@@ -40,6 +47,8 @@ struct persistent_ram_zone {
void *vaddr;
struct persistent_ram_buffer *buffer;
size_t buffer_size;
+ u32 flags;
+ raw_spinlock_t buffer_lock;
/* ECC correction */
char *par_buffer;
@@ -55,7 +64,7 @@ struct persistent_ram_zone {
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
u32 sig, struct persistent_ram_ecc_info *ecc_info,
- unsigned int memtype);
+ unsigned int memtype, u32 flags);
void persistent_ram_free(struct persistent_ram_zone *prz);
void persistent_ram_zap(struct persistent_ram_zone *prz);
@@ -77,6 +86,8 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
* @mem_address physical memory address to contain ramoops
*/
+#define RAMOOPS_FLAG_FTRACE_PER_CPU BIT(0)
+
struct ramoops_platform_data {
unsigned long mem_size;
phys_addr_t mem_address;
@@ -86,6 +97,7 @@ struct ramoops_platform_data {
unsigned long ftrace_size;
unsigned long pmsg_size;
int dump_oops;
+ u32 flags;
struct persistent_ram_ecc_info ecc_info;
};
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 5ad54fc66cf0..a026bfd089db 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -58,7 +58,14 @@ struct system_device_crosststamp;
*
* clock operations
*
+ * @adjfine: Adjusts the frequency of the hardware clock.
+ * parameter scaled_ppm: Desired frequency offset from
+ * nominal frequency in parts per million, but with a
+ * 16 bit binary fractional field.
+ *
* @adjfreq: Adjusts the frequency of the hardware clock.
+ * This method is deprecated. New drivers should implement
+ * the @adjfine method instead.
* parameter delta: Desired frequency offset from nominal frequency
* in parts per billion
*
@@ -108,6 +115,7 @@ struct ptp_clock_info {
int n_pins;
int pps;
struct ptp_pin_desc *pin_config;
+ int (*adjfine)(struct ptp_clock_info *ptp, long scaled_ppm);
int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
@@ -122,30 +130,6 @@ struct ptp_clock_info {
struct ptp_clock;
-/**
- * ptp_clock_register() - register a PTP hardware clock driver
- *
- * @info: Structure describing the new clock.
- * @parent: Pointer to the parent device of the new clock.
- *
- * Returns a valid pointer on success or PTR_ERR on failure. If PHC
- * support is missing at the configuration level, this function
- * returns NULL, and drivers are expected to gracefully handle that
- * case separately.
- */
-
-extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
- struct device *parent);
-
-/**
- * ptp_clock_unregister() - unregister a PTP hardware clock driver
- *
- * @ptp: The clock to remove from service.
- */
-
-extern int ptp_clock_unregister(struct ptp_clock *ptp);
-
-
enum ptp_clock_events {
PTP_CLOCK_ALARM,
PTP_CLOCK_EXTTS,
@@ -171,6 +155,31 @@ struct ptp_clock_event {
};
};
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+
+/**
+ * ptp_clock_register() - register a PTP hardware clock driver
+ *
+ * @info: Structure describing the new clock.
+ * @parent: Pointer to the parent device of the new clock.
+ *
+ * Returns a valid pointer on success or PTR_ERR on failure. If PHC
+ * support is missing at the configuration level, this function
+ * returns NULL, and drivers are expected to gracefully handle that
+ * case separately.
+ */
+
+extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+ struct device *parent);
+
+/**
+ * ptp_clock_unregister() - unregister a PTP hardware clock driver
+ *
+ * @ptp: The clock to remove from service.
+ */
+
+extern int ptp_clock_unregister(struct ptp_clock *ptp);
+
/**
* ptp_clock_event() - notify the PTP layer about an event
*
@@ -202,4 +211,20 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan);
+#else
+static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+ struct device *parent)
+{ return NULL; }
+static inline int ptp_clock_unregister(struct ptp_clock *ptp)
+{ return 0; }
+static inline void ptp_clock_event(struct ptp_clock *ptp,
+ struct ptp_clock_event *event)
+{ }
+static inline int ptp_clock_index(struct ptp_clock *ptp)
+{ return -1; }
+static inline int ptp_find_pin(struct ptp_clock *ptp,
+ enum ptp_pin_function func, unsigned int chan)
+{ return -1; }
+#endif
+
#endif
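
A hedged driver-side sketch of the new adjfine callback; the conversion and the ptp_clock_info values are only illustrative, and registration degrades gracefully to the NULL stub when PTP support is unreachable.

#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>

static int example_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	/* scaled_ppm is ppm with a 16-bit binary fraction: ppb = sppm * 1000 / 2^16 */
	s64 ppb = ((s64)scaled_ppm * 1000) >> 16;

	pr_debug("example-phc: frequency offset %lld ppb\n", ppb);
	/* ... program the hardware frequency adjustment from ppb ... */
	return 0;
}

static struct ptp_clock_info example_ptp_info = {
	.owner		= THIS_MODULE,
	.name		= "example-phc",
	.max_adj	= 1000000,
	.adjfine	= example_adjfine,
};

static struct ptp_clock *example_register_phc(struct device *dev)
{
	/* Returns NULL, which is fine, when CONFIG_PTP_1588_CLOCK is unreachable. */
	return ptp_clock_register(&example_ptp_info, dev);
}
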
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 504c98a278d4..e0e539321ab9 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -8,6 +8,9 @@
#include <linux/pid_namespace.h> /* For task_active_pid_ns. */
#include <uapi/linux/ptrace.h>
+extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
+ void *buf, int len, unsigned int gup_flags);
+
/*
* Ptrace flags
*
@@ -19,7 +22,6 @@
#define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */
#define PT_PTRACED 0x00000001
#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
-#define PT_PTRACE_CAP 0x00000004 /* ptracer can follow suid-exec */
#define PT_OPT_FLAG_SHIFT 3
/* PT_TRACE_* event enable flags */
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 72d88cf3ca25..37dfba101c6c 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -56,23 +56,6 @@ struct qed_chain_pbl_u32 {
u32 cons_page_idx;
};
-struct qed_chain_pbl {
- /* Base address of a pre-allocated buffer for pbl */
- dma_addr_t p_phys_table;
- void *p_virt_table;
-
- /* Table for keeping the virtual addresses of the chain pages,
- * respectively to the physical addresses in the pbl table.
- */
- void **pp_virt_addr_tbl;
-
- /* Index to current used page by producer/consumer */
- union {
- struct qed_chain_pbl_u16 pbl16;
- struct qed_chain_pbl_u32 pbl32;
- } u;
-};
-
struct qed_chain_u16 {
	/* Cyclic index of next element to produce/consume */
u16 prod_idx;
@@ -86,46 +69,78 @@ struct qed_chain_u32 {
};
struct qed_chain {
- void *p_virt_addr;
- dma_addr_t p_phys_addr;
- void *p_prod_elem;
- void *p_cons_elem;
+ /* fastpath portion of the chain - required for commands such
+ * as produce / consume.
+ */
+ /* Point to next element to produce/consume */
+ void *p_prod_elem;
+ void *p_cons_elem;
+
+ /* Fastpath portions of the PBL [if exists] */
+ struct {
+ /* Table for keeping the virtual addresses of the chain pages,
+ * respectively to the physical addresses in the pbl table.
+ */
+ void **pp_virt_addr_tbl;
- enum qed_chain_mode mode;
- enum qed_chain_use_mode intended_use; /* used to produce/consume */
- enum qed_chain_cnt_type cnt_type;
+ union {
+ struct qed_chain_pbl_u16 u16;
+ struct qed_chain_pbl_u32 u32;
+ } c;
+ } pbl;
union {
struct qed_chain_u16 chain16;
struct qed_chain_u32 chain32;
} u;
+ /* Capacity counts only usable elements */
+ u32 capacity;
u32 page_cnt;
- /* Number of elements - capacity is for usable elements only,
- * while size will contain total number of elements [for entire chain].
+ enum qed_chain_mode mode;
+
+ /* Elements information for fast calculations */
+ u16 elem_per_page;
+ u16 elem_per_page_mask;
+ u16 elem_size;
+ u16 next_page_mask;
+ u16 usable_per_page;
+ u8 elem_unusable;
+
+ u8 cnt_type;
+
+ /* Slowpath of the chain - required for initialization and destruction,
+ * but isn't involved in regular functionality.
*/
- u32 capacity;
+
+ /* Base address of a pre-allocated buffer for pbl */
+ struct {
+ dma_addr_t p_phys_table;
+ void *p_virt_table;
+ } pbl_sp;
+
+ /* Address of first page of the chain - the address is required
+	 * for fastpath operation [consume/produce] but only for the SINGLE
+ * flavour which isn't considered fastpath [== SPQ].
+ */
+ void *p_virt_addr;
+ dma_addr_t p_phys_addr;
+
+ /* Total number of elements [for entire chain] */
u32 size;
- /* Elements information for fast calculations */
- u16 elem_per_page;
- u16 elem_per_page_mask;
- u16 elem_unusable;
- u16 usable_per_page;
- u16 elem_size;
- u16 next_page_mask;
- struct qed_chain_pbl pbl;
+ u8 intended_use;
};
#define QED_CHAIN_PBL_ENTRY_SIZE (8)
#define QED_CHAIN_PAGE_SIZE (0x1000)
#define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size))
-#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
- ((mode == QED_CHAIN_MODE_NEXT_PTR) ? \
- (1 + ((sizeof(struct qed_chain_next) - 1) / \
- (elem_size))) : 0)
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
+ (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \
+ (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
+ (elem_size))) : 0)
#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
((u32)(ELEMS_PER_PAGE(elem_size) - \
@@ -186,7 +201,7 @@ static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
return p_chain->usable_per_page;
}
-static inline u16 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
+static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
{
return p_chain->elem_unusable;
}
@@ -198,7 +213,7 @@ static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
{
- return p_chain->pbl.p_phys_table;
+ return p_chain->pbl_sp.p_phys_table;
}
/**
@@ -214,10 +229,10 @@ static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
void **p_next_elem, void *idx_to_inc, void *page_to_inc)
-
{
struct qed_chain_next *p_next = NULL;
u32 page_index = 0;
+
switch (p_chain->mode) {
case QED_CHAIN_MODE_NEXT_PTR:
p_next = *p_next_elem;
@@ -305,7 +320,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
if ((p_chain->u.chain16.prod_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_prod_idx = &p_chain->u.chain16.prod_idx;
- p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx;
+ p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
p_prod_idx, p_prod_page_idx);
}
@@ -314,7 +329,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
if ((p_chain->u.chain32.prod_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_prod_idx = &p_chain->u.chain32.prod_idx;
- p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx;
+ p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
p_prod_idx, p_prod_page_idx);
}
@@ -378,7 +393,7 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
if ((p_chain->u.chain16.cons_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_cons_idx = &p_chain->u.chain16.cons_idx;
- p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx;
+ p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
p_cons_idx, p_cons_page_idx);
}
@@ -387,8 +402,8 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
if ((p_chain->u.chain32.cons_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_cons_idx = &p_chain->u.chain32.cons_idx;
- p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx;
- qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+ p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
+ qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
p_cons_idx, p_cons_page_idx);
}
p_chain->u.chain32.cons_idx++;
@@ -429,25 +444,26 @@ static inline void qed_chain_reset(struct qed_chain *p_chain)
u32 reset_val = p_chain->page_cnt - 1;
if (is_chain_u16(p_chain)) {
- p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val;
- p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val;
+ p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
+ p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
} else {
- p_chain->pbl.u.pbl32.prod_page_idx = reset_val;
- p_chain->pbl.u.pbl32.cons_page_idx = reset_val;
+ p_chain->pbl.c.u32.prod_page_idx = reset_val;
+ p_chain->pbl.c.u32.cons_page_idx = reset_val;
}
}
switch (p_chain->intended_use) {
- case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
- case QED_CHAIN_USE_TO_PRODUCE:
- /* Do nothing */
- break;
-
case QED_CHAIN_USE_TO_CONSUME:
/* produce empty elements */
for (i = 0; i < p_chain->capacity; i++)
qed_chain_recycle_consumed(p_chain);
break;
+
+ case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
+ case QED_CHAIN_USE_TO_PRODUCE:
+ default:
+ /* Do nothing */
+ break;
}
}
@@ -473,13 +489,13 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain,
p_chain->p_virt_addr = NULL;
p_chain->p_phys_addr = 0;
p_chain->elem_size = elem_size;
- p_chain->intended_use = intended_use;
+ p_chain->intended_use = (u8)intended_use;
p_chain->mode = mode;
- p_chain->cnt_type = cnt_type;
+ p_chain->cnt_type = (u8)cnt_type;
- p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
+ p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
- p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
+ p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
p_chain->next_page_mask = (p_chain->usable_per_page &
p_chain->elem_per_page_mask);
@@ -488,8 +504,8 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain,
p_chain->capacity = p_chain->usable_per_page * page_cnt;
p_chain->size = p_chain->elem_per_page * page_cnt;
- p_chain->pbl.p_phys_table = 0;
- p_chain->pbl.p_virt_table = NULL;
+ p_chain->pbl_sp.p_phys_table = 0;
+ p_chain->pbl_sp.p_virt_table = NULL;
p_chain->pbl.pp_virt_addr_tbl = NULL;
}
@@ -530,8 +546,8 @@ static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
dma_addr_t p_phys_pbl,
void **pp_virt_addr_tbl)
{
- p_chain->pbl.p_phys_table = p_phys_pbl;
- p_chain->pbl.p_virt_table = p_virt_pbl;
+ p_chain->pbl_sp.p_phys_table = p_phys_pbl;
+ p_chain->pbl_sp.p_virt_table = p_virt_pbl;
p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
}
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 33c24ebc9b7f..7a52f7c58c37 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -15,6 +15,29 @@
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_iov_if.h>
+struct qed_queue_start_common_params {
+ /* Should always be relative to entity sending this. */
+ u8 vport_id;
+ u16 queue_id;
+
+ /* Relative, but relevant only for PFs */
+ u8 stats_id;
+
+ /* These are always absolute */
+ u16 sb;
+ u8 sb_idx;
+};
+
+struct qed_rxq_start_ret_params {
+ void __iomem *p_prod;
+ void *p_handle;
+};
+
+struct qed_txq_start_ret_params {
+ void __iomem *p_doorbell;
+ void *p_handle;
+};
+
struct qed_dev_eth_info {
struct qed_dev_info common;
@@ -22,7 +45,8 @@ struct qed_dev_eth_info {
u8 num_tc;
u8 port_mac[ETH_ALEN];
- u8 num_vlan_filters;
+ u16 num_vlan_filters;
+ u16 num_mac_filters;
/* Legacy VF - this affects the datapath, so qede has to know */
bool is_legacy;
@@ -55,18 +79,6 @@ struct qed_start_vport_params {
bool clear_stats;
};
-struct qed_stop_rxq_params {
- u8 rss_id;
- u8 rx_queue_id;
- u8 vport_id;
- bool eq_completion_only;
-};
-
-struct qed_stop_txq_params {
- u8 rss_id;
- u8 tx_queue_id;
-};
-
enum qed_filter_rx_mode_type {
QED_FILTER_RX_MODE_TYPE_REGULAR,
QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
@@ -111,15 +123,6 @@ struct qed_filter_params {
union qed_filter_type_params filter;
};
-struct qed_queue_start_common_params {
- u8 rss_id;
- u8 queue_id;
- u8 vport_id;
- u16 sb;
- u16 sb_idx;
- u16 vf_qid;
-};
-
struct qed_tunn_params {
u16 vxlan_port;
u8 update_vxlan_port;
@@ -129,7 +132,7 @@ struct qed_tunn_params {
struct qed_eth_cb_ops {
struct qed_common_cb_ops common;
- void (*force_mac) (void *dev, u8 *mac);
+ void (*force_mac) (void *dev, u8 *mac, bool forced);
};
#ifdef CONFIG_DCB
@@ -219,24 +222,24 @@ struct qed_eth_ops {
struct qed_update_vport_params *params);
int (*q_rx_start)(struct qed_dev *cdev,
+ u8 rss_num,
struct qed_queue_start_common_params *params,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size,
- void __iomem **pp_prod);
+ struct qed_rxq_start_ret_params *ret_params);
- int (*q_rx_stop)(struct qed_dev *cdev,
- struct qed_stop_rxq_params *params);
+ int (*q_rx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle);
int (*q_tx_start)(struct qed_dev *cdev,
+ u8 rss_num,
struct qed_queue_start_common_params *params,
dma_addr_t pbl_addr,
u16 pbl_size,
- void __iomem **pp_doorbell);
+ struct qed_txq_start_ret_params *ret_params);
- int (*q_tx_stop)(struct qed_dev *cdev,
- struct qed_stop_txq_params *params);
+ int (*q_tx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle);
int (*filter_config)(struct qed_dev *cdev,
struct qed_filter_params *params);
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 8978a60371f4..4b454f4f5b25 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -166,6 +166,7 @@ struct qed_iscsi_pf_params {
u32 max_cwnd;
u16 cq_num_entries;
u16 cmdq_num_entries;
+ u32 two_msl_timer;
u16 dup_ack_threshold;
u16 tx_sws_timer;
u16 min_rto;
@@ -267,11 +268,15 @@ struct qed_dev_info {
u8 mf_mode;
bool tx_switching;
bool rdma_supported;
+ u16 mtu;
+
+ bool wol_support;
};
enum qed_sb_type {
QED_SB_TYPE_L2_QUEUE,
QED_SB_TYPE_CNQ,
+ QED_SB_TYPE_STORAGE,
};
enum qed_protocol {
@@ -401,6 +406,15 @@ struct qed_selftest_ops {
* @return 0 on success, error otherwise.
*/
int (*selftest_clock)(struct qed_dev *cdev);
+
+/**
+ * @brief selftest_nvram - Perform nvram test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*selftest_nvram) (struct qed_dev *cdev);
};
struct qed_common_ops {
@@ -554,6 +568,41 @@ struct qed_common_ops {
*/
int (*set_led)(struct qed_dev *cdev,
enum qed_led_mode mode);
+
+/**
+ * @brief update_drv_state - API to inform the change in the driver state.
+ *
+ * @param cdev
+ * @param active
+ *
+ */
+ int (*update_drv_state)(struct qed_dev *cdev, bool active);
+
+/**
+ * @brief update_mac - API to inform the change in the mac address
+ *
+ * @param cdev
+ * @param mac
+ *
+ */
+ int (*update_mac)(struct qed_dev *cdev, u8 *mac);
+
+/**
+ * @brief update_mtu - API to inform the change in the mtu
+ *
+ * @param cdev
+ * @param mtu
+ *
+ */
+ int (*update_mtu)(struct qed_dev *cdev, u16 mtu);
+
+/**
+ * @brief update_wol - update of changes in the WoL configuration
+ *
+ * @param cdev
+ * @param enabled - true iff WoL should be enabled.
+ */
+ int (*update_wol) (struct qed_dev *cdev, bool enabled);
};
#define MASK_FIELD(_name, _value) \
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
new file mode 100644
index 000000000000..d27912480cb3
--- /dev/null
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -0,0 +1,229 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_ISCSI_IF_H
+#define _QED_ISCSI_IF_H
+#include <linux/types.h>
+#include <linux/qed/qed_if.h>
+
+typedef int (*iscsi_event_cb_t) (void *context,
+ u8 fw_event_code, void *fw_handle);
+struct qed_iscsi_stats {
+ u64 iscsi_rx_bytes_cnt;
+ u64 iscsi_rx_packet_cnt;
+ u64 iscsi_rx_new_ooo_isle_events_cnt;
+ u32 iscsi_cmdq_threshold_cnt;
+ u32 iscsi_rq_threshold_cnt;
+ u32 iscsi_immq_threshold_cnt;
+
+ u64 iscsi_rx_dropped_pdus_task_not_valid;
+
+ u64 iscsi_rx_data_pdu_cnt;
+ u64 iscsi_rx_r2t_pdu_cnt;
+ u64 iscsi_rx_total_pdu_cnt;
+
+ u64 iscsi_tx_go_to_slow_start_event_cnt;
+ u64 iscsi_tx_fast_retransmit_event_cnt;
+
+ u64 iscsi_tx_data_pdu_cnt;
+ u64 iscsi_tx_r2t_pdu_cnt;
+ u64 iscsi_tx_total_pdu_cnt;
+
+ u64 iscsi_tx_bytes_cnt;
+ u64 iscsi_tx_packet_cnt;
+};
+
+struct qed_dev_iscsi_info {
+ struct qed_dev_info common;
+
+ void __iomem *primary_dbq_rq_addr;
+ void __iomem *secondary_bdq_rq_addr;
+};
+
+struct qed_iscsi_id_params {
+ u8 mac[ETH_ALEN];
+ u32 ip[4];
+ u16 port;
+};
+
+struct qed_iscsi_params_offload {
+ u8 layer_code;
+ dma_addr_t sq_pbl_addr;
+ u32 initial_ack;
+
+ struct qed_iscsi_id_params src;
+ struct qed_iscsi_id_params dst;
+ u16 vlan_id;
+ u8 tcp_flags;
+ u8 ip_version;
+ u8 default_cq;
+
+ u8 ka_max_probe_cnt;
+ u8 dup_ack_theshold;
+ u32 rcv_next;
+ u32 snd_una;
+ u32 snd_next;
+ u32 snd_max;
+ u32 snd_wnd;
+ u32 rcv_wnd;
+ u32 snd_wl1;
+ u32 cwnd;
+ u32 ss_thresh;
+ u16 srtt;
+ u16 rtt_var;
+ u32 ts_time;
+ u32 ts_recent;
+ u32 ts_recent_age;
+ u32 total_rt;
+ u32 ka_timeout_delta;
+ u32 rt_timeout_delta;
+ u8 dup_ack_cnt;
+ u8 snd_wnd_probe_cnt;
+ u8 ka_probe_cnt;
+ u8 rt_cnt;
+ u32 flow_label;
+ u32 ka_timeout;
+ u32 ka_interval;
+ u32 max_rt_time;
+ u32 initial_rcv_wnd;
+ u8 ttl;
+ u8 tos_or_tc;
+ u16 remote_port;
+ u16 local_port;
+ u16 mss;
+ u8 snd_wnd_scale;
+ u8 rcv_wnd_scale;
+ u32 ts_ticks_per_second;
+ u16 da_timeout_value;
+ u8 ack_frequency;
+};
+
+struct qed_iscsi_params_update {
+ u8 update_flag;
+#define QED_ISCSI_CONN_HD_EN BIT(0)
+#define QED_ISCSI_CONN_DD_EN BIT(1)
+#define QED_ISCSI_CONN_INITIAL_R2T BIT(2)
+#define QED_ISCSI_CONN_IMMEDIATE_DATA BIT(3)
+
+ u32 max_seq_size;
+ u32 max_recv_pdu_length;
+ u32 max_send_pdu_length;
+ u32 first_seq_length;
+ u32 exp_stat_sn;
+};
+
+#define MAX_TID_BLOCKS_ISCSI (512)
+struct qed_iscsi_tid {
+ u32 size; /* In bytes per task */
+ u32 num_tids_per_block;
+ u8 *blocks[MAX_TID_BLOCKS_ISCSI];
+};
+
+struct qed_iscsi_cb_ops {
+ struct qed_common_cb_ops common;
+};
+
+/**
+ * struct qed_iscsi_ops - qed iSCSI operations.
+ * @common: common operations pointer
+ * @ll2: light L2 operations pointer
+ * @fill_dev_info: fills iSCSI specific information
+ * @param cdev
+ * @param info
+ * @return 0 on success, otherwise error value.
+ * @register_ops: register iscsi operations
+ * @param cdev
+ * @param ops - specified using qed_iscsi_cb_ops
+ * @param cookie - driver private
+ * @start: start iscsi in FW
+ * @param cdev
+ * @param tasks - qed will fill information about tasks
+ * return 0 on success, otherwise error value.
+ * @stop: stop iscsi in FW
+ * @param cdev
+ * return 0 on success, otherwise error value.
+ * @acquire_conn: acquire a new iscsi connection
+ * @param cdev
+ * @param handle - qed will fill handle that should be
+ * used henceforth as identifier of the
+ * connection.
+ * @param p_doorbell - qed will fill the address of the
+ * doorbell.
+ * @return 0 on success, otherwise error value.
+ * @release_conn: release a previously acquired iscsi connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @offload_conn: configures an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param conn_info - the configuration to use for the
+ * offload.
+ * @return 0 on success, otherwise error value.
+ * @update_conn: updates an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param conn_info - the configuration to use for the
+ * offload.
+ * @return 0 on success, otherwise error value.
+ * @destroy_conn: stops an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @clear_sq: clear all task in sq
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @get_stats: iSCSI related statistics
+ * @param cdev
+ * @param stats - pointer to struct that will be filled
+ * with stats
+ * @return 0 on success, error otherwise.
+ */
+struct qed_iscsi_ops {
+ const struct qed_common_ops *common;
+
+ const struct qed_ll2_ops *ll2;
+
+ int (*fill_dev_info)(struct qed_dev *cdev,
+ struct qed_dev_iscsi_info *info);
+
+ void (*register_ops)(struct qed_dev *cdev,
+ struct qed_iscsi_cb_ops *ops, void *cookie);
+
+ int (*start)(struct qed_dev *cdev,
+ struct qed_iscsi_tid *tasks,
+ void *event_context, iscsi_event_cb_t async_event_cb);
+
+ int (*stop)(struct qed_dev *cdev);
+
+ int (*acquire_conn)(struct qed_dev *cdev,
+ u32 *handle,
+ u32 *fw_cid, void __iomem **p_doorbell);
+
+ int (*release_conn)(struct qed_dev *cdev, u32 handle);
+
+ int (*offload_conn)(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_iscsi_params_offload *conn_info);
+
+ int (*update_conn)(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_iscsi_params_update *conn_info);
+
+ int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn);
+
+ int (*clear_sq)(struct qed_dev *cdev, u32 handle);
+
+ int (*get_stats)(struct qed_dev *cdev,
+ struct qed_iscsi_stats *stats);
+};
+
+const struct qed_iscsi_ops *qed_get_iscsi_ops(void);
+void qed_put_iscsi_ops(void);
+#endif
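
For orientation, a minimal usage sketch of the ops table above, assuming the header is included as <linux/qed/qed_iscsi_if.h>; the function name, error handling and probe flow are illustrative only, not taken from the in-tree qedi driver:

#include <linux/errno.h>
#include <linux/qed/qed_iscsi_if.h>

static int example_iscsi_setup(struct qed_dev *cdev)
{
	const struct qed_iscsi_ops *ops = qed_get_iscsi_ops();
	struct qed_dev_iscsi_info info;
	struct qed_iscsi_tid tasks;
	void __iomem *doorbell;
	u32 handle, fw_cid;
	int rc;

	if (!ops)
		return -ENODEV;

	rc = ops->fill_dev_info(cdev, &info);
	if (rc)
		goto out_put;

	/* start the iSCSI function in FW; no async event callback here */
	rc = ops->start(cdev, &tasks, NULL, NULL);
	if (rc)
		goto out_put;

	/* qed fills the connection handle and doorbell address */
	rc = ops->acquire_conn(cdev, &handle, &fw_cid, &doorbell);
	if (rc)
		goto out_stop;

	/* offload_conn()/update_conn() with filled params would follow */

	ops->release_conn(cdev, handle);
out_stop:
	ops->stop(cdev);
out_put:
	qed_put_iscsi_ops();
	return rc;
}
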
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 55107a8ff887..3434eef2a5aa 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -431,7 +431,7 @@ struct qc_info {
/* Operations handling requests from userspace */
struct quotactl_ops {
- int (*quota_on)(struct super_block *, int, int, struct path *);
+ int (*quota_on)(struct super_block *, int, int, const struct path *);
int (*quota_off)(struct super_block *, int);
int (*quota_enable)(struct super_block *, unsigned int);
int (*quota_disable)(struct super_block *, unsigned int);
@@ -520,7 +520,6 @@ static inline void quota_send_warning(struct kqid qid, dev_t dev,
struct quota_info {
unsigned int flags; /* Flags for diskquotas on this device */
struct mutex dqio_mutex; /* lock device while I/O in progress */
- struct mutex dqonoff_mutex; /* Serialize quotaon & quotaoff */
struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */
struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */
const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index f00fa86ac966..799a63d0e1a8 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -90,7 +90,7 @@ int dquot_file_open(struct inode *inode, struct file *file);
int dquot_enable(struct inode *inode, int type, int format_id,
unsigned int flags);
int dquot_quota_on(struct super_block *sb, int type, int format_id,
- struct path *path);
+ const struct path *path);
int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
int format_id, int type);
int dquot_quota_off(struct super_block *sb, int type);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index af3581b8a451..52bda854593b 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -80,26 +80,25 @@ static inline bool radix_tree_is_internal_node(void *ptr)
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
RADIX_TREE_MAP_SHIFT))
-/* Internally used bits of node->count */
-#define RADIX_TREE_COUNT_SHIFT (RADIX_TREE_MAP_SHIFT + 1)
-#define RADIX_TREE_COUNT_MASK ((1UL << RADIX_TREE_COUNT_SHIFT) - 1)
-
+/*
+ * @count is the count of every non-NULL element in the ->slots array
+ * whether that is an exceptional entry, a retry entry, a user pointer,
+ * a sibling entry or a pointer to the next level of the tree.
+ * @exceptional is the count of every element in ->slots which is
+ * either radix_tree_exceptional_entry() or is a sibling entry for an
+ * exceptional entry.
+ */
struct radix_tree_node {
- unsigned char shift; /* Bits remaining in each slot */
- unsigned char offset; /* Slot offset in parent */
- unsigned int count;
+ unsigned char shift; /* Bits remaining in each slot */
+ unsigned char offset; /* Slot offset in parent */
+ unsigned char count; /* Total entry count */
+ unsigned char exceptional; /* Exceptional entry count */
+ struct radix_tree_node *parent; /* Used when ascending tree */
+ void *private_data; /* For tree user */
union {
- struct {
- /* Used when ascending tree */
- struct radix_tree_node *parent;
- /* For tree user */
- void *private_data;
- };
- /* Used when freeing node */
- struct rcu_head rcu_head;
+ struct list_head private_list; /* For tree user */
+ struct rcu_head rcu_head; /* Used when freeing node */
};
- /* For tree user */
- struct list_head private_list;
void __rcu *slots[RADIX_TREE_MAP_SIZE];
unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};
@@ -130,6 +129,41 @@ static inline bool radix_tree_empty(struct radix_tree_root *root)
}
/**
+ * struct radix_tree_iter - radix tree iterator state
+ *
+ * @index: index of current slot
+ * @next_index: one beyond the last index for this chunk
+ * @tags: bit-mask for tag-iterating
+ * @node: node that contains current slot
+ * @shift: shift for the node that holds our slots
+ *
+ * This radix tree iterator works in terms of "chunks" of slots. A chunk is a
+ * subinterval of slots contained within one radix tree leaf node. It is
+ * described by a pointer to its first slot and a struct radix_tree_iter
+ * which holds the chunk's position in the tree and its size. For tagged
+ * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
+ * radix tree tag.
+ */
+struct radix_tree_iter {
+ unsigned long index;
+ unsigned long next_index;
+ unsigned long tags;
+ struct radix_tree_node *node;
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+ unsigned int shift;
+#endif
+};
+
+static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
+{
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+ return iter->shift;
+#else
+ return 0;
+#endif
+}
+
+/**
* Radix-tree synchronization
*
* The radix-tree API requires that users provide all synchronisation (with
@@ -248,20 +282,6 @@ static inline int radix_tree_exception(void *arg)
return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}
-/**
- * radix_tree_replace_slot - replace item in a slot
- * @pslot: pointer to slot, returned by radix_tree_lookup_slot
- * @item: new item to store in the slot.
- *
- * For use with radix_tree_lookup_slot(). Caller must hold tree write locked
- * across slot lookup and replacement.
- */
-static inline void radix_tree_replace_slot(void **pslot, void *item)
-{
- BUG_ON(radix_tree_is_internal_node(item));
- rcu_assign_pointer(*pslot, item);
-}
-
int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
unsigned order, struct radix_tree_node **nodep,
void ***slotp);
@@ -276,8 +296,19 @@ void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
struct radix_tree_node **nodep, void ***slotp);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
-bool __radix_tree_delete_node(struct radix_tree_root *root,
- struct radix_tree_node *node);
+typedef void (*radix_tree_update_node_t)(struct radix_tree_node *, void *);
+void __radix_tree_replace(struct radix_tree_root *root,
+ struct radix_tree_node *node,
+ void **slot, void *item,
+ radix_tree_update_node_t update_node, void *private);
+void radix_tree_iter_replace(struct radix_tree_root *,
+ const struct radix_tree_iter *, void **slot, void *item);
+void radix_tree_replace_slot(struct radix_tree_root *root,
+ void **slot, void *item);
+void __radix_tree_delete_node(struct radix_tree_root *root,
+ struct radix_tree_node *node,
+ radix_tree_update_node_t update_node,
+ void *private);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
void radix_tree_clear_tags(struct radix_tree_root *root,
@@ -299,6 +330,8 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
unsigned long index, unsigned int tag);
int radix_tree_tag_get(struct radix_tree_root *root,
unsigned long index, unsigned int tag);
+void radix_tree_iter_tag_set(struct radix_tree_root *root,
+ const struct radix_tree_iter *iter, unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
unsigned long first_index, unsigned int max_items,
@@ -307,50 +340,18 @@ unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
unsigned long first_index, unsigned int max_items,
unsigned int tag);
-unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
- unsigned long *first_indexp, unsigned long last_index,
- unsigned long nr_to_tag,
- unsigned int fromtag, unsigned int totag);
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
-unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
static inline void radix_tree_preload_end(void)
{
preempt_enable();
}
-/**
- * struct radix_tree_iter - radix tree iterator state
- *
- * @index: index of current slot
- * @next_index: one beyond the last index for this chunk
- * @tags: bit-mask for tag-iterating
- * @shift: shift for the node that holds our slots
- *
- * This radix tree iterator works in terms of "chunks" of slots. A chunk is a
- * subinterval of slots contained within one radix tree leaf node. It is
- * described by a pointer to its first slot and a struct radix_tree_iter
- * which holds the chunk's position in the tree and its size. For tagged
- * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
- * radix tree tag.
- */
-struct radix_tree_iter {
- unsigned long index;
- unsigned long next_index;
- unsigned long tags;
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
- unsigned int shift;
-#endif
-};
-
-static inline unsigned int iter_shift(struct radix_tree_iter *iter)
-{
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
- return iter->shift;
-#else
- return 0;
-#endif
-}
+int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t);
+int radix_tree_split(struct radix_tree_root *, unsigned long index,
+ unsigned new_order);
+int radix_tree_join(struct radix_tree_root *, unsigned long index,
+ unsigned new_order, void *);
#define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */
#define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */
@@ -419,20 +420,17 @@ __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
}
/**
- * radix_tree_iter_next - resume iterating when the chunk may be invalid
- * @iter: iterator state
+ * radix_tree_iter_resume - resume iterating when the chunk may be invalid
+ * @slot: pointer to current slot
+ * @iter: iterator state
+ * Returns: New slot pointer
*
* If the iterator needs to release then reacquire a lock, the chunk may
* have been invalidated by an insertion or deletion. Call this function
- * to continue the iteration from the next index.
+ * before releasing the lock to continue the iteration from the next index.
*/
-static inline __must_check
-void **radix_tree_iter_next(struct radix_tree_iter *iter)
-{
- iter->next_index = __radix_tree_iter_add(iter, 1);
- iter->tags = 0;
- return NULL;
-}
+void **__must_check radix_tree_iter_resume(void **slot,
+ struct radix_tree_iter *iter);
/**
* radix_tree_chunk_size - get current chunk size
@@ -446,10 +444,17 @@ radix_tree_chunk_size(struct radix_tree_iter *iter)
return (iter->next_index - iter->index) >> iter_shift(iter);
}
-static inline struct radix_tree_node *entry_to_node(void *ptr)
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter,
+ unsigned flags);
+#else
+/* Can't happen without sibling entries, but the compiler can't tell that */
+static inline void ** __radix_tree_next_slot(void **slot,
+ struct radix_tree_iter *iter, unsigned flags)
{
- return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
+ return slot;
}
+#endif
/**
* radix_tree_next_slot - find next slot in chunk
@@ -463,7 +468,7 @@ static inline struct radix_tree_node *entry_to_node(void *ptr)
* For tagged lookup it also eats @iter->tags.
*
* There are several cases where 'slot' can be passed in as NULL to this
- * function. These cases result from the use of radix_tree_iter_next() or
+ * function. These cases result from the use of radix_tree_iter_resume() or
* radix_tree_iter_retry(). In these cases we don't end up dereferencing
* 'slot' because either:
* a) we are doing tagged iteration and iter->tags has been set to 0, or
@@ -474,51 +479,31 @@ static __always_inline void **
radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
{
if (flags & RADIX_TREE_ITER_TAGGED) {
- void *canon = slot;
-
iter->tags >>= 1;
if (unlikely(!iter->tags))
return NULL;
- while (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) &&
- radix_tree_is_internal_node(slot[1])) {
- if (entry_to_node(slot[1]) == canon) {
- iter->tags >>= 1;
- iter->index = __radix_tree_iter_add(iter, 1);
- slot++;
- continue;
- }
- iter->next_index = __radix_tree_iter_add(iter, 1);
- return NULL;
- }
if (likely(iter->tags & 1ul)) {
iter->index = __radix_tree_iter_add(iter, 1);
- return slot + 1;
+ slot++;
+ goto found;
}
if (!(flags & RADIX_TREE_ITER_CONTIG)) {
unsigned offset = __ffs(iter->tags);
- iter->tags >>= offset;
- iter->index = __radix_tree_iter_add(iter, offset + 1);
- return slot + offset + 1;
+ iter->tags >>= offset++;
+ iter->index = __radix_tree_iter_add(iter, offset);
+ slot += offset;
+ goto found;
}
} else {
long count = radix_tree_chunk_size(iter);
- void *canon = slot;
while (--count > 0) {
slot++;
iter->index = __radix_tree_iter_add(iter, 1);
- if (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) &&
- radix_tree_is_internal_node(*slot)) {
- if (entry_to_node(*slot) == canon)
- continue;
- iter->next_index = iter->index;
- break;
- }
-
if (likely(*slot))
- return slot;
+ goto found;
if (flags & RADIX_TREE_ITER_CONTIG) {
/* forbid switching to the next chunk */
iter->next_index = 0;
@@ -527,6 +512,11 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
}
}
return NULL;
+
+ found:
+ if (unlikely(radix_tree_is_internal_node(*slot)))
+ return __radix_tree_next_slot(slot, iter, flags);
+ return slot;
}
/**
@@ -577,6 +567,6 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
slot || (slot = radix_tree_next_chunk(root, iter, \
RADIX_TREE_ITER_TAGGED | tag)) ; \
slot = radix_tree_next_slot(slot, iter, \
- RADIX_TREE_ITER_TAGGED))
+ RADIX_TREE_ITER_TAGGED | tag))
#endif /* _LINUX_RADIX_TREE_H */
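
Two caller-visible changes above are worth illustrating: radix_tree_replace_slot() now takes the tree root so entry accounting can stay correct, and radix_tree_iter_next() has become radix_tree_iter_resume(), which must be called on the current slot before the tree lock is dropped. A minimal sketch, assuming a caller-provided spinlock protects the tree (the function and lock names are illustrative):

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

static void example_replace_all(struct radix_tree_root *root,
				spinlock_t *lock, void *new_item)
{
	struct radix_tree_iter iter;
	void **slot;

	spin_lock(lock);
	radix_tree_for_each_slot(slot, root, &iter, 0) {
		/* the root is now passed so the node counts stay in sync */
		radix_tree_replace_slot(root, slot, new_item);

		if (need_resched()) {
			/* the chunk may be invalidated while unlocked */
			slot = radix_tree_iter_resume(slot, &iter);
			spin_unlock(lock);
			cond_resched();
			spin_lock(lock);
		}
	}
	spin_unlock(lock);
}
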
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 57c9e0622a38..56375edf2ed2 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -77,8 +77,11 @@ extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
#ifdef CONFIG_PRINTK
-#define WARN_ON_RATELIMIT(condition, state) \
- WARN_ON((condition) && __ratelimit(state))
+#define WARN_ON_RATELIMIT(condition, state) ({ \
+ bool __rtn_cond = !!(condition); \
+ WARN_ON(__rtn_cond && __ratelimit(state)); \
+ __rtn_cond; \
+})
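
The rewrite matters to callers: the old form expanded to WARN_ON((condition) && __ratelimit(state)) and so evaluated to false whenever the warning was rate-limited, even though the condition held. The new form always evaluates to the condition, so it can drive control flow; a sketch, where len, buf_size and example_rs are hypothetical:

	if (WARN_ON_RATELIMIT(len > buf_size, &example_rs))
		return -EINVAL;	/* taken even when the splat was suppressed */
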
#define WARN_RATELIMIT(condition, format, ...) \
({ \
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 8beb98dcf14f..4f7a9561b8c4 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -45,19 +45,17 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
-#ifndef CONFIG_DEBUG_LIST
static inline void __list_add_rcu(struct list_head *new,
struct list_head *prev, struct list_head *next)
{
+ if (!__list_add_valid(new, prev, next))
+ return;
+
new->next = next;
new->prev = prev;
rcu_assign_pointer(list_next_rcu(prev), new);
next->prev = new;
}
-#else
-void __list_add_rcu(struct list_head *new,
- struct list_head *prev, struct list_head *next);
-#endif
/**
* list_add_rcu - add a new entry to rcu-protected list
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 692108222271..ea0fffa5faeb 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -120,6 +120,25 @@ struct regmap;
#define REGULATOR_EVENT_PRE_DISABLE 0x400
#define REGULATOR_EVENT_ABORT_DISABLE 0x800
+/*
+ * Regulator errors that can be queried using regulator_get_error_flags
+ *
+ * UNDER_VOLTAGE Regulator output is under voltage.
+ * OVER_CURRENT Regulator output current is too high.
+ * REGULATION_OUT Regulator output is out of regulation.
+ * FAIL Regulator output has failed.
+ * OVER_TEMP Regulator over temp.
+ *
+ * NOTE: These errors can be OR'ed together.
+ */
+
+#define REGULATOR_ERROR_UNDER_VOLTAGE BIT(1)
+#define REGULATOR_ERROR_OVER_CURRENT BIT(2)
+#define REGULATOR_ERROR_REGULATION_OUT BIT(3)
+#define REGULATOR_ERROR_FAIL BIT(4)
+#define REGULATOR_ERROR_OVER_TEMP BIT(5)
+
+
/**
* struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event
*
@@ -237,6 +256,8 @@ int regulator_get_current_limit(struct regulator *regulator);
int regulator_set_mode(struct regulator *regulator, unsigned int mode);
unsigned int regulator_get_mode(struct regulator *regulator);
+int regulator_get_error_flags(struct regulator *regulator,
+ unsigned int *flags);
int regulator_set_load(struct regulator *regulator, int load_uA);
int regulator_allow_bypass(struct regulator *regulator, bool allow);
@@ -477,6 +498,12 @@ static inline unsigned int regulator_get_mode(struct regulator *regulator)
return REGULATOR_MODE_NORMAL;
}
+static inline int regulator_get_error_flags(struct regulator *regulator,
+ unsigned int *flags)
+{
+ return -EINVAL;
+}
+
static inline int regulator_set_load(struct regulator *regulator, int load_uA)
{
return REGULATOR_MODE_NORMAL;
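
A consumer-side sketch of the new error query; the regulator handle and the reactions to each flag are illustrative:

#include <linux/printk.h>
#include <linux/regulator/consumer.h>

static void example_check_supply(struct regulator *reg)
{
	unsigned int flags;

	if (regulator_get_error_flags(reg, &flags))
		return;	/* query failed or not supported by the driver */

	if (flags & REGULATOR_ERROR_UNDER_VOLTAGE)
		pr_warn("supply under voltage\n");
	if (flags & (REGULATOR_ERROR_OVER_CURRENT | REGULATOR_ERROR_OVER_TEMP))
		pr_warn("supply over current or over temperature\n");
}
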
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 37b532410528..dac8e7b16bc6 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -100,6 +100,7 @@ struct regulator_linear_range {
*
* @set_mode: Set the configured operating mode for the regulator.
* @get_mode: Get the configured operating mode for the regulator.
+ * @get_error_flags: Get the current error(s) for the regulator.
* @get_status: Return actual (not as-configured) status of regulator, as a
* REGULATOR_STATUS value (or negative errno)
* @get_optimum_mode: Get the most efficient operating mode for the regulator
@@ -169,6 +170,9 @@ struct regulator_ops {
int (*set_mode) (struct regulator_dev *, unsigned int mode);
unsigned int (*get_mode) (struct regulator_dev *);
+ /* retrieve current error flags on the regulator */
+ int (*get_error_flags)(struct regulator_dev *, unsigned int *flags);
+
/* Time taken to enable or set voltage on the regulator */
int (*enable_time) (struct regulator_dev *);
int (*set_ramp_delay) (struct regulator_dev *, int ramp_delay);
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 930023b7c825..e2f3a3281d8f 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -400,6 +400,7 @@ enum rproc_crash_type {
* @firmware_loading_complete: marks e/o asynchronous firmware loading
* @bootaddr: address of first instruction to boot rproc with (optional)
* @rvdevs: list of remote virtio devices
+ * @subdevs: list of subdevices, used to follow the running state
* @notifyids: idr for dynamically assigning rproc-wide unique notify ids
* @index: index of this rproc device
* @crash_handler: workqueue for handling a crash
@@ -407,15 +408,14 @@ enum rproc_crash_type {
* @crash_comp: completion used to sync crash handler and the rproc reload
* @recovery_disabled: flag that state if recovery was disabled
* @max_notifyid: largest allocated notify id.
- * @table_ptr: pointer to the resource table in effect
- * @cached_table: copy of the resource table
+ * @table_ptr: our copy of the resource table
* @has_iommu: flag to indicate if remote processor is behind an MMU
*/
struct rproc {
struct list_head node;
struct iommu_domain *domain;
const char *name;
- const char *firmware;
+ char *firmware;
void *priv;
const struct rproc_ops *ops;
struct device dev;
@@ -431,6 +431,7 @@ struct rproc {
struct completion firmware_loading_complete;
u32 bootaddr;
struct list_head rvdevs;
+ struct list_head subdevs;
struct idr notifyids;
int index;
struct work_struct crash_handler;
@@ -439,11 +440,23 @@ struct rproc {
bool recovery_disabled;
int max_notifyid;
struct resource_table *table_ptr;
- struct resource_table *cached_table;
bool has_iommu;
bool auto_boot;
};
+/**
+ * struct rproc_subdev - subdevice tied to a remoteproc
+ * @node: list node related to the rproc subdevs list
+ * @probe: probe function, called as the rproc is started
+ * @remove: remove function, called as the rproc is stopped
+ */
+struct rproc_subdev {
+ struct list_head node;
+
+ int (*probe)(struct rproc_subdev *subdev);
+ void (*remove)(struct rproc_subdev *subdev);
+};
+
/* we currently support only two vrings per rvdev */
#define RVDEV_NUM_VRINGS 2
@@ -472,6 +485,9 @@ struct rproc_vring {
/**
* struct rproc_vdev - remoteproc state for a supported virtio device
+ * @refcount: reference counter for the vdev and vring allocations
+ * @subdev: handle for registering the vdev as a rproc subdevice
+ * @id: virtio device id (as in virtio_ids.h)
* @node: list node
* @rproc: the rproc handle
* @vdev: the virio device
@@ -479,6 +495,11 @@ struct rproc_vring {
* @rsc_offset: offset of the vdev's resource entry
*/
struct rproc_vdev {
+ struct kref refcount;
+
+ struct rproc_subdev subdev;
+
+ unsigned int id;
struct list_head node;
struct rproc *rproc;
struct virtio_device vdev;
@@ -511,4 +532,11 @@ static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev)
return rvdev->rproc;
}
+void rproc_add_subdev(struct rproc *rproc,
+ struct rproc_subdev *subdev,
+ int (*probe)(struct rproc_subdev *subdev),
+ void (*remove)(struct rproc_subdev *subdev));
+
+void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev);
+
#endif /* REMOTEPROC_H */
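
A sketch of how a driver might use the new subdevice hooks to tie a resource (for instance a communication link) to the remote processor's boot and shutdown; the structure and callback names are illustrative:

#include <linux/remoteproc.h>

struct example_link {
	struct rproc_subdev subdev;
	/* driver-private state */
};

static int example_link_probe(struct rproc_subdev *subdev)
{
	/* container_of(subdev, struct example_link, subdev) recovers the
	 * driver context; bring the link up now that the rproc is running.
	 */
	return 0;
}

static void example_link_remove(struct rproc_subdev *subdev)
{
	/* tear the link down before the rproc is stopped */
}

static void example_link_register(struct rproc *rproc, struct example_link *link)
{
	rproc_add_subdev(rproc, &link->subdev,
			 example_link_probe, example_link_remove);
}
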
diff --git a/include/linux/remoteproc/st_slim_rproc.h b/include/linux/remoteproc/st_slim_rproc.h
new file mode 100644
index 000000000000..4155556fa4b2
--- /dev/null
+++ b/include/linux/remoteproc/st_slim_rproc.h
@@ -0,0 +1,58 @@
+/*
+ * SLIM core rproc driver header
+ *
+ * Copyright (C) 2016 STMicroelectronics
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _ST_REMOTEPROC_SLIM_H
+#define _ST_REMOTEPROC_SLIM_H
+
+#define ST_SLIM_MEM_MAX 2
+#define ST_SLIM_MAX_CLK 4
+
+enum {
+ ST_SLIM_DMEM,
+ ST_SLIM_IMEM,
+};
+
+/**
+ * struct st_slim_mem - slim internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @size: Size of the memory region
+ */
+struct st_slim_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ size_t size;
+};
+
+/**
+ * struct st_slim_rproc - SLIM slim core
+ * @rproc: rproc handle
+ * @mem: slim memory information
+ * @slimcore: slim slimcore regs
+ * @peri: slim peripheral regs
+ * @clks: slim clocks
+ */
+struct st_slim_rproc {
+ struct rproc *rproc;
+ struct st_slim_mem mem[ST_SLIM_MEM_MAX];
+ void __iomem *slimcore;
+ void __iomem *peri;
+
+ /* st_slim_rproc private */
+ struct clk *clks[ST_SLIM_MAX_CLK];
+};
+
+struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev,
+ char *fw_name);
+void st_slim_rproc_put(struct st_slim_rproc *slim_rproc);
+
+#endif
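
Client drivers (e.g. the ST FDMA driver) allocate and release the SLIM rproc around their own lifetime; a sketch with an illustrative firmware name, assuming st_slim_rproc_alloc() returns an ERR_PTR on failure:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/remoteproc/st_slim_rproc.h>

static int example_probe(struct platform_device *pdev,
			 struct st_slim_rproc **slim_out)
{
	struct st_slim_rproc *slim;

	slim = st_slim_rproc_alloc(pdev, "example_slim_fw.elf");
	if (IS_ERR(slim))
		return PTR_ERR(slim);

	*slim_out = slim;
	return 0;
}

static void example_remove(struct st_slim_rproc *slim)
{
	st_slim_rproc_put(slim);
}
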
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index b0f305e77b7f..d9706a6f5ae2 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -40,7 +40,7 @@
#define _LINUX_RESERVATION_H
#include <linux/ww_mutex.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>
@@ -59,7 +59,7 @@ extern const char reservation_seqcount_string[];
struct reservation_object_list {
struct rcu_head rcu;
u32 shared_count, shared_max;
- struct fence __rcu *shared[];
+ struct dma_fence __rcu *shared[];
};
/**
@@ -74,7 +74,7 @@ struct reservation_object {
struct ww_mutex lock;
seqcount_t seq;
- struct fence __rcu *fence_excl;
+ struct dma_fence __rcu *fence_excl;
struct reservation_object_list __rcu *fence;
struct reservation_object_list *staged;
};
@@ -107,7 +107,7 @@ reservation_object_fini(struct reservation_object *obj)
{
int i;
struct reservation_object_list *fobj;
- struct fence *excl;
+ struct dma_fence *excl;
/*
* This object should be dead and all references must have
@@ -115,12 +115,12 @@ reservation_object_fini(struct reservation_object *obj)
*/
excl = rcu_dereference_protected(obj->fence_excl, 1);
if (excl)
- fence_put(excl);
+ dma_fence_put(excl);
fobj = rcu_dereference_protected(obj->fence, 1);
if (fobj) {
for (i = 0; i < fobj->shared_count; ++i)
- fence_put(rcu_dereference_protected(fobj->shared[i], 1));
+ dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1));
kfree(fobj);
}
@@ -155,7 +155,7 @@ reservation_object_get_list(struct reservation_object *obj)
* RETURNS
* The exclusive fence or NULL
*/
-static inline struct fence *
+static inline struct dma_fence *
reservation_object_get_excl(struct reservation_object *obj)
{
return rcu_dereference_protected(obj->fence_excl,
@@ -173,35 +173,32 @@ reservation_object_get_excl(struct reservation_object *obj)
* RETURNS
* The exclusive fence or NULL if none
*/
-static inline struct fence *
+static inline struct dma_fence *
reservation_object_get_excl_rcu(struct reservation_object *obj)
{
- struct fence *fence;
- unsigned seq;
-retry:
- seq = read_seqcount_begin(&obj->seq);
+ struct dma_fence *fence;
+
+ if (!rcu_access_pointer(obj->fence_excl))
+ return NULL;
+
rcu_read_lock();
- fence = rcu_dereference(obj->fence_excl);
- if (read_seqcount_retry(&obj->seq, seq)) {
- rcu_read_unlock();
- goto retry;
- }
- fence = fence_get(fence);
+ fence = dma_fence_get_rcu_safe(&obj->fence_excl);
rcu_read_unlock();
+
return fence;
}
int reservation_object_reserve_shared(struct reservation_object *obj);
void reservation_object_add_shared_fence(struct reservation_object *obj,
- struct fence *fence);
+ struct dma_fence *fence);
void reservation_object_add_excl_fence(struct reservation_object *obj,
- struct fence *fence);
+ struct dma_fence *fence);
int reservation_object_get_fences_rcu(struct reservation_object *obj,
- struct fence **pfence_excl,
+ struct dma_fence **pfence_excl,
unsigned *pshared_count,
- struct fence ***pshared);
+ struct dma_fence ***pshared);
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
bool wait_all, bool intr,
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
new file mode 100644
index 000000000000..0d905d8ec553
--- /dev/null
+++ b/include/linux/restart_block.h
@@ -0,0 +1,51 @@
+/*
+ * Common syscall restarting data
+ */
+#ifndef __LINUX_RESTART_BLOCK_H
+#define __LINUX_RESTART_BLOCK_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+struct timespec;
+struct compat_timespec;
+struct pollfd;
+
+/*
+ * System call restart block.
+ */
+struct restart_block {
+ long (*fn)(struct restart_block *);
+ union {
+ /* For futex_wait and futex_wait_requeue_pi */
+ struct {
+ u32 __user *uaddr;
+ u32 val;
+ u32 flags;
+ u32 bitset;
+ u64 time;
+ u32 __user *uaddr2;
+ } futex;
+ /* For nanosleep */
+ struct {
+ clockid_t clockid;
+ struct timespec __user *rmtp;
+#ifdef CONFIG_COMPAT
+ struct compat_timespec __user *compat_rmtp;
+#endif
+ u64 expires;
+ } nanosleep;
+ /* For poll */
+ struct {
+ struct pollfd __user *ufds;
+ int nfds;
+ int has_timeout;
+ unsigned long tv_sec;
+ unsigned long tv_nsec;
+ } poll;
+ };
+};
+
+extern long do_no_restart_syscall(struct restart_block *parm);
+
+#endif /* __LINUX_RESTART_BLOCK_H */
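
The header only carries the data; the pattern it supports is that an interruptible syscall saves its continuation state in current->restart_block, points fn at a restart handler, and returns -ERESTART_RESTARTBLOCK so the signal code can re-invoke it. A sketch along the lines of nanosleep, where example_wait() and example_syscall() are hypothetical:

#include <linux/errno.h>
#include <linux/restart_block.h>
#include <linux/sched.h>

static long example_wait(u64 expires);		/* hypothetical helper */

static long example_restart_fn(struct restart_block *restart)
{
	return example_wait(restart->nanosleep.expires);
}

static long example_syscall(u64 expires)
{
	long ret = example_wait(expires);

	if (ret == -ERESTARTNOHAND) {	/* interrupted by a signal */
		struct restart_block *restart = &current->restart_block;

		restart->fn = example_restart_fn;
		restart->nanosleep.expires = expires;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}
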
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 4acc552e9279..b6d4568795a7 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -198,4 +198,10 @@ enum ring_buffer_flags {
RB_FL_OVERWRITE = 1 << 0,
};
+#ifdef CONFIG_RING_BUFFER
+int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
+#else
+#define trace_rb_cpu_prepare NULL
+#endif
+
#endif /* _LINUX_RING_BUFFER_H */
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b46bb5620a76..15321fb1df6b 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -137,11 +137,19 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
* anon_vma helper functions.
*/
void anon_vma_init(void); /* create anon_vma_cachep */
-int anon_vma_prepare(struct vm_area_struct *);
+int __anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
+static inline int anon_vma_prepare(struct vm_area_struct *vma)
+{
+ if (likely(vma->anon_vma))
+ return 0;
+
+ return __anon_vma_prepare(vma);
+}
+
static inline void anon_vma_merge(struct vm_area_struct *vma,
struct vm_area_struct *next)
{
diff --git a/include/linux/rmi.h b/include/linux/rmi.h
index e0aca1476001..64125443f8a6 100644
--- a/include/linux/rmi.h
+++ b/include/linux/rmi.h
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/input.h>
+#include <linux/kfifo.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -99,6 +100,8 @@ struct rmi_2d_sensor_platform_data {
bool topbuttonpad;
bool kernel_tracking;
int dmax;
+ int dribble;
+ int palm_detect;
};
/**
@@ -106,7 +109,7 @@ struct rmi_2d_sensor_platform_data {
* @buttonpad - the touchpad is a buttonpad, so enable only the first actual
* button that is found.
* @trackstick_buttons - Set when the function 30 is handling the physical
- * buttons of the trackstick (as a PD/2 passthrough device.
+ * buttons of the trackstick (as a PS/2 passthrough device).
* @disable - the touchpad incorrectly reports F30 and it should be ignored.
* This is a special case which is due to misconfigured firmware.
*/
@@ -116,14 +119,17 @@ struct rmi_f30_data {
bool disable;
};
-/**
- * struct rmi_f01_power - override default power management settings.
- *
+
+/*
+ * Set the state of a register
+ * DEFAULT - use the default value set by the firmware config
+ * OFF - explicitly disable the register
+ * ON - explicitly enable the register
*/
-enum rmi_f01_nosleep {
- RMI_F01_NOSLEEP_DEFAULT = 0,
- RMI_F01_NOSLEEP_OFF = 1,
- RMI_F01_NOSLEEP_ON = 2
+enum rmi_reg_state {
+ RMI_REG_STATE_DEFAULT = 0,
+ RMI_REG_STATE_OFF = 1,
+ RMI_REG_STATE_ON = 2
};
/**
@@ -143,7 +149,7 @@ enum rmi_f01_nosleep {
* when the touch sensor is in doze mode, in units of 10ms.
*/
struct rmi_f01_power_management {
- enum rmi_f01_nosleep nosleep;
+ enum rmi_reg_state nosleep;
u8 wakeup_threshold;
u8 doze_holdoff;
u8 doze_interval;
@@ -204,16 +210,18 @@ struct rmi_device_platform_data_spi {
* @reset_delay_ms - after issuing a reset command to the touch sensor, the
* driver waits a few milliseconds to give the firmware a chance to
* to re-initialize. You can override the default wait period here.
+ * @irq: irq associated with the attn gpio line, or negative
*/
struct rmi_device_platform_data {
int reset_delay_ms;
+ int irq;
struct rmi_device_platform_data_spi spi_data;
/* function handler pdata */
- struct rmi_2d_sensor_platform_data *sensor_pdata;
+ struct rmi_2d_sensor_platform_data sensor_pdata;
struct rmi_f01_power_management power_management;
- struct rmi_f30_data *f30_data;
+ struct rmi_f30_data f30_data;
};
/**
@@ -264,9 +272,6 @@ struct rmi_transport_dev {
struct rmi_device_platform_data pdata;
struct input_dev *input;
-
- void *attn_data;
- int attn_size;
};
/**
@@ -324,17 +329,24 @@ struct rmi_device {
};
+struct rmi4_attn_data {
+ unsigned long irq_status;
+ size_t size;
+ void *data;
+};
+
struct rmi_driver_data {
struct list_head function_list;
struct rmi_device *rmi_dev;
struct rmi_function *f01_container;
- bool f01_bootloader_mode;
+ struct rmi_function *f34_container;
+ bool bootloader_mode;
- u32 attn_count;
int num_of_irq_regs;
int irq_count;
+ void *irq_memory;
unsigned long *irq_status;
unsigned long *fn_irq_bits;
unsigned long *current_irq_mask;
@@ -343,17 +355,23 @@ struct rmi_driver_data {
struct input_dev *input;
u8 pdt_props;
- u8 bsr;
+
+ u8 num_rx_electrodes;
+ u8 num_tx_electrodes;
bool enabled;
+ struct mutex enabled_mutex;
- void *data;
+ struct rmi4_attn_data attn_data;
+ DECLARE_KFIFO(attn_fifo, struct rmi4_attn_data, 16);
};
int rmi_register_transport_device(struct rmi_transport_dev *xport);
void rmi_unregister_transport_device(struct rmi_transport_dev *xport);
-int rmi_process_interrupt_requests(struct rmi_device *rmi_dev);
-int rmi_driver_suspend(struct rmi_device *rmi_dev);
-int rmi_driver_resume(struct rmi_device *rmi_dev);
+void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status,
+ void *data, size_t size);
+
+int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake);
+int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake);
#endif
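
With the kfifo of rmi4_attn_data, a transport that receives attention data out of band (the HID glue, for instance) queues it for the core rather than stashing pointers in rmi_transport_dev; a sketch, with an illustrative report layout:

#include <linux/rmi.h>

static void example_handle_report(struct rmi_device *rmi_dev,
				  u8 *buf, size_t len)
{
	unsigned long irq_status = buf[0];	/* illustrative layout */

	/* queue the payload for the core's IRQ processing to consume */
	rmi_set_attn_data(rmi_dev, irq_status, buf + 1, len - 1);
}
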
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 452d393cc8dd..18f9e1ae4b7e 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -37,6 +37,7 @@
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/err.h>
#include <linux/mod_devicetable.h>
#include <linux/kref.h>
#include <linux/mutex.h>
@@ -64,6 +65,7 @@ struct rpmsg_channel_info {
* rpmsg_device - device that belong to the rpmsg bus
* @dev: the device struct
* @id: device id (used to match between rpmsg drivers and devices)
+ * @driver_override: driver name to force a match
* @src: local address
* @dst: destination address
* @ept: the rpmsg endpoint of this channel
@@ -72,6 +74,7 @@ struct rpmsg_channel_info {
struct rpmsg_device {
struct device dev;
struct rpmsg_device_id id;
+ char *driver_override;
u32 src;
u32 dst;
struct rpmsg_endpoint *ept;
@@ -132,6 +135,8 @@ struct rpmsg_driver {
int (*callback)(struct rpmsg_device *, void *, int, void *, u32);
};
+#if IS_ENABLED(CONFIG_RPMSG)
+
int register_rpmsg_device(struct rpmsg_device *dev);
void unregister_rpmsg_device(struct rpmsg_device *dev);
int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner);
@@ -141,6 +146,116 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *,
rpmsg_rx_cb_t cb, void *priv,
struct rpmsg_channel_info chinfo);
+int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len);
+int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
+int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
+ void *data, int len);
+
+int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len);
+int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
+int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
+ void *data, int len);
+
+#else
+
+static inline int register_rpmsg_device(struct rpmsg_device *dev)
+{
+ return -ENXIO;
+}
+
+static inline void unregister_rpmsg_device(struct rpmsg_device *dev)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+}
+
+static inline int __register_rpmsg_driver(struct rpmsg_driver *drv,
+ struct module *owner)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
+static inline void unregister_rpmsg_driver(struct rpmsg_driver *drv)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+}
+
+static inline void rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+}
+
+static inline struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev,
+ rpmsg_rx_cb_t cb,
+ void *priv,
+ struct rpmsg_channel_info chinfo)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return ERR_PTR(-ENXIO);
+}
+
+static inline int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
+static inline int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len,
+ u32 dst)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+
+}
+
+static inline int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src,
+ u32 dst, void *data, int len)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
+static inline int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
+static inline int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
+ int len, u32 dst)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
+static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
+ u32 dst, void *data, int len)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
+#endif /* IS_ENABLED(CONFIG_RPMSG) */
+
/* use a macro to avoid include chaining to get THIS_MODULE */
#define register_rpmsg_driver(drv) \
__register_rpmsg_driver(drv, THIS_MODULE)
@@ -157,14 +272,4 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *,
module_driver(__rpmsg_driver, register_rpmsg_driver, \
unregister_rpmsg_driver)
-int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len);
-int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
-int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len);
-
-int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len);
-int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
-int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len);
-
#endif /* _LINUX_RPMSG_H */
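
Since the send primitives are now stubbed out when CONFIG_RPMSG is disabled, client drivers can call them unconditionally; a minimal sketch of a driver that replies on the channel's default endpoint (driver registration boilerplate omitted, names illustrative):

#include <linux/rpmsg.h>

static int example_rpmsg_cb(struct rpmsg_device *rpdev, void *data, int len,
			    void *priv, u32 src)
{
	/* echo the payload back over the channel's default endpoint */
	return rpmsg_send(rpdev->ept, data, len);
}

static int example_rpmsg_probe(struct rpmsg_device *rpdev)
{
	static char ping[] = "hello";

	return rpmsg_send(rpdev->ept, ping, sizeof(ping));
}
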
diff --git a/include/linux/rpmsg/qcom_smd.h b/include/linux/rpmsg/qcom_smd.h
new file mode 100644
index 000000000000..e674b2e3074b
--- /dev/null
+++ b/include/linux/rpmsg/qcom_smd.h
@@ -0,0 +1,33 @@
+
+#ifndef _LINUX_RPMSG_QCOM_SMD_H
+#define _LINUX_RPMSG_QCOM_SMD_H
+
+#include <linux/device.h>
+
+struct qcom_smd_edge;
+
+#if IS_ENABLED(CONFIG_RPMSG_QCOM_SMD) || IS_ENABLED(CONFIG_QCOM_SMD)
+
+struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
+ struct device_node *node);
+int qcom_smd_unregister_edge(struct qcom_smd_edge *edge);
+
+#else
+
+static inline struct qcom_smd_edge *
+qcom_smd_register_edge(struct device *parent,
+ struct device_node *node)
+{
+ return ERR_PTR(-ENXIO);
+}
+
+static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+ return -ENXIO;
+}
+
+#endif
+
+#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e9c009dc3a4a..4d1905245c7a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -262,20 +262,9 @@ extern char ___assert_task_state[1 - 2*!!(
#define set_task_state(tsk, state_value) \
do { \
(tsk)->task_state_change = _THIS_IP_; \
- smp_store_mb((tsk)->state, (state_value)); \
+ smp_store_mb((tsk)->state, (state_value)); \
} while (0)
-/*
- * set_current_state() includes a barrier so that the write of current->state
- * is correctly serialised wrt the caller's subsequent test of whether to
- * actually sleep:
- *
- * set_current_state(TASK_UNINTERRUPTIBLE);
- * if (do_i_need_to_sleep())
- * schedule();
- *
- * If the caller does not need such serialisation then use __set_current_state()
- */
#define __set_current_state(state_value) \
do { \
current->task_state_change = _THIS_IP_; \
@@ -284,11 +273,19 @@ extern char ___assert_task_state[1 - 2*!!(
#define set_current_state(state_value) \
do { \
current->task_state_change = _THIS_IP_; \
- smp_store_mb(current->state, (state_value)); \
+ smp_store_mb(current->state, (state_value)); \
} while (0)
#else
+/*
+ * @tsk had better be current, or you get to keep the pieces.
+ *
+ * The only reason is that computing current can be more expensive than
+ * using a pointer that's already available.
+ *
+ * Therefore, see set_current_state().
+ */
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value) \
@@ -299,11 +296,34 @@ extern char ___assert_task_state[1 - 2*!!(
* is correctly serialised wrt the caller's subsequent test of whether to
* actually sleep:
*
+ * for (;;) {
* set_current_state(TASK_UNINTERRUPTIBLE);
- * if (do_i_need_to_sleep())
- * schedule();
+ * if (!need_sleep)
+ * break;
*
- * If the caller does not need such serialisation then use __set_current_state()
+ * schedule();
+ * }
+ * __set_current_state(TASK_RUNNING);
+ *
+ * If the caller does not need such serialisation (because, for instance, the
+ * condition test and condition change and wakeup are under the same lock) then
+ * use __set_current_state().
+ *
+ * The above is typically ordered against the wakeup, which does:
+ *
+ * need_sleep = false;
+ * wake_up_state(p, TASK_UNINTERRUPTIBLE);
+ *
+ * Where wake_up_state() (and all other wakeup primitives) imply enough
+ * barriers to order the store of the variable against wakeup.
+ *
+ * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
+ * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
+ * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
+ *
+ * This is obviously fine, since they both store the exact same value.
+ *
+ * Also see the comments of try_to_wake_up().
*/
#define __set_current_state(state_value) \
do { current->state = (state_value); } while (0)
@@ -520,7 +540,11 @@ static inline int get_dumpable(struct mm_struct *mm)
/* leave room for more dump flags */
#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */
-#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */
+/*
+ * This one-shot flag was dropped because the exe file may need to be
+ * changed again on NFS restore
+ */
+//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */
#define MMF_HAS_UPROBES 19 /* has uprobes */
#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
@@ -989,7 +1013,7 @@ enum cpu_idle_type {
* already in a wake queue, the wakeup will happen soon and the second
* waker can just skip it.
*
- * The WAKE_Q macro declares and initializes the list head.
+ * The DEFINE_WAKE_Q macro declares and initializes the list head.
* wake_up_q() does NOT reinitialize the list; it's expected to be
* called near the end of a function, where the fact that the queue is
* not used again will be easy to see by inspection.
@@ -1009,7 +1033,7 @@ struct wake_q_head {
#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
-#define WAKE_Q(name) \
+#define DEFINE_WAKE_Q(name) \
struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
extern void wake_q_add(struct wake_q_head *head,
@@ -1057,6 +1081,8 @@ static inline int cpu_numa_flags(void)
}
#endif
+extern int arch_asym_cpu_priority(int cpu);
+
struct sched_domain_attr {
int relax_domain_level;
};
@@ -1627,7 +1653,10 @@ struct task_struct {
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
- cputime_t utime, stime, utimescaled, stimescaled;
+ cputime_t utime, stime;
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+ cputime_t utimescaled, stimescaled;
+#endif
cputime_t gtime;
struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
@@ -1656,6 +1685,7 @@ struct task_struct {
struct list_head cpu_timers[3];
/* process credentials */
+ const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
const struct cred __rcu *real_cred; /* objective and real subjective task
* credentials (COW) */
const struct cred __rcu *cred; /* effective (overridable) subjective task
@@ -1791,6 +1821,9 @@ struct task_struct {
/* cg_list protected by css_set_lock and tsk->alloc_lock */
struct list_head cg_list;
#endif
+#ifdef CONFIG_INTEL_RDT_A
+ int closid;
+#endif
#ifdef CONFIG_FUTEX
struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
@@ -2220,40 +2253,45 @@ struct task_struct *try_get_task_struct(struct task_struct **ptask);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
cputime_t *utime, cputime_t *stime);
-extern void task_cputime_scaled(struct task_struct *t,
- cputime_t *utimescaled, cputime_t *stimescaled);
extern cputime_t task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
cputime_t *utime, cputime_t *stime)
{
- if (utime)
- *utime = t->utime;
- if (stime)
- *stime = t->stime;
+ *utime = t->utime;
+ *stime = t->stime;
}
+static inline cputime_t task_gtime(struct task_struct *t)
+{
+ return t->gtime;
+}
+#endif
+
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
static inline void task_cputime_scaled(struct task_struct *t,
cputime_t *utimescaled,
cputime_t *stimescaled)
{
- if (utimescaled)
- *utimescaled = t->utimescaled;
- if (stimescaled)
- *stimescaled = t->stimescaled;
+ *utimescaled = t->utimescaled;
+ *stimescaled = t->stimescaled;
}
-
-static inline cputime_t task_gtime(struct task_struct *t)
+#else
+static inline void task_cputime_scaled(struct task_struct *t,
+ cputime_t *utimescaled,
+ cputime_t *stimescaled)
{
- return t->gtime;
+ task_cputime(t, utimescaled, stimescaled);
}
#endif
+
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
/*
* Per process flags
*/
+#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
@@ -2444,6 +2482,10 @@ static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
+#ifndef cpu_relax_yield
+#define cpu_relax_yield() cpu_relax()
+#endif
+
/*
* Do not use outside of architecture code which knows its limitations.
*
@@ -2611,7 +2653,7 @@ extern struct task_struct *idle_task(int cpu);
*/
static inline bool is_idle_task(const struct task_struct *p)
{
- return p->pid == 0;
+ return !!(p->flags & PF_IDLE);
}
extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);
@@ -3508,6 +3550,18 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
+/*
+ * In order to reduce various lock holder preemption latencies provide an
+ * interface to see if a vCPU is currently running or not.
+ *
+ * This allows us to terminate optimistic spin loops and block, analogous to
+ * the native optimistic spin heuristic of testing if the lock owner task is
+ * running or not.
+ */
+#ifndef vcpu_is_preempted
+# define vcpu_is_preempted(cpu) false
+#endif
+
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
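
The vcpu_is_preempted() hook above is meant for optimistic-spinning code; a hedged sketch of the intended pattern, with a hypothetical lock structure:

#include <linux/compiler.h>
#include <linux/sched.h>

struct example_lock {
	struct task_struct *owner;	/* hypothetical */
	int owner_cpu;
};

static bool example_spin_on_owner(struct example_lock *lock)
{
	while (READ_ONCE(lock->owner)) {
		int cpu = READ_ONCE(lock->owner_cpu);

		/* owner's (v)CPU is not running: stop spinning and sleep */
		if (vcpu_is_preempted(cpu))
			return false;

		cpu_relax();
	}
	return true;	/* lock was released while we spun */
}
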
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 22db1e63707e..441145351301 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -36,7 +36,6 @@ extern unsigned int sysctl_numa_balancing_scan_size;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
-extern unsigned int sysctl_sched_shares_window;
int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length,
diff --git a/include/linux/seg6.h b/include/linux/seg6.h
new file mode 100644
index 000000000000..7a66d2b4c5a6
--- /dev/null
+++ b/include/linux/seg6.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_SEG6_H
+#define _LINUX_SEG6_H
+
+#include <uapi/linux/seg6.h>
+
+#endif
diff --git a/include/linux/seg6_genl.h b/include/linux/seg6_genl.h
new file mode 100644
index 000000000000..d6c3fb4f3734
--- /dev/null
+++ b/include/linux/seg6_genl.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_SEG6_GENL_H
+#define _LINUX_SEG6_GENL_H
+
+#include <uapi/linux/seg6_genl.h>
+
+#endif
diff --git a/include/linux/seg6_hmac.h b/include/linux/seg6_hmac.h
new file mode 100644
index 000000000000..da437ebdc6cd
--- /dev/null
+++ b/include/linux/seg6_hmac.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_SEG6_HMAC_H
+#define _LINUX_SEG6_HMAC_H
+
+#include <uapi/linux/seg6_hmac.h>
+
+#endif
diff --git a/include/linux/seg6_iptunnel.h b/include/linux/seg6_iptunnel.h
new file mode 100644
index 000000000000..5377cf6a5a02
--- /dev/null
+++ b/include/linux/seg6_iptunnel.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_SEG6_IPTUNNEL_H
+#define _LINUX_SEG6_IPTUNNEL_H
+
+#include <uapi/linux/seg6_iptunnel.h>
+
+#endif
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h
index a1ba6a5ccdd6..c58c535d12a8 100644
--- a/include/linux/seqno-fence.h
+++ b/include/linux/seqno-fence.h
@@ -20,7 +20,7 @@
#ifndef __LINUX_SEQNO_FENCE_H
#define __LINUX_SEQNO_FENCE_H
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/dma-buf.h>
enum seqno_fence_condition {
@@ -29,15 +29,15 @@ enum seqno_fence_condition {
};
struct seqno_fence {
- struct fence base;
+ struct dma_fence base;
- const struct fence_ops *ops;
+ const struct dma_fence_ops *ops;
struct dma_buf *sync_buf;
uint32_t seqno_ofs;
enum seqno_fence_condition condition;
};
-extern const struct fence_ops seqno_fence_ops;
+extern const struct dma_fence_ops seqno_fence_ops;
/**
* to_seqno_fence - cast a fence to a seqno_fence
@@ -47,7 +47,7 @@ extern const struct fence_ops seqno_fence_ops;
* or the seqno_fence otherwise.
*/
static inline struct seqno_fence *
-to_seqno_fence(struct fence *fence)
+to_seqno_fence(struct dma_fence *fence)
{
if (fence->ops != &seqno_fence_ops)
return NULL;
@@ -83,9 +83,9 @@ to_seqno_fence(struct fence *fence)
* dma-buf for sync_buf, since mapping or unmapping the sync_buf to the
* device's vm can be expensive.
*
- * It is recommended for creators of seqno_fence to call fence_signal
+ * It is recommended for creators of seqno_fence to call dma_fence_signal()
* before destruction. This will prevent possible issues from wraparound at
- * time of issue vs time of check, since users can check fence_is_signaled
+ * time of issue vs time of check, since users can check dma_fence_is_signaled()
* before submitting instructions for the hardware to wait on the fence.
* However, when ops.enable_signaling is not called, it doesn't have to be
* done as soon as possible, just before there's any real danger of seqno
@@ -96,18 +96,18 @@ seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock,
struct dma_buf *sync_buf, uint32_t context,
uint32_t seqno_ofs, uint32_t seqno,
enum seqno_fence_condition cond,
- const struct fence_ops *ops)
+ const struct dma_fence_ops *ops)
{
BUG_ON(!fence || !sync_buf || !ops);
BUG_ON(!ops->wait || !ops->enable_signaling ||
!ops->get_driver_name || !ops->get_timeline_name);
/*
- * ops is used in fence_init for get_driver_name, so needs to be
+ * ops is used in dma_fence_init for get_driver_name, so needs to be
* initialized first
*/
fence->ops = ops;
- fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
+ dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
get_dma_buf(sync_buf);
fence->sync_buf = sync_buf;
fence->seqno_ofs = seqno_ofs;
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 48ec7651989b..61fbb440449c 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -36,6 +36,8 @@ struct plat_serial8250_port {
void (*set_termios)(struct uart_port *,
struct ktermios *new,
struct ktermios *old);
+ void (*set_ldisc)(struct uart_port *,
+ struct ktermios *);
unsigned int (*get_mctrl)(struct uart_port *);
int (*handle_irq)(struct uart_port *);
void (*pm)(struct uart_port *, unsigned int state,
@@ -94,7 +96,7 @@ struct uart_8250_port {
struct uart_port port;
struct timer_list timer; /* "no irq" timer */
struct list_head list; /* ports on this IRQ */
- unsigned short capabilities; /* port capabilities */
+ u32 capabilities; /* port capabilities */
unsigned short bugs; /* port bugs */
bool fifo_bug; /* min RX trigger if enabled */
unsigned int tx_loadsz; /* transmit fifo load size */
@@ -149,6 +151,8 @@ extern int early_serial8250_setup(struct earlycon_device *device,
const char *options);
extern void serial8250_do_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old);
+extern void serial8250_do_set_ldisc(struct uart_port *port,
+ struct ktermios *termios);
extern unsigned int serial8250_do_get_mctrl(struct uart_port *port);
extern int serial8250_do_startup(struct uart_port *port);
extern void serial8250_do_shutdown(struct uart_port *port);
@@ -168,6 +172,6 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
extern void serial8250_set_isa_configurator(void (*v)
(int port, struct uart_port *up,
- unsigned short *capabilities));
+ u32 *capabilities));
#endif
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 344201437017..5def8e830fb0 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -111,8 +111,8 @@ struct uart_icount {
__u32 buf_overrun;
};
-typedef unsigned int __bitwise__ upf_t;
-typedef unsigned int __bitwise__ upstat_t;
+typedef unsigned int __bitwise upf_t;
+typedef unsigned int __bitwise upstat_t;
struct uart_port {
spinlock_t lock; /* port lock */
@@ -123,6 +123,8 @@ struct uart_port {
void (*set_termios)(struct uart_port *,
struct ktermios *new,
struct ktermios *old);
+ void (*set_ldisc)(struct uart_port *,
+ struct ktermios *);
unsigned int (*get_mctrl)(struct uart_port *);
void (*set_mctrl)(struct uart_port *, unsigned int);
int (*startup)(struct uart_port *port);
diff --git a/include/linux/signal.h b/include/linux/signal.h
index b63f63eaa39c..5308304993be 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -97,6 +97,23 @@ static inline int sigisemptyset(sigset_t *set)
}
}
+static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2)
+{
+ switch (_NSIG_WORDS) {
+ case 4:
+ return (set1->sig[3] == set2->sig[3]) &&
+ (set1->sig[2] == set2->sig[2]) &&
+ (set1->sig[1] == set2->sig[1]) &&
+ (set1->sig[0] == set2->sig[0]);
+ case 2:
+ return (set1->sig[1] == set2->sig[1]) &&
+ (set1->sig[0] == set2->sig[0]);
+ case 1:
+ return set1->sig[0] == set2->sig[0];
+ }
+ return 0;
+}
+
#define sigmask(sig) (1UL << ((sig) - 1))
#ifndef __HAVE_ARCH_SIG_SETOPS
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 32810f279f8e..b53c0cfd417e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -645,8 +645,15 @@ struct sk_buff {
struct rb_node rbnode; /* used in netem & tcp stack */
};
struct sock *sk;
- struct net_device *dev;
+ union {
+ struct net_device *dev;
+ /* Some protocols might use this space to store information,
+ * while device pointer would be NULL.
+ * UDP receive path is one user.
+ */
+ unsigned long dev_scratch;
+ };
/*
* This is the control buffer. It is free to use for every
* layer. Please put your private variables there. If you
@@ -1087,7 +1094,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
}
void __skb_get_hash(struct sk_buff *skb);
-u32 __skb_get_hash_symmetric(struct sk_buff *skb);
+u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
const struct flow_keys *keys, int hlen);
@@ -1799,11 +1806,11 @@ static inline unsigned int skb_headlen(const struct sk_buff *skb)
return skb->len - skb->data_len;
}
-static inline int skb_pagelen(const struct sk_buff *skb)
+static inline unsigned int skb_pagelen(const struct sk_buff *skb)
{
- int i, len = 0;
+ unsigned int i, len = 0;
- for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
+ for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
return len + skb_headlen(skb);
}
@@ -1966,6 +1973,8 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
+void skb_condense(struct sk_buff *skb);
+
/**
* skb_headroom - bytes at buffer head
* @skb: buffer to check
@@ -2809,12 +2818,12 @@ static inline int skb_add_data(struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_NONE) {
__wsum csum = 0;
- if (csum_and_copy_from_iter(skb_put(skb, copy), copy,
- &csum, from) == copy) {
+ if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
+ &csum, from)) {
skb->csum = csum_block_add(skb->csum, csum, off);
return 0;
}
- } else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy)
+ } else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
return 0;
__skb_trim(skb, off);
@@ -3033,9 +3042,13 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
const struct sk_buff *skb);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
+ void (*destructor)(struct sock *sk,
+ struct sk_buff *skb),
int *peeked, int *off, int *err,
struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+ void (*destructor)(struct sock *sk,
+ struct sk_buff *skb),
int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
@@ -3214,7 +3227,7 @@ static inline ktime_t net_timedelta(ktime_t t)
static inline ktime_t net_invalid_timestamp(void)
{
- return ktime_set(0, 0);
+ return 0;
}
struct sk_buff *skb_clone_sk(struct sk_buff *skb);
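The added destructor argument lets a protocol do per-socket accounting the moment an skb is dequeued; a hedged caller sketch (the names and the callback body are invented):

#include <linux/skbuff.h>
#include <net/sock.h>

/* Hypothetical destructor: called as the skb is unlinked from the queue. */
static void example_skb_destructor(struct sock *sk, struct sk_buff *skb)
{
	/* e.g. return forward-allocated memory or credits to the socket */
}

static struct sk_buff *example_recv(struct sock *sk, unsigned int flags,
				    int *off, int *err)
{
	int peeked;

	return __skb_recv_datagram(sk, flags, example_skb_destructor,
				   &peeked, off, err);
}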
diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h
index e302c447e057..129bc674dcf5 100644
--- a/include/linux/smc91x.h
+++ b/include/linux/smc91x.h
@@ -39,6 +39,7 @@ struct smc91x_platdata {
unsigned long flags;
unsigned char leda;
unsigned char ledb;
+ bool pxa_u16_align4; /* PXA buggy u16 writes on 4*n+2 addresses */
};
#endif /* __SMC91X_H__ */
diff --git a/include/linux/soc/qcom/wcnss_ctrl.h b/include/linux/soc/qcom/wcnss_ctrl.h
index a37bc5538f19..eab64976a73b 100644
--- a/include/linux/soc/qcom/wcnss_ctrl.h
+++ b/include/linux/soc/qcom/wcnss_ctrl.h
@@ -3,6 +3,19 @@
#include <linux/soc/qcom/smd.h>
+#if IS_ENABLED(CONFIG_QCOM_WCNSS_CTRL)
+
struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb);
+#else
+
+static inline struct qcom_smd_channel*
+qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb)
+{
+ WARN_ON(1);
+ return ERR_PTR(-ENXIO);
+}
+
+#endif
+
#endif
diff --git a/include/linux/soc/renesas/rcar-rst.h b/include/linux/soc/renesas/rcar-rst.h
new file mode 100644
index 000000000000..a18e0783946b
--- /dev/null
+++ b/include/linux/soc/renesas/rcar-rst.h
@@ -0,0 +1,6 @@
+#ifndef __LINUX_SOC_RENESAS_RCAR_RST_H__
+#define __LINUX_SOC_RENESAS_RCAR_RST_H__
+
+int rcar_rst_read_mode_pins(u32 *mode);
+
+#endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */
diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h
new file mode 100644
index 000000000000..0ccbc138c26a
--- /dev/null
+++ b/include/linux/soc/ti/ti_sci_protocol.h
@@ -0,0 +1,249 @@
+/*
+ * Texas Instruments System Control Interface Protocol
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __TISCI_PROTOCOL_H
+#define __TISCI_PROTOCOL_H
+
+/**
+ * struct ti_sci_version_info - version information structure
+ * @abi_major: Major ABI version. Change here implies risk of backward
+ * compatibility break.
+ * @abi_minor: Minor ABI version. Change here implies new feature addition,
+ * or compatible change in ABI.
+ * @firmware_revision: Firmware revision (not usually used).
+ * @firmware_description: Firmware description (not usually used).
+ */
+struct ti_sci_version_info {
+ u8 abi_major;
+ u8 abi_minor;
+ u16 firmware_revision;
+ char firmware_description[32];
+};
+
+struct ti_sci_handle;
+
+/**
+ * struct ti_sci_core_ops - SoC Core Operations
+ * @reboot_device: Reboot the SoC
+ * Returns 0 for successful request (ideally should never return),
+ * else returns corresponding error value.
+ */
+struct ti_sci_core_ops {
+ int (*reboot_device)(const struct ti_sci_handle *handle);
+};
+
+/**
+ * struct ti_sci_dev_ops - Device control operations
+ * @get_device: Command to request a device managed by TISCI
+ * Returns 0 for successful exclusive request, else returns
+ * corresponding error message.
+ * @idle_device: Command to idle a device managed by TISCI
+ * Returns 0 for successful exclusive request, else returns
+ * corresponding error message.
+ * @put_device: Command to release a device managed by TISCI
+ * Returns 0 for successful release, else returns corresponding
+ * error message.
+ * @is_valid: Check if the device ID is a valid ID.
+ * Returns 0 if the ID is valid, else returns corresponding error.
+ * @get_context_loss_count: Command to retrieve context loss counter - this
+ * increments every time the device loses context. Overflow
+ * is possible.
+ * - count: pointer to u32 which will retrieve counter
+ * Returns 0 for successful information request and count has
+ * proper data, else returns corresponding error message.
+ * @is_idle: Reports back about device idle state
+ * - req_state: Returns requested idle state
+ * Returns 0 for successful information request and req_state has
+ * proper data, else returns corresponding error message.
+ * @is_stop: Reports back about device stop state
+ * - req_state: Returns requested stop state
+ * - current_state: Returns current stop state
+ * Returns 0 for successful information request and req_state and
+ * current_state have proper data, else returns corresponding error
+ * message.
+ * @is_on: Reports back about device ON(or active) state
+ * - req_state: Returns requested ON state
+ * - current_state: Returns current ON state
+ * Returns 0 for successful information request and req_state and
+ * current_state have proper data, else returns corresponding error
+ * message.
+ * @is_transitioning: Reports back if the device is in the middle of transition
+ * of state.
+ * -current_state: Returns 'true' if currently transitioning.
+ * @set_device_resets: Command to configure resets for device managed by TISCI.
+ * -reset_state: Device specific reset bit field
+ * Returns 0 for successful request, else returns
+ * corresponding error message.
+ * @get_device_resets: Command to read state of resets for device managed
+ * by TISCI.
+ * -reset_state: pointer to u32 which will retrieve resets
+ * Returns 0 for successful request, else returns
+ * corresponding error message.
+ *
+ * NOTE: for all these functions, the following parameters are generic in
+ * nature:
+ * -handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * -id: Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by the driver for that purpose.
+ */
+struct ti_sci_dev_ops {
+ int (*get_device)(const struct ti_sci_handle *handle, u32 id);
+ int (*idle_device)(const struct ti_sci_handle *handle, u32 id);
+ int (*put_device)(const struct ti_sci_handle *handle, u32 id);
+ int (*is_valid)(const struct ti_sci_handle *handle, u32 id);
+ int (*get_context_loss_count)(const struct ti_sci_handle *handle,
+ u32 id, u32 *count);
+ int (*is_idle)(const struct ti_sci_handle *handle, u32 id,
+ bool *requested_state);
+ int (*is_stop)(const struct ti_sci_handle *handle, u32 id,
+ bool *req_state, bool *current_state);
+ int (*is_on)(const struct ti_sci_handle *handle, u32 id,
+ bool *req_state, bool *current_state);
+ int (*is_transitioning)(const struct ti_sci_handle *handle, u32 id,
+ bool *current_state);
+ int (*set_device_resets)(const struct ti_sci_handle *handle, u32 id,
+ u32 reset_state);
+ int (*get_device_resets)(const struct ti_sci_handle *handle, u32 id,
+ u32 *reset_state);
+};
+
+/**
+ * struct ti_sci_clk_ops - Clock control operations
+ * @get_clock: Request a clock to be activated and managed by the processor
+ * - needs_ssc: 'true' if Spread Spectrum clock is desired.
+ * - can_change_freq: 'true' if frequency change is desired.
+ * - enable_input_term: 'true' if input termination is desired.
+ * @idle_clock: Request a clock managed by the processor to be idled
+ * @put_clock: Release the clock to be auto managed by TISCI
+ * @is_auto: Is the clock being auto managed
+ * - req_state: state indicating if the clock is auto managed
+ * @is_on: Is the clock ON
+ * - req_state: if the clock is requested to be forced ON
+ * - current_state: if the clock is currently ON
+ * @is_off: Is the clock OFF
+ * - req_state: if the clock is requested to be forced OFF
+ * - current_state: if the clock is currently Gated
+ * @set_parent: Set the clock source of a specific device clock
+ * - parent_id: Parent clock identifier to set.
+ * @get_parent: Get the current clock source of a specific device clock
+ * - parent_id: Parent clock identifier which is the parent.
+ * @get_num_parents: Get the number of parents of the current clock source
+ * - num_parents: returns the number of parent clocks.
+ * @get_best_match_freq: Find a best matching frequency for a frequency
+ * range.
+ * - match_freq: Best matching frequency in Hz.
+ * @set_freq: Set the Clock frequency
+ * @get_freq: Get the Clock frequency
+ * - current_freq: Frequency in Hz that the clock is at.
+ *
+ * NOTE: for all these functions, the following parameters are generic in
+ * nature:
+ * -handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * -did: Device identifier this request is for
+ * -cid: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * -min_freq: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * -target_freq: The target clock frequency in Hz. A frequency will be
+ * processed as close to this target frequency as possible.
+ * -max_freq: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ *
+ * Request for the clock - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_clock with put_clock. No refcounting is
+ * managed by the driver for that purpose.
+ */
+struct ti_sci_clk_ops {
+ int (*get_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ bool needs_ssc, bool can_change_freq,
+ bool enable_input_term);
+ int (*idle_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid);
+ int (*put_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid);
+ int (*is_auto)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ bool *req_state);
+ int (*is_on)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ bool *req_state, bool *current_state);
+ int (*is_off)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ bool *req_state, bool *current_state);
+ int (*set_parent)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ u8 parent_id);
+ int (*get_parent)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ u8 *parent_id);
+ int (*get_num_parents)(const struct ti_sci_handle *handle, u32 did,
+ u8 cid, u8 *num_parents);
+ int (*get_best_match_freq)(const struct ti_sci_handle *handle, u32 did,
+ u8 cid, u64 min_freq, u64 target_freq,
+ u64 max_freq, u64 *match_freq);
+ int (*set_freq)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ u64 min_freq, u64 target_freq, u64 max_freq);
+ int (*get_freq)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ u64 *current_freq);
+};
+
+/**
+ * struct ti_sci_ops - Function support for TI SCI
+ * @dev_ops: Device specific operations
+ * @clk_ops: Clock specific operations
+ */
+struct ti_sci_ops {
+ struct ti_sci_core_ops core_ops;
+ struct ti_sci_dev_ops dev_ops;
+ struct ti_sci_clk_ops clk_ops;
+};
+
+/**
+ * struct ti_sci_handle - Handle returned to TI SCI clients for usage.
+ * @version: structure containing version information
+ * @ops: operations that are made available to TI SCI clients
+ */
+struct ti_sci_handle {
+ struct ti_sci_version_info version;
+ struct ti_sci_ops ops;
+};
+
+#if IS_ENABLED(CONFIG_TI_SCI_PROTOCOL)
+const struct ti_sci_handle *ti_sci_get_handle(struct device *dev);
+int ti_sci_put_handle(const struct ti_sci_handle *handle);
+const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev);
+
+#else /* CONFIG_TI_SCI_PROTOCOL */
+
+static inline const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int ti_sci_put_handle(const struct ti_sci_handle *handle)
+{
+ return -EINVAL;
+}
+
+static inline
+const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+#endif /* CONFIG_TI_SCI_PROTOCOL */
+
+#endif /* __TISCI_PROTOCOL_H */
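A rough consumer sketch of the API above (not part of the patch; the device ID, clock index and frequencies are made up), showing the get/put balancing the kernel-doc insists on:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#define EXAMPLE_DEV_ID	42	/* hypothetical TISCI device identifier */

static int example_probe(struct platform_device *pdev)
{
	const struct ti_sci_handle *sci;
	int ret;

	sci = devm_ti_sci_get_handle(&pdev->dev);
	if (IS_ERR(sci))
		return PTR_ERR(sci);

	ret = sci->ops.dev_ops.get_device(sci, EXAMPLE_DEV_ID);
	if (ret)
		return ret;

	/* Ask clock 0 of the device for roughly 200 MHz. */
	ret = sci->ops.clk_ops.set_freq(sci, EXAMPLE_DEV_ID, 0,
					190000000, 200000000, 210000000);
	if (ret)
		sci->ops.dev_ops.put_device(sci, EXAMPLE_DEV_ID);

	return ret;
}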
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 4b743ac35396..75c6bd0ac605 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -442,6 +442,7 @@ struct spi_master {
#define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */
#define SPI_MASTER_MUST_RX BIT(3) /* requires rx */
#define SPI_MASTER_MUST_TX BIT(4) /* requires tx */
+#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */
/*
* on some hardware transfer / message size may be constrained
diff --git a/include/linux/stm.h b/include/linux/stm.h
index 8369d8a8cabd..210ff2292361 100644
--- a/include/linux/stm.h
+++ b/include/linux/stm.h
@@ -133,7 +133,7 @@ int stm_source_register_device(struct device *parent,
struct stm_source_data *data);
void stm_source_unregister_device(struct stm_source_data *data);
-int stm_source_write(struct stm_source_data *data, unsigned int chan,
- const char *buf, size_t count);
+int notrace stm_source_write(struct stm_source_data *data, unsigned int chan,
+ const char *buf, size_t count);
#endif /* _STM_H_ */
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 705840e0438f..266dab9ad782 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -88,6 +88,9 @@ struct stmmac_mdio_bus_data {
struct stmmac_dma_cfg {
int pbl;
+ int txpbl;
+ int rxpbl;
+ bool pblx8;
int fixed_burst;
int mixed_burst;
bool aal;
@@ -135,8 +138,6 @@ struct plat_stmmacenet_data {
void (*bus_setup)(void __iomem *ioaddr);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
- void (*suspend)(struct platform_device *pdev, void *priv);
- void (*resume)(struct platform_device *pdev, void *priv);
void *bsp_priv;
struct stmmac_axi *axi;
int has_gmac4;
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index cc3ae16eac68..757fb963696c 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -79,7 +79,6 @@ struct svc_rdma_op_ctxt {
struct ib_cqe reg_cqe;
struct ib_cqe inv_cqe;
struct list_head dto_q;
- enum ib_wc_status wc_status;
u32 byte_len;
u32 position;
struct svcxprt_rdma *xprt;
@@ -139,7 +138,7 @@ struct svcxprt_rdma {
int sc_max_sge_rd; /* max sge for read target */
bool sc_snd_w_inv; /* OK to use Send With Invalidate */
- atomic_t sc_sq_count; /* Number of SQ WR on queue */
+ atomic_t sc_sq_avail; /* SQEs ready to be consumed */
unsigned int sc_sq_depth; /* Depth of SQ */
unsigned int sc_rq_depth; /* Depth of RQ */
u32 sc_max_requests; /* Forward credits */
@@ -148,7 +147,6 @@ struct svcxprt_rdma {
struct ib_pd *sc_pd;
- atomic_t sc_dma_used;
spinlock_t sc_ctxt_lock;
struct list_head sc_ctxts;
int sc_ctxt_used;
@@ -200,7 +198,6 @@ static inline void svc_rdma_count_mappings(struct svcxprt_rdma *rdma,
struct svc_rdma_op_ctxt *ctxt)
{
ctxt->mapped_sges++;
- atomic_inc(&rdma->sc_dma_used);
}
/* svc_rdma_backchannel.c */
@@ -236,8 +233,6 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *,
struct svc_rdma_req_map *, bool);
extern int svc_rdma_sendto(struct svc_rqst *);
-extern struct rpcrdma_read_chunk *
- svc_rdma_get_read_chunk(struct rpcrdma_msg *);
extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
int);
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index d9718378a8be..0c729c3c8549 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -194,6 +194,8 @@ struct platform_freeze_ops {
};
#ifdef CONFIG_SUSPEND
+extern suspend_state_t mem_sleep_default;
+
/**
* suspend_set_ops - set platform dependent suspend operations
* @ops: The new suspend operations to set.
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a56523cefb9b..09f4be179ff3 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -246,39 +246,7 @@ struct swap_info_struct {
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);
-extern struct list_lru workingset_shadow_nodes;
-
-static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
-{
- return node->count & RADIX_TREE_COUNT_MASK;
-}
-
-static inline void workingset_node_pages_inc(struct radix_tree_node *node)
-{
- node->count++;
-}
-
-static inline void workingset_node_pages_dec(struct radix_tree_node *node)
-{
- VM_WARN_ON_ONCE(!workingset_node_pages(node));
- node->count--;
-}
-
-static inline unsigned int workingset_node_shadows(struct radix_tree_node *node)
-{
- return node->count >> RADIX_TREE_COUNT_SHIFT;
-}
-
-static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
-{
- node->count += 1U << RADIX_TREE_COUNT_SHIFT;
-}
-
-static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
-{
- VM_WARN_ON_ONCE(!workingset_node_shadows(node));
- node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
-}
+void workingset_update_node(struct radix_tree_node *node, void *private);
/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
@@ -351,6 +319,9 @@ extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
#ifdef CONFIG_SWAP
+
+#include <linux/blk_types.h> /* for bio_end_io_t */
+
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 5f81f8a187f2..4ee479f2f355 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -9,7 +9,13 @@ struct device;
struct page;
struct scatterlist;
-extern int swiotlb_force;
+enum swiotlb_force {
+ SWIOTLB_NORMAL, /* Default - depending on HW DMA mask etc. */
+ SWIOTLB_FORCE, /* swiotlb=force */
+ SWIOTLB_NO_FORCE, /* swiotlb=noforce */
+};
+
+extern enum swiotlb_force swiotlb_force;
/*
* Maximum allowable number of contiguous slabs to map,
@@ -44,11 +50,13 @@ enum dma_sync_target {
extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
dma_addr_t tbl_dma_addr,
phys_addr_t phys, size_t size,
- enum dma_data_direction dir);
+ enum dma_data_direction dir,
+ unsigned long attrs);
extern void swiotlb_tbl_unmap_single(struct device *hwdev,
phys_addr_t tlb_addr,
- size_t size, enum dma_data_direction dir);
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
extern void swiotlb_tbl_sync_single(struct device *hwdev,
phys_addr_t tlb_addr,
@@ -73,14 +81,6 @@ extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
unsigned long attrs);
extern int
-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir);
-
-extern void
-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir);
-
-extern int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
enum dma_data_direction dir,
unsigned long attrs);
@@ -114,11 +114,14 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask);
#ifdef CONFIG_SWIOTLB
extern void __init swiotlb_free(void);
+unsigned int swiotlb_max_segment(void);
#else
static inline void swiotlb_free(void) { }
+static inline unsigned int swiotlb_max_segment(void) { return 0; }
#endif
extern void swiotlb_print_info(void);
extern int is_swiotlb_buffer(phys_addr_t paddr);
+extern void swiotlb_set_max_segment(unsigned int);
#endif /* __LINUX_SWIOTLB_H */
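With swiotlb_force now an enum, a hypothetical arch mapping path compares against the named states instead of testing the old int for truth (sketch only; the real reachability check is omitted):

#include <linux/swiotlb.h>

static bool example_needs_bounce(void)
{
	if (swiotlb_force == SWIOTLB_FORCE)
		return true;	/* swiotlb=force: always bounce */
	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return false;	/* swiotlb=noforce: never bounce */

	/* SWIOTLB_NORMAL: defer to the usual DMA-mask reachability check. */
	return false;
}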
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
index aa17ccfc2f57..3e3ab84fc4cd 100644
--- a/include/linux/sync_file.h
+++ b/include/linux/sync_file.h
@@ -18,8 +18,8 @@
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/spinlock.h>
-#include <linux/fence.h>
-#include <linux/fence-array.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
/**
* struct sync_file - sync file to export to the userspace
@@ -41,13 +41,13 @@ struct sync_file {
wait_queue_head_t wq;
- struct fence *fence;
- struct fence_cb cb;
+ struct dma_fence *fence;
+ struct dma_fence_cb cb;
};
-#define POLL_ENABLED FENCE_FLAG_USER_BITS
+#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS
-struct sync_file *sync_file_create(struct fence *fence);
-struct fence *sync_file_get_fence(int fd);
+struct sync_file *sync_file_create(struct dma_fence *fence);
+struct dma_fence *sync_file_get_fence(int fd);
#endif /* _LINUX_SYNC_H */
diff --git a/include/linux/sys_soc.h b/include/linux/sys_soc.h
index 2739ccb69571..bed223b70217 100644
--- a/include/linux/sys_soc.h
+++ b/include/linux/sys_soc.h
@@ -13,6 +13,7 @@ struct soc_device_attribute {
const char *family;
const char *revision;
const char *soc_id;
+ const void *data;
};
/**
@@ -34,4 +35,12 @@ void soc_device_unregister(struct soc_device *soc_dev);
*/
struct device *soc_device_to_device(struct soc_device *soc);
+#ifdef CONFIG_SOC_BUS
+const struct soc_device_attribute *soc_device_match(
+ const struct soc_device_attribute *matches);
+#else
+static inline const struct soc_device_attribute *soc_device_match(
+ const struct soc_device_attribute *matches) { return NULL; }
+#endif
+
#endif /* __SOC_BUS_H */
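The new soc_device_match() (together with the @data field) suits a quirk table keyed on SoC family/revision; a hypothetical sketch with invented strings:

#include <linux/sys_soc.h>

/* Hypothetical quirk table; the empty entry terminates the array. */
static const struct soc_device_attribute example_quirks[] = {
	{ .family = "Example SoC", .revision = "1.0", .data = (void *)1 },
	{ .family = "Example SoC", .revision = "2.0" },
	{ /* sentinel */ }
};

static bool example_needs_quirk(void)
{
	const struct soc_device_attribute *match;

	match = soc_device_match(example_quirks);
	return match && match->data;	/* NULL when nothing matched */
}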
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index a17ae7b85218..fc5848dad7a4 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -123,6 +123,7 @@ struct tcp_request_sock {
u32 txhash;
u32 rcv_isn;
u32 snt_isn;
+ u32 ts_off;
u32 last_oow_ack_time; /* last SYNACK */
u32 rcv_nxt; /* the ack # by SYNACK. For
* FastOpen it's the seq#
@@ -176,8 +177,6 @@ struct tcp_sock {
* sum(delta(snd_una)), or how many bytes
* were acked.
*/
- struct u64_stats_sync syncp; /* protects 64bit vars (cf tcp_get_info()) */
-
u32 snd_una; /* First byte we want an ack for */
u32 snd_sml; /* Last byte of the most recently transmitted small packet */
u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
@@ -187,7 +186,6 @@ struct tcp_sock {
u32 tsoffset; /* timestamp offset */
struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
- unsigned long tsq_flags;
/* Data for direct copy to user */
struct {
@@ -213,8 +211,11 @@ struct tcp_sock {
u8 reord; /* reordering detected */
} rack;
u16 advmss; /* Advertised MSS */
- u8 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
- unused:7;
+ u32 chrono_start; /* Start time in jiffies of a TCP chrono */
+ u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
+ u8 chrono_type:2, /* current chronograph type */
+ rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
+ unused:5;
u8 nonagle : 4,/* Disable Nagle algorithm? */
thin_lto : 1,/* Use linear timeouts for thin streams */
thin_dupack : 1,/* Fast retransmit on first dupack */
@@ -362,7 +363,7 @@ struct tcp_sock {
u32 *saved_syn;
};
-enum tsq_flags {
+enum tsq_enum {
TSQ_THROTTLED,
TSQ_QUEUED,
TCP_TSQ_DEFERRED, /* tcp_tasklet_func() found socket was owned */
@@ -373,6 +374,15 @@ enum tsq_flags {
*/
};
+enum tsq_flags {
+ TSQF_THROTTLED = (1UL << TSQ_THROTTLED),
+ TSQF_QUEUED = (1UL << TSQ_QUEUED),
+ TCPF_TSQ_DEFERRED = (1UL << TCP_TSQ_DEFERRED),
+ TCPF_WRITE_TIMER_DEFERRED = (1UL << TCP_WRITE_TIMER_DEFERRED),
+ TCPF_DELACK_TIMER_DEFERRED = (1UL << TCP_DELACK_TIMER_DEFERRED),
+ TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED),
+};
+
static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
return (struct tcp_sock *)sk;
@@ -427,4 +437,6 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp)
tp->saved_syn = NULL;
}
+struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk);
+
#endif /* _LINUX_TCP_H */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 511182a88e76..e275e98bdceb 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -28,6 +28,7 @@
#include <linux/of.h>
#include <linux/idr.h>
#include <linux/device.h>
+#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <uapi/linux/thermal.h>
@@ -204,6 +205,7 @@ struct thermal_zone_device {
int id;
char type[THERMAL_NAME_LENGTH];
struct device device;
+ struct attribute_group trips_attribute_group;
struct thermal_attr *trip_temp_attrs;
struct thermal_attr *trip_type_attrs;
struct thermal_attr *trip_hyst_attrs;
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 2873baf5372a..58373875e8ee 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -9,50 +9,17 @@
#include <linux/types.h>
#include <linux/bug.h>
-
-struct timespec;
-struct compat_timespec;
+#include <linux/restart_block.h>
#ifdef CONFIG_THREAD_INFO_IN_TASK
-#define current_thread_info() ((struct thread_info *)current)
-#endif
-
/*
- * System call restart block.
+ * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
+ * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
+ * including <asm/current.h> can cause a circular dependency on some platforms.
*/
-struct restart_block {
- long (*fn)(struct restart_block *);
- union {
- /* For futex_wait and futex_wait_requeue_pi */
- struct {
- u32 __user *uaddr;
- u32 val;
- u32 flags;
- u32 bitset;
- u64 time;
- u32 __user *uaddr2;
- } futex;
- /* For nanosleep */
- struct {
- clockid_t clockid;
- struct timespec __user *rmtp;
-#ifdef CONFIG_COMPAT
- struct compat_timespec __user *compat_rmtp;
+#include <asm/current.h>
+#define current_thread_info() ((struct thread_info *)current)
#endif
- u64 expires;
- } nanosleep;
- /* For poll */
- struct {
- struct pollfd __user *ufds;
- int nfds;
- int has_timeout;
- unsigned long tv_sec;
- unsigned long tv_nsec;
- } poll;
- };
-};
-
-extern long do_no_restart_syscall(struct restart_block *parm);
#include <linux/bitops.h>
#include <asm/thread_info.h>
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 62be0786d6d0..a04fea19676f 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -127,9 +127,7 @@ static inline void tick_nohz_idle_exit(void) { }
static inline ktime_t tick_nohz_get_sleep_length(void)
{
- ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };
-
- return len;
+ return NSEC_PER_SEC / HZ;
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
diff --git a/include/linux/time.h b/include/linux/time.h
index 4cea09d94208..23f0f5ce3090 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -172,8 +172,6 @@ extern int do_setitimer(int which, struct itimerval *value,
struct itimerval *ovalue);
extern int do_getitimer(int which, struct itimerval *value);
-extern unsigned int alarm_setitimer(unsigned int seconds);
-
extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags);
struct tms;
diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h
index 4382035a75bb..2496ad4cfc99 100644
--- a/include/linux/timecounter.h
+++ b/include/linux/timecounter.h
@@ -20,7 +20,7 @@
#include <linux/types.h>
/* simplify initialization of mask field */
-#define CYCLECOUNTER_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
+#define CYCLECOUNTER_MASK(bits) (u64)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
/**
* struct cyclecounter - hardware abstraction for a free running counter
@@ -37,8 +37,8 @@
* @shift: cycle to nanosecond divisor (power of two)
*/
struct cyclecounter {
- cycle_t (*read)(const struct cyclecounter *cc);
- cycle_t mask;
+ u64 (*read)(const struct cyclecounter *cc);
+ u64 mask;
u32 mult;
u32 shift;
};
@@ -63,7 +63,7 @@ struct cyclecounter {
*/
struct timecounter {
const struct cyclecounter *cc;
- cycle_t cycle_last;
+ u64 cycle_last;
u64 nsec;
u64 mask;
u64 frac;
@@ -77,7 +77,7 @@ struct timecounter {
* @frac: pointer to storage for the fractional nanoseconds.
*/
static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
- cycle_t cycles, u64 mask, u64 *frac)
+ u64 cycles, u64 mask, u64 *frac)
{
u64 ns = (u64) cycles;
@@ -134,6 +134,6 @@ extern u64 timecounter_read(struct timecounter *tc);
* in the past.
*/
extern u64 timecounter_cyc2time(struct timecounter *tc,
- cycle_t cycle_tstamp);
+ u64 cycle_tstamp);
#endif
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index e88005459035..110f4532188c 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -29,9 +29,9 @@
*/
struct tk_read_base {
struct clocksource *clock;
- cycle_t (*read)(struct clocksource *cs);
- cycle_t mask;
- cycle_t cycle_last;
+ u64 (*read)(struct clocksource *cs);
+ u64 mask;
+ u64 cycle_last;
u32 mult;
u32 shift;
u64 xtime_nsec;
@@ -97,7 +97,7 @@ struct timekeeper {
struct timespec64 raw_time;
/* The following members are for timekeeping internal use */
- cycle_t cycle_interval;
+ u64 cycle_interval;
u64 xtime_interval;
s64 xtime_remainder;
u32 raw_interval;
@@ -136,7 +136,7 @@ extern void update_vsyscall_tz(void);
extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
struct clocksource *c, u32 mult,
- cycle_t cycle_last);
+ u64 cycle_last);
extern void update_vsyscall_tz(void);
#else
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 09168c52ab64..d2e804e15c3e 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -249,6 +249,7 @@ static inline u64 ktime_get_raw_ns(void)
extern u64 ktime_get_mono_fast_ns(void);
extern u64 ktime_get_raw_fast_ns(void);
+extern u64 ktime_get_boot_fast_ns(void);
/*
* Timespec interfaces utilizing the ktime based ones
@@ -292,7 +293,7 @@ extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw,
* @cs_was_changed_seq: The sequence number of clocksource change events
*/
struct system_time_snapshot {
- cycle_t cycles;
+ u64 cycles;
ktime_t real;
ktime_t raw;
unsigned int clock_was_set_seq;
@@ -320,7 +321,7 @@ struct system_device_crosststamp {
* timekeeping code to verify comparability of two cycle values
*/
struct system_counterval_t {
- cycle_t cycles;
+ u64 cycles;
struct clocksource *cs;
};
diff --git a/include/linux/trace.h b/include/linux/trace.h
new file mode 100644
index 000000000000..9330a58e2651
--- /dev/null
+++ b/include/linux/trace.h
@@ -0,0 +1,28 @@
+#ifndef _LINUX_TRACE_H
+#define _LINUX_TRACE_H
+
+#ifdef CONFIG_TRACING
+/*
+ * The trace export - an export of Ftrace output. The trace_export
+ * can process traces and export them to a registered destination as
+ * an addition to Ftrace's only current output, the ring buffer.
+ *
+ * If you want traces to be sent somewhere other than the ring
+ * buffer only, you just need to register a new trace_export and implement
+ * its own .write() function for writing traces to that storage.
+ *
+ * next - pointer to the next trace_export
+ * write - copy traces which have been dealt with by ->commit() to
+ * the destination
+ */
+struct trace_export {
+ struct trace_export __rcu *next;
+ void (*write)(const void *, unsigned int);
+};
+
+int register_ftrace_export(struct trace_export *export);
+int unregister_ftrace_export(struct trace_export *export);
+
+#endif /* CONFIG_TRACING */
+
+#endif /* _LINUX_TRACE_H */
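Building on the comment above, a minimal hypothetical trace_export that only counts exported bytes (all names invented; a real export would copy the records to its destination):

#include <linux/module.h>
#include <linux/trace.h>

static unsigned long example_exported_bytes;

/* ->write() receives the binary trace entry and its length. */
static void example_trace_write(const void *buf, unsigned int len)
{
	example_exported_bytes += len;	/* forward to MMIO/STM/etc. here */
}

static struct trace_export example_export = {
	.write = example_trace_write,
};

static int __init example_export_init(void)
{
	return register_ftrace_export(&example_export);
}

static void __exit example_export_exit(void)
{
	unregister_ftrace_export(&example_export);
}

module_init(example_export_init);
module_exit(example_export_exit);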
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h
index 4ac89acb6136..a03192052066 100644
--- a/include/linux/tracepoint-defs.h
+++ b/include/linux/tracepoint-defs.h
@@ -29,7 +29,7 @@ struct tracepoint_func {
struct tracepoint {
const char *name; /* Tracepoint name */
struct static_key key;
- void (*regfunc)(void);
+ int (*regfunc)(void);
void (*unregfunc)(void);
struct tracepoint_func __rcu *funcs;
};
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index be586c632a0c..f72fcfe0e66a 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -81,7 +81,7 @@ static inline void tracepoint_synchronize_unregister(void)
}
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
-extern void syscall_regfunc(void);
+extern int syscall_regfunc(void);
extern void syscall_unregfunc(void);
#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
diff --git a/include/linux/types.h b/include/linux/types.h
index baf718324f4a..1e7bd24848fc 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -154,8 +154,8 @@ typedef u64 dma_addr_t;
typedef u32 dma_addr_t;
#endif
-typedef unsigned __bitwise__ gfp_t;
-typedef unsigned __bitwise__ fmode_t;
+typedef unsigned __bitwise gfp_t;
+typedef unsigned __bitwise fmode_t;
#ifdef CONFIG_PHYS_ADDR_T_64BIT
typedef u64 phys_addr_t;
@@ -228,8 +228,5 @@ struct callback_head {
typedef void (*rcu_callback_t)(struct rcu_head *head);
typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
-/* clocksource cycle base type */
-typedef u64 cycle_t;
-
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_TYPES_H */
diff --git a/include/linux/udp.h b/include/linux/udp.h
index d1fd8cd39478..c0f530809d1f 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -79,6 +79,9 @@ struct udp_sock {
int (*gro_complete)(struct sock *sk,
struct sk_buff *skb,
int nhoff);
+
+ /* This field is dirtied by udp_recvmsg() */
+ int forward_deficit;
};
static inline struct udp_sock *udp_sk(const struct sock *sk)
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 6e22b544d039..804e34c6f981 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -89,7 +89,9 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
+bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
+bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
@@ -125,7 +127,7 @@ static inline bool iter_is_iovec(const struct iov_iter *i)
*
* The ?: is just for type safety.
*/
-#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & RW_MASK)
+#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & (READ | WRITE))
/*
* Cap the iov_iter by given limit; note that the second argument is
@@ -155,6 +157,7 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
}
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
int import_iovec(int type, const struct iovec __user * uvector,
unsigned nr_segs, unsigned fast_segs,
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 4a29c75b146e..0a294e950df8 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -27,6 +27,7 @@
#include <linux/errno.h>
#include <linux/rbtree.h>
#include <linux/types.h>
+#include <linux/wait.h>
struct vm_area_struct;
struct mm_struct;
diff --git a/include/linux/usb.h b/include/linux/usb.h
index eba1f10e8cfd..7e68259360de 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1160,7 +1160,7 @@ extern struct bus_type usb_bus_type;
* @minor_base: the start of the minor range for this driver.
*
* This structure is used for the usb_register_dev() and
- * usb_unregister_dev() functions, to consolidate a number of the
+ * usb_deregister_dev() functions, to consolidate a number of the
* parameters used for them.
*/
struct usb_class_driver {
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 8e81f9eb95e4..e4516e9ded0f 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -429,7 +429,9 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
*/
static inline size_t usb_ep_align(struct usb_ep *ep, size_t len)
{
- return round_up(len, (size_t)le16_to_cpu(ep->desc->wMaxPacketSize));
+ int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc) & 0x7ff;
+
+ return round_up(len, max_packet_size);
}
/**
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 66fc13705ab7..40edf6a8533e 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -566,21 +566,22 @@ extern void usb_ep0_reinit(struct usb_device *);
((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
/* class requests from the USB 2.0 hub spec, table 11-15 */
+#define HUB_CLASS_REQ(dir, type, request) ((((dir) | (type)) << 8) | (request))
/* GetBusState and SetHubDescriptor are optional, omitted */
-#define ClearHubFeature (0x2000 | USB_REQ_CLEAR_FEATURE)
-#define ClearPortFeature (0x2300 | USB_REQ_CLEAR_FEATURE)
-#define GetHubDescriptor (0xa000 | USB_REQ_GET_DESCRIPTOR)
-#define GetHubStatus (0xa000 | USB_REQ_GET_STATUS)
-#define GetPortStatus (0xa300 | USB_REQ_GET_STATUS)
-#define SetHubFeature (0x2000 | USB_REQ_SET_FEATURE)
-#define SetPortFeature (0x2300 | USB_REQ_SET_FEATURE)
+#define ClearHubFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_CLEAR_FEATURE)
+#define ClearPortFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_CLEAR_FEATURE)
+#define GetHubDescriptor HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_DESCRIPTOR)
+#define GetHubStatus HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_STATUS)
+#define GetPortStatus HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, USB_REQ_GET_STATUS)
+#define SetHubFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_SET_FEATURE)
+#define SetPortFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_SET_FEATURE)
/*-------------------------------------------------------------------------*/
/* class requests from USB 3.1 hub spec, table 10-7 */
-#define SetHubDepth (0x2000 | HUB_SET_DEPTH)
-#define GetPortErrorCount (0xa300 | HUB_GET_PORT_ERR_COUNT)
+#define SetHubDepth HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, HUB_SET_DEPTH)
+#define GetPortErrorCount HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, HUB_GET_PORT_ERR_COUNT)
/*
* Generic bandwidth allocation constants/support
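The macro rewrite is value-preserving: USB_DIR_OUT (0x00) or USB_DIR_IN (0x80) OR-ed with USB_RT_HUB (0x20) or USB_RT_PORT (0x23) and shifted left by 8 reproduces the old 0x2000/0x2300/0xa000/0xa300 prefixes. A hypothetical compile-time check spells that out:

#include <linux/bug.h>
#include <linux/usb/hcd.h>

/* Sketch only: assert that the refactored requests keep their old values. */
static inline void example_hub_req_sanity_check(void)
{
	BUILD_BUG_ON(ClearHubFeature  != (0x2000 | USB_REQ_CLEAR_FEATURE));
	BUILD_BUG_ON(ClearPortFeature != (0x2300 | USB_REQ_CLEAR_FEATURE));
	BUILD_BUG_ON(GetHubStatus     != (0xa000 | USB_REQ_GET_STATUS));
	BUILD_BUG_ON(GetPortStatus    != (0xa300 | USB_REQ_GET_STATUS));
}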
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index dd66a952e8cd..11b92b047a1e 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -27,7 +27,7 @@
#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
-extern int handle_userfault(struct fault_env *fe, unsigned long reason);
+extern int handle_userfault(struct vm_fault *vmf, unsigned long reason);
extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
unsigned long src_start, unsigned long len);
@@ -55,7 +55,7 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma)
#else /* CONFIG_USERFAULTFD */
/* mm helpers */
-static inline int handle_userfault(struct fault_env *fe, unsigned long reason)
+static inline int handle_userfault(struct vm_fault *vmf, unsigned long reason)
{
return VM_FAULT_SIGBUS;
}
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 0ecae0b1cd34..edf9b2cad277 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -75,7 +75,16 @@ struct vfio_iommu_driver_ops {
struct iommu_group *group);
void (*detach_group)(void *iommu_data,
struct iommu_group *group);
-
+ int (*pin_pages)(void *iommu_data, unsigned long *user_pfn,
+ int npage, int prot,
+ unsigned long *phys_pfn);
+ int (*unpin_pages)(void *iommu_data,
+ unsigned long *user_pfn, int npage);
+ int (*register_notifier)(void *iommu_data,
+ unsigned long *events,
+ struct notifier_block *nb);
+ int (*unregister_notifier)(void *iommu_data,
+ struct notifier_block *nb);
};
extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
@@ -92,6 +101,36 @@ extern int vfio_external_user_iommu_id(struct vfio_group *group);
extern long vfio_external_check_extension(struct vfio_group *group,
unsigned long arg);
+#define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long))
+
+extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn,
+ int npage, int prot, unsigned long *phys_pfn);
+extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn,
+ int npage);
+
+/* each type has independent events */
+enum vfio_notify_type {
+ VFIO_IOMMU_NOTIFY = 0,
+ VFIO_GROUP_NOTIFY = 1,
+};
+
+/* events for VFIO_IOMMU_NOTIFY */
+#define VFIO_IOMMU_NOTIFY_DMA_UNMAP BIT(0)
+
+/* events for VFIO_GROUP_NOTIFY */
+#define VFIO_GROUP_NOTIFY_SET_KVM BIT(0)
+
+extern int vfio_register_notifier(struct device *dev,
+ enum vfio_notify_type type,
+ unsigned long *required_events,
+ struct notifier_block *nb);
+extern int vfio_unregister_notifier(struct device *dev,
+ enum vfio_notify_type type,
+ struct notifier_block *nb);
+
+struct kvm;
+extern void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm);
+
/*
* Sub-module helpers
*/
@@ -103,6 +142,13 @@ extern struct vfio_info_cap_header *vfio_info_cap_add(
struct vfio_info_cap *caps, size_t size, u16 id, u16 version);
extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset);
+extern int vfio_info_add_capability(struct vfio_info_cap *caps,
+ int cap_type_id, void *cap_type);
+
+extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
+ int num_irqs, int max_irq_type,
+ size_t *data_size);
+
struct pci_dev;
#ifdef CONFIG_EEH
extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev);
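A mediated-device vendor driver would typically pair the new notifier and pinning hooks; a hedged sketch (the PFN value, event handling and error paths are placeholders):

#include <linux/iommu.h>
#include <linux/notifier.h>
#include <linux/vfio.h>

/* Hypothetical notifier: react to DMA unmaps by unpinning affected pages. */
static int example_iommu_notifier(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		/* ... look up and vfio_unpin_pages() the affected PFNs ... */
	}
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_iommu_notifier,
};

static int example_setup(struct device *dev)
{
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	unsigned long user_pfn = 0x1234;	/* made-up guest/user PFN */
	unsigned long phys_pfn;
	int ret;

	ret = vfio_register_notifier(dev, VFIO_IOMMU_NOTIFY, &events,
				     &example_nb);
	if (ret)
		return ret;

	/* Pin one page for device DMA; returns the number of pages pinned. */
	ret = vfio_pin_pages(dev, &user_pfn, 1, IOMMU_READ | IOMMU_WRITE,
			     &phys_pfn);
	return ret < 0 ? ret : 0;
}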
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 1c912f85e041..66204007d7ac 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -58,7 +58,7 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
struct virtio_net_hdr *hdr,
bool little_endian)
{
- memset(hdr, 0, sizeof(*hdr));
+ memset(hdr, 0, sizeof(*hdr)); /* no info leak */
if (skb_is_gso(skb)) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
@@ -98,4 +98,4 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
return 0;
}
-#endif /* _LINUX_VIRTIO_BYTEORDER */
+#endif /* _LINUX_VIRTIO_NET_H */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 3d9d786a943c..d68edffbf142 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -82,6 +82,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
const void *caller);
extern void vfree(const void *addr);
+extern void vfree_atomic(const void *addr);
extern void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot);
diff --git a/include/linux/vme.h b/include/linux/vme.h
index ea6095deba20..8c589176c2f8 100644
--- a/include/linux/vme.h
+++ b/include/linux/vme.h
@@ -113,7 +113,6 @@ struct vme_driver {
int (*match)(struct vme_dev *);
int (*probe)(struct vme_dev *);
int (*remove)(struct vme_dev *);
- void (*shutdown)(void);
struct device_driver driver;
struct list_head devices;
};
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 6abd24f258bc..833fdd4794a0 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -191,5 +191,7 @@ extern void vt_set_led_state(int console, int leds);
extern void vt_kbd_con_start(int console);
extern void vt_kbd_con_stop(int console);
+void vc_scrolldelta_helper(struct vc_data *c, int lines,
+ unsigned int rolled_over, void *_base, unsigned int size);
#endif /* _VT_KERN_H */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 2408e8d5c05c..1421132e9086 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -510,7 +510,7 @@ do { \
hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
HRTIMER_MODE_REL); \
hrtimer_init_sleeper(&__t, current); \
- if ((timeout).tv64 != KTIME_MAX) \
+ if ((timeout) != KTIME_MAX) \
hrtimer_start_range_ns(&__t.timer, timeout, \
current->timer_slack_ns, \
HRTIMER_MODE_REL); \
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index fc6e22186405..a26cc437293c 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -119,18 +119,30 @@ struct delayed_work {
int cpu;
};
-/*
- * A struct for workqueue attributes. This can be used to change
- * attributes of an unbound workqueue.
+/**
+ * struct workqueue_attrs - A struct for workqueue attributes.
*
- * Unlike other fields, ->no_numa isn't a property of a worker_pool. It
- * only modifies how apply_workqueue_attrs() select pools and thus doesn't
- * participate in pool hash calculations or equality comparisons.
+ * This can be used to change attributes of an unbound workqueue.
*/
struct workqueue_attrs {
- int nice; /* nice level */
- cpumask_var_t cpumask; /* allowed CPUs */
- bool no_numa; /* disable NUMA affinity */
+ /**
+ * @nice: nice level
+ */
+ int nice;
+
+ /**
+ * @cpumask: allowed CPUs
+ */
+ cpumask_var_t cpumask;
+
+ /**
+ * @no_numa: disable NUMA affinity
+ *
+ * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It
+ * only modifies how :c:func:`apply_workqueue_attrs` select pools and thus
+ * doesn't participate in pool hash calculations or equality comparisons.
+ */
+ bool no_numa;
};
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
@@ -272,7 +284,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
/*
* Workqueue flags and constants. For details, please refer to
- * Documentation/workqueue.txt.
+ * Documentation/core-api/workqueue.rst.
*/
enum {
WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
@@ -370,7 +382,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
* @args...: args for @fmt
*
* Allocate a workqueue with the specified parameters. For detailed
- * information on WQ_* flags, please refer to Documentation/workqueue.txt.
+ * information on WQ_* flags, please refer to
+ * Documentation/core-api/workqueue.rst.
*
* The __lock_name macro dance is to guarantee that single lock_class_key
* doesn't end up with different names, which isn't allowed by lockdep.
@@ -590,14 +603,6 @@ static inline bool schedule_delayed_work(struct delayed_work *dwork,
return queue_delayed_work(system_wq, dwork, delay);
}
-/**
- * keventd_up - is workqueue initialized yet?
- */
-static inline bool keventd_up(void)
-{
- return system_wq != NULL;
-}
-
#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
@@ -632,4 +637,7 @@ int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif
+int __init workqueue_init_early(void);
+int __init workqueue_init(void);
+
#endif
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 797100e10010..5527d910ba3d 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -9,6 +9,9 @@
#include <linux/fs.h>
#include <linux/flex_proportions.h>
#include <linux/backing-dev-defs.h>
+#include <linux/blk_types.h>
+
+struct bio;
DECLARE_PER_CPU(int, dirty_throttle_leaks);
@@ -100,6 +103,16 @@ struct writeback_control {
#endif
};
+static inline int wbc_to_write_flags(struct writeback_control *wbc)
+{
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ return REQ_SYNC;
+ else if (wbc->for_kupdate || wbc->for_background)
+ return REQ_BACKGROUND;
+
+ return 0;
+}
+
/*
* A wb_domain represents a domain that wb's (bdi_writeback's) belong to
* and are measured against each other in. There always is one global
@@ -362,7 +375,6 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
-void page_writeback_init(void);
void balance_dirty_pages_ratelimited(struct address_space *mapping);
bool wb_over_bg_thresh(struct bdi_writeback *wb);
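The new wbc_to_write_flags() helper is meant to be OR-ed into a bio's operation flags when a filesystem submits writeback; a minimal hedged sketch:

#include <linux/bio.h>
#include <linux/writeback.h>

/* Illustrative only: tag a writeback bio according to the wbc. */
static void example_submit_wb_bio(struct bio *bio,
				  struct writeback_control *wbc)
{
	bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	submit_bio(bio);
}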
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 2bb5deb0012e..7b0066814fa0 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -120,7 +120,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
struct ww_class *ww_class)
{
ctx->task = current;
- ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
+ ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
ctx->acquired = 0;
#ifdef CONFIG_DEBUG_MUTEXES
ctx->ww_class = ww_class;
diff --git a/include/media/cec.h b/include/media/cec.h
index fdb5d600e4bb..96a0aa770d61 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -35,7 +35,6 @@
* struct cec_devnode - cec device node
* @dev: cec device
* @cdev: cec character device
- * @parent: parent device
* @minor: device node minor number
* @registered: the device was correctly registered
* @unregistered: the device was unregistered
@@ -51,7 +50,6 @@ struct cec_devnode {
/* sysfs */
struct device dev;
struct cdev cdev;
- struct device *parent;
/* device info */
int minor;
@@ -196,11 +194,10 @@ static inline bool cec_is_sink(const struct cec_adapter *adap)
return adap->phys_addr == 0;
}
-#if IS_ENABLED(CONFIG_MEDIA_CEC)
+#if IS_ENABLED(CONFIG_MEDIA_CEC_SUPPORT)
struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
- void *priv, const char *name, u32 caps, u8 available_las,
- struct device *parent);
-int cec_register_adapter(struct cec_adapter *adap);
+ void *priv, const char *name, u32 caps, u8 available_las);
+int cec_register_adapter(struct cec_adapter *adap, struct device *parent);
void cec_unregister_adapter(struct cec_adapter *adap);
void cec_delete_adapter(struct cec_adapter *adap);
@@ -218,7 +215,8 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg);
#else
-static inline int cec_register_adapter(struct cec_adapter *adap)
+static inline int cec_register_adapter(struct cec_adapter *adap,
+ struct device *parent)
{
return 0;
}
diff --git a/include/media/media-device.h b/include/media/media-device.h
index ef93e21335df..c21b4c5f5871 100644
--- a/include/media/media-device.h
+++ b/include/media/media-device.h
@@ -39,8 +39,10 @@ struct device;
* @notify_data: Input data to invoke the callback
* @notify: Callback function pointer
*
- * Drivers may register a callback to take action when
- * new entities get registered with the media device.
+ * Drivers may register a callback to take action when new entities get
+ * registered with the media device. This handler is intended for creating
+ * links between existing entities and should not create entities and register
+ * them.
*/
struct media_entity_notify {
struct list_head list;
@@ -373,30 +375,6 @@ int __must_check media_device_register_entity_notify(struct media_device *mdev,
void media_device_unregister_entity_notify(struct media_device *mdev,
struct media_entity_notify *nptr);
-/**
- * media_device_get_devres() - get media device as device resource
- * creates if one doesn't exist
- *
- * @dev: pointer to struct &device.
- *
- * Sometimes, the media controller &media_device needs to be shared by more
- * than one driver. This function adds support for that, by dynamically
- * allocating the &media_device and allowing it to be obtained from the
- * struct &device associated with the common device where all sub-device
- * components belong. So, for example, on an USB device with multiple
- * interfaces, each interface may be handled by a separate per-interface
- * drivers. While each interface have its own &device, they all share a
- * common &device associated with the hole USB device.
- */
-struct media_device *media_device_get_devres(struct device *dev);
-
-/**
- * media_device_find_devres() - find media device as device resource
- *
- * @dev: pointer to struct &device.
- */
-struct media_device *media_device_find_devres(struct device *dev);
-
/* Iterate over all entities. */
#define media_device_for_each_entity(entity, mdev) \
list_for_each_entry(entity, &(mdev)->entities, graph_obj.list)
@@ -474,14 +452,6 @@ static inline void media_device_unregister_entity_notify(
struct media_entity_notify *nptr)
{
}
-static inline struct media_device *media_device_get_devres(struct device *dev)
-{
- return NULL;
-}
-static inline struct media_device *media_device_find_devres(struct device *dev)
-{
- return NULL;
-}
static inline void media_device_pci_init(struct media_device *mdev,
struct pci_dev *pci_dev,
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 40188d362486..55281b92105a 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -68,6 +68,7 @@ enum rc_filter_type {
* struct rc_dev - represents a remote control device
* @dev: driver model's view of this device
* @initialized: 1 if the device init has completed, 0 otherwise
+ * @managed_alloc: devm_rc_allocate_device was used to create rc_dev
* @sysfs_groups: sysfs attribute groups
* @input_name: name of the input child device
* @input_phys: physical path to the input child device
@@ -131,6 +132,7 @@ enum rc_filter_type {
struct rc_dev {
struct device dev;
atomic_t initialized;
+ bool managed_alloc;
const struct attribute_group *sysfs_groups[5];
const char *input_name;
const char *input_phys;
@@ -203,6 +205,14 @@ struct rc_dev {
struct rc_dev *rc_allocate_device(void);
/**
+ * devm_rc_allocate_device - Managed RC device allocation
+ *
+ * @dev: pointer to struct device
+ * returns a pointer to struct rc_dev.
+ */
+struct rc_dev *devm_rc_allocate_device(struct device *dev);
+
+/**
* rc_free_device - Frees a RC device
*
* @dev: pointer to struct rc_dev.
@@ -217,6 +227,14 @@ void rc_free_device(struct rc_dev *dev);
int rc_register_device(struct rc_dev *dev);
/**
+ * devm_rc_register_device - Managed registering of an RC device
+ *
+ * @parent: pointer to struct device.
+ * @dev: pointer to struct rc_dev.
+ */
+int devm_rc_register_device(struct device *parent, struct rc_dev *dev);
+
+/**
* rc_unregister_device - Unregisters a RC device
*
* @dev: pointer to struct rc_dev.
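With the devm_ variants, a driver's error paths no longer need explicit rc_free_device()/rc_unregister_device() calls; a hypothetical probe sketch (keymap and hardware setup omitted):

#include <linux/platform_device.h>
#include <media/rc-core.h>

static int example_rc_probe(struct platform_device *pdev)
{
	struct rc_dev *rc;

	rc = devm_rc_allocate_device(&pdev->dev);
	if (!rc)
		return -ENOMEM;

	rc->input_name = "example remote";	/* placeholder */

	/* Unregistered and freed automatically when the driver detaches. */
	return devm_rc_register_device(&pdev->dev, rc);
}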
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 350cbf9fb10e..aac8b7b6e691 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -55,6 +55,13 @@
v4l_client_printk(KERN_DEBUG, client, fmt , ## arg); \
} while (0)
+/* Add a version of v4l_dbg to be used on drivers using dev_foo() macros */
+#define dev_dbg_lvl(__dev, __level, __debug, __fmt, __arg...) \
+ do { \
+ if (__debug >= (__level)) \
+ dev_printk(KERN_DEBUG, __dev, __fmt, ##__arg); \
+ } while (0)
+
/* ------------------------------------------------------------------------- */
/* These printk constructs can be used with v4l2_device and v4l2_subdev */
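The new dev_dbg_lvl() mirrors v4l_dbg() for drivers that log with dev_*() helpers; a hypothetical call site where 'debug' is the driver's module parameter:

#include <linux/device.h>
#include <media/v4l2-common.h>

static int debug = 1;	/* hypothetical module parameter */

static void example_log_format(struct device *dev, u32 width, u32 height)
{
	/* Printed only when debug >= 2. */
	dev_dbg_lvl(dev, 2, debug, "negotiated %ux%u\n", width, height);
}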
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index 0a7d9e1fc8c8..61a18893e004 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -101,12 +101,22 @@ bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
void *fnc_handle);
/**
+ * v4l2_find_dv_timings_cea861_vic() - find timings based on CEA-861 VIC
+ * @t: the timings data.
+ * @vic: CEA-861 VIC code
+ *
+ * On success it will fill in @t with the found timings and it returns true.
+ * On failure it will return false.
+ */
+bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic);
+
+/**
* v4l2_match_dv_timings() - do two timings match?
*
* @measured: the measured timings data.
* @standard: the timings according to the standard.
* @pclock_delta: maximum delta in Hz between standard->pixelclock and
- * the measured timings.
+ * the measured timings.
* @match_reduced_fps: if true, then fail if V4L2_DV_FL_REDUCED_FPS does not
* match.
*
@@ -185,6 +195,14 @@ bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync,
*/
struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait);
+/**
+ * v4l2_dv_timings_aspect_ratio - calculate the aspect ratio based on the
+ * v4l2_dv_timings information.
+ *
+ * @t: the timings data.
+ */
+struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t);
+
/*
* reduce_fps - check if conditions for reduced fps are true.
* bt - v4l2 timing structure
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 1b355344c804..3ccd01bd245e 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -90,6 +90,9 @@ struct v4l2_m2m_queue_ctx {
* %TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT.
* @finished: Wait queue used to signal when a job queue finished.
* @priv: Instance private data
+ *
+ * The memory to memory context is specific to a file handle, NOT to e.g.
+ * a device.
*/
struct v4l2_m2m_ctx {
/* optional cap/out vb2 queues lock */
diff --git a/include/media/v4l2-tpg.h b/include/media/v4l2-tpg.h
index 329bebfa930c..13e49d85cae3 100644
--- a/include/media/v4l2-tpg.h
+++ b/include/media/v4l2-tpg.h
@@ -87,6 +87,13 @@ enum tpg_move_mode {
TPG_MOVE_POS_FAST,
};
+enum tgp_color_enc {
+ TGP_COLOR_ENC_RGB,
+ TGP_COLOR_ENC_YCBCR,
+ TGP_COLOR_ENC_HSV,
+ TGP_COLOR_ENC_LUMA,
+};
+
extern const char * const tpg_aspect_strings[];
#define TPG_MAX_PLANES 3
@@ -119,10 +126,11 @@ struct tpg_data {
u8 saturation;
s16 hue;
u32 fourcc;
- bool is_yuv;
+ enum tgp_color_enc color_enc;
u32 colorspace;
u32 xfer_func;
u32 ycbcr_enc;
+ u32 hsv_enc;
/*
* Stores the actual transfer function, i.e. will never be
* V4L2_XFER_FUNC_DEFAULT.
@@ -132,6 +140,7 @@ struct tpg_data {
* Stores the actual Y'CbCr encoding, i.e. will never be
* V4L2_YCBCR_ENC_DEFAULT.
*/
+ u32 real_hsv_enc;
u32 real_ycbcr_enc;
u32 quantization;
/*
@@ -334,6 +343,19 @@ static inline u32 tpg_g_ycbcr_enc(const struct tpg_data *tpg)
return tpg->ycbcr_enc;
}
+static inline void tpg_s_hsv_enc(struct tpg_data *tpg, u32 hsv_enc)
+{
+ if (tpg->hsv_enc == hsv_enc)
+ return;
+ tpg->hsv_enc = hsv_enc;
+ tpg->recalc_colors = true;
+}
+
+static inline u32 tpg_g_hsv_enc(const struct tpg_data *tpg)
+{
+ return tpg->hsv_enc;
+}
+
static inline void tpg_s_xfer_func(struct tpg_data *tpg, u32 xfer_func)
{
if (tpg->xfer_func == xfer_func)
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 82f3c912a5b1..1d716449209e 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -36,13 +36,12 @@ struct tc_action {
struct tcf_t tcfa_tm;
struct gnet_stats_basic_packed tcfa_bstats;
struct gnet_stats_queue tcfa_qstats;
- struct gnet_stats_rate_est64 tcfa_rate_est;
+ struct net_rate_estimator __rcu *tcfa_rate_est;
spinlock_t tcfa_lock;
struct rcu_head tcfa_rcu;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
};
-#define tcf_act common.tcfa_act
#define tcf_head common.tcfa_head
#define tcf_index common.tcfa_index
#define tcf_refcnt common.tcfa_refcnt
@@ -120,6 +119,8 @@ struct tc_action_ops {
int (*walk)(struct net *, struct sk_buff *,
struct netlink_callback *, int, const struct tc_action_ops *);
void (*stats_update)(struct tc_action *, u64, u32, u64);
+ int (*get_dev)(const struct tc_action *a, struct net *net,
+ struct net_device **mirred_dev);
};
struct tc_action_net {
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 0a1e21d7bce1..01487192f628 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -197,7 +197,7 @@ typedef struct {
#define BDADDR_LE_PUBLIC 0x01
#define BDADDR_LE_RANDOM 0x02
-static inline bool bdaddr_type_is_valid(__u8 type)
+static inline bool bdaddr_type_is_valid(u8 type)
{
switch (type) {
case BDADDR_BREDR:
@@ -209,7 +209,7 @@ static inline bool bdaddr_type_is_valid(__u8 type)
return false;
}
-static inline bool bdaddr_type_is_le(__u8 type)
+static inline bool bdaddr_type_is_le(u8 type)
{
switch (type) {
case BDADDR_LE_PUBLIC:
@@ -279,15 +279,16 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
/* Skb helpers */
struct l2cap_ctrl {
- __u8 sframe:1,
+ u8 sframe:1,
poll:1,
final:1,
fcs:1,
sar:2,
super:2;
- __u16 reqseq;
- __u16 txseq;
- __u8 retries;
+
+ u16 reqseq;
+ u16 txseq;
+ u8 retries;
__le16 psm;
bdaddr_t bdaddr;
struct l2cap_chan *chan;
@@ -303,7 +304,7 @@ typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
#define HCI_REQ_SKB BIT(1)
struct hci_ctrl {
- __u16 opcode;
+ u16 opcode;
u8 req_flags;
u8 req_event;
union {
@@ -313,10 +314,10 @@ struct hci_ctrl {
};
struct bt_skb_cb {
- __u8 pkt_type;
- __u8 force_active;
- __u16 expect;
- __u8 incoming:1;
+ u8 pkt_type;
+ u8 force_active;
+ u16 expect;
+ u8 incoming:1;
union {
struct l2cap_ctrl l2cap;
struct hci_ctrl hci;
@@ -366,7 +367,7 @@ out:
return NULL;
}
-int bt_to_errno(__u16 code);
+int bt_to_errno(u16 code);
void hci_sock_set_flag(struct sock *sk, int nr);
void hci_sock_clear_flag(struct sock *sk, int nr);
diff --git a/include/net/bonding.h b/include/net/bonding.h
index f32f7ef8a23a..3c857778a6ca 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -681,7 +681,7 @@ static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
}
/* exported from bond_main.c */
-extern int bond_net_id;
+extern unsigned int bond_net_id;
extern const struct bond_parm_tbl bond_lacp_tbl[];
extern const struct bond_parm_tbl xmit_hashtype_tbl[];
extern const struct bond_parm_tbl arp_validate_tbl[];
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 2fbeb1313c0f..d73b849e29a6 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -58,10 +58,9 @@ static inline unsigned long busy_loop_end_time(void)
return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_busy_poll);
}
-static inline bool sk_can_busy_loop(struct sock *sk)
+static inline bool sk_can_busy_loop(const struct sock *sk)
{
- return sk->sk_ll_usec && sk->sk_napi_id &&
- !need_resched() && !signal_pending(current);
+ return sk->sk_ll_usec && sk->sk_napi_id && !signal_pending(current);
}
@@ -81,11 +80,6 @@ static inline void skb_mark_napi_id(struct sk_buff *skb,
skb->napi_id = napi->napi_id;
}
-/* used in the protocol hanlder to propagate the napi_id to the socket */
-static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
-{
- sk->sk_napi_id = skb->napi_id;
-}
#else /* CONFIG_NET_RX_BUSY_POLL */
static inline unsigned long net_busy_loop_on(void)
@@ -108,10 +102,6 @@ static inline void skb_mark_napi_id(struct sk_buff *skb,
{
}
-static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
-{
-}
-
static inline bool busy_loop_timeout(unsigned long end_time)
{
return true;
@@ -123,4 +113,23 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+/* used in the protocol handler to propagate the napi_id to the socket */
+static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ sk->sk_napi_id = skb->napi_id;
+#endif
+}
+
+/* variant used for unconnected sockets */
+static inline void sk_mark_napi_id_once(struct sock *sk,
+ const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ if (!sk->sk_napi_id)
+ sk->sk_napi_id = skb->napi_id;
+#endif
+}
+
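A hedged sketch of the intended call site for an unconnected (UDP-style) socket's receive path, where only the first packet records the NAPI id:

static int foo_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	/* remember the NAPI instance only once; packets arriving later
	 * from other queues will not overwrite it */
	sk_mark_napi_id_once(sk, skb);

	return sock_queue_rcv_skb(sk, skb);
}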
#endif /* _LINUX_NET_BUSY_POLL_H */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 14b51d739c3b..814be4b4200c 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -772,6 +772,30 @@ struct cfg80211_csa_settings {
};
/**
+ * struct iface_combination_params - input parameters for interface combinations
+ *
+ * Used to pass interface combination parameters
+ *
+ * @num_different_channels: the number of different channels we want
+ * to use for verification
+ * @radar_detect: a bitmap where each bit corresponds to a channel
+ * width where radar detection is needed, as in the definition of
+ * &struct ieee80211_iface_combination.@radar_detect_widths
+ * @iftype_num: array with the number of interfaces of each interface
+ * type. The index is the interface type as specified in &enum
+ * nl80211_iftype.
+ * @new_beacon_int: set this to the beacon interval of a new interface
+ * that's not operating yet, if such is to be checked as part of
+ * the verification
+ */
+struct iface_combination_params {
+ int num_different_channels;
+ u8 radar_detect;
+ int iftype_num[NUM_NL80211_IFTYPES];
+ u32 new_beacon_int;
+};
+
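A hedged sketch of how a driver might fill the structure for a simple AP + station check; the interface counts and single-channel assumption are purely illustrative:

struct iface_combination_params params = {
	.num_different_channels = 1,
};

params.iftype_num[NL80211_IFTYPE_STATION] = 1;
params.iftype_num[NL80211_IFTYPE_AP] = 1;

if (cfg80211_check_combinations(wiphy, &params))
	/* combination not allowed by the wiphy's advertised limits */;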
+/**
* enum station_parameters_apply_mask - station parameter values to apply
* @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp)
* @STATION_PARAM_APPLY_CAPABILITY: apply new capability
@@ -1761,9 +1785,11 @@ const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie);
* @key_len: length of WEP key for shared key authentication
* @key_idx: index of WEP key for shared key authentication
* @key: WEP key for shared key authentication
- * @sae_data: Non-IE data to use with SAE or %NULL. This starts with
- * Authentication transaction sequence number field.
- * @sae_data_len: Length of sae_data buffer in octets
+ * @auth_data: Fields and elements in Authentication frames. This contains
+ * the authentication frame body (non-IE and IE data), excluding the
+ * Authentication algorithm number, i.e., starting at the Authentication
+ * transaction sequence number field.
+ * @auth_data_len: Length of auth_data buffer in octets
*/
struct cfg80211_auth_request {
struct cfg80211_bss *bss;
@@ -1772,8 +1798,8 @@ struct cfg80211_auth_request {
enum nl80211_auth_type auth_type;
const u8 *key;
u8 key_len, key_idx;
- const u8 *sae_data;
- size_t sae_data_len;
+ const u8 *auth_data;
+ size_t auth_data_len;
};
/**
@@ -1814,6 +1840,12 @@ enum cfg80211_assoc_req_flags {
* @ht_capa_mask: The bits of ht_capa which are to be used.
* @vht_capa: VHT capability override
* @vht_capa_mask: VHT capability mask indicating which fields to use
+ * @fils_kek: FILS KEK for protecting (Re)Association Request/Response frame or
+ * %NULL if FILS is not used.
+ * @fils_kek_len: Length of fils_kek in octets
+ * @fils_nonces: FILS nonces (part of AAD) for protecting (Re)Association
+ * Request/Response frame or %NULL if FILS is not used. This field starts
+ * with 16 octets of STA Nonce followed by 16 octets of AP Nonce.
*/
struct cfg80211_assoc_request {
struct cfg80211_bss *bss;
@@ -1825,6 +1857,9 @@ struct cfg80211_assoc_request {
struct ieee80211_ht_cap ht_capa;
struct ieee80211_ht_cap ht_capa_mask;
struct ieee80211_vht_cap vht_capa, vht_capa_mask;
+ const u8 *fils_kek;
+ size_t fils_kek_len;
+ const u8 *fils_nonces;
};
/**
@@ -2016,6 +2051,18 @@ struct cfg80211_connect_params {
};
/**
+ * enum cfg80211_connect_params_changed - Connection parameters being updated
+ *
+ * This enum provides information on all connect parameters that
+ * have to be updated as part of update_connect_params() call.
+ *
+ * @UPDATE_ASSOC_IES: Indicates whether association request IEs are updated
+ */
+enum cfg80211_connect_params_changed {
+ UPDATE_ASSOC_IES = BIT(0),
+};
+
+/**
* enum wiphy_params_flags - set_wiphy_params bitfield values
* @WIPHY_PARAM_RETRY_SHORT: wiphy->retry_short has changed
* @WIPHY_PARAM_RETRY_LONG: wiphy->retry_long has changed
@@ -2536,9 +2583,18 @@ struct cfg80211_nan_func {
* cases, the result of roaming is indicated with a call to
* cfg80211_roamed() or cfg80211_roamed_bss().
* (invoked with the wireless_dev mutex held)
- * @disconnect: Disconnect from the BSS/ESS. Once done, call
- * cfg80211_disconnected().
+ * @update_connect_params: Update the connect parameters while connected to a
+ * BSS. The updated parameters can be used by driver/firmware for
+ * subsequent BSS selection (roaming) decisions and to form the
+ * Authentication/(Re)Association Request frames. This call does not
+ * request an immediate disassociation or reassociation with the current
+ * BSS, i.e., this impacts only subsequent (re)associations. The bits in
+ * changed are defined in &enum cfg80211_connect_params_changed.
* (invoked with the wireless_dev mutex held)
+ * @disconnect: Disconnect from the BSS/ESS or stop connection attempts if
+ * connection is in progress. Once done, call cfg80211_disconnected() in
+ * case connection was already established (invoked with the
+ * wireless_dev mutex held), otherwise call cfg80211_connect_timeout().
*
* @join_ibss: Join the specified IBSS (or create if necessary). Once done, call
* cfg80211_ibss_joined(), also call that function when changing BSSID due
@@ -2706,6 +2762,8 @@ struct cfg80211_nan_func {
* @nan_change_conf: changes NAN configuration. The changed parameters must
* be specified in @changes (using &enum cfg80211_nan_conf_changes);
* All other parameters must be ignored.
+ *
+ * @set_multicast_to_unicast: configure multicast to unicast conversion for BSS
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -2820,6 +2878,10 @@ struct cfg80211_ops {
int (*connect)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme);
+ int (*update_connect_params)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_connect_params *sme,
+ u32 changed);
int (*disconnect)(struct wiphy *wiphy, struct net_device *dev,
u16 reason_code);
@@ -2982,6 +3044,10 @@ struct cfg80211_ops {
struct wireless_dev *wdev,
struct cfg80211_nan_conf *conf,
u32 changes);
+
+ int (*set_multicast_to_unicast)(struct wiphy *wiphy,
+ struct net_device *dev,
+ const bool enabled);
};
/*
@@ -3080,6 +3146,12 @@ struct ieee80211_iface_limit {
* only in special cases.
* @radar_detect_widths: bitmap of channel widths supported for radar detection
* @radar_detect_regions: bitmap of regions supported for radar detection
+ * @beacon_int_min_gcd: This interface combination supports different
+ * beacon intervals.
+ * = 0 - all beacon intervals for different interfaces must be the same.
+ * > 0 - any beacon interval for the interface part of this combination AND
+ * *GCD* of all beacon intervals from beaconing interfaces of this
+ * combination must be greater than or equal to this value.
*
* With this structure the driver can describe which interface
* combinations it supports concurrently.
@@ -3145,6 +3217,7 @@ struct ieee80211_iface_combination {
bool beacon_int_infra_match;
u8 radar_detect_widths;
u8 radar_detect_regions;
+ u32 beacon_int_min_gcd;
};
struct ieee80211_txrx_stypes {
@@ -3752,8 +3825,8 @@ struct cfg80211_cached_keys;
* @beacon_interval: beacon interval used on this device for transmitting
* beacons, 0 when not valid
* @address: The address for this device, valid only if @netdev is %NULL
- * @p2p_started: true if this is a P2P Device that has been started
- * @nan_started: true if this is a NAN interface that has been started
+ * @is_running: true if this is a non-netdev device that has been started, e.g.
+ * the P2P Device.
* @cac_started: true if DFS channel availability check has been started
* @cac_start_time: timestamp (jiffies) when the dfs state was entered.
* @cac_time_ms: CAC time in ms
@@ -3785,7 +3858,7 @@ struct wireless_dev {
struct mutex mtx;
- bool use_4addr, p2p_started, nan_started;
+ bool use_4addr, is_running;
u8 address[ETH_ALEN] __aligned(sizeof(u16));
@@ -3842,6 +3915,13 @@ static inline u8 *wdev_address(struct wireless_dev *wdev)
return wdev->address;
}
+static inline bool wdev_running(struct wireless_dev *wdev)
+{
+ if (wdev->netdev)
+ return netif_running(wdev->netdev);
+ return wdev->is_running;
+}
+
/**
* wdev_priv - return wiphy priv from wireless_dev
*
@@ -4163,6 +4243,27 @@ static inline const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
}
/**
+ * cfg80211_find_ext_ie - find information element with EID Extension in data
+ *
+ * @ext_eid: element ID Extension
+ * @ies: data consisting of IEs
+ * @len: length of data
+ *
+ * Return: %NULL if the extended element ID could not be found or if
+ * the element is invalid (claims to be longer than the given
+ * data), or a pointer to the first byte of the requested
+ * element, that is the byte containing the element ID.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data.
+ */
+static inline const u8 *cfg80211_find_ext_ie(u8 ext_eid, const u8 *ies, int len)
+{
+ return cfg80211_find_ie_match(WLAN_EID_EXTENSION, ies, len,
+ &ext_eid, 1, 2);
+}
+
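Sketch of the returned layout: the pointer aims at the Element ID octet, so the extended ID itself is the third octet (ext_id, ies and ies_len are hypothetical caller variables):

const u8 *elem = cfg80211_find_ext_ie(ext_id, ies, ies_len);

if (elem) {
	/* elem[0] == WLAN_EID_EXTENSION, elem[1] == length (covering the
	 * extension ID), elem[2] == ext_id, payload starts at elem[3] */
}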
+/**
* cfg80211_find_vendor_ie - find vendor specific information element in data
*
* @oui: vendor OUI
@@ -4562,7 +4663,8 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
* moves to cfg80211 in this call
* @buf: authentication frame (header + body)
* @len: length of the frame data
- * @uapsd_queues: bitmap of ACs configured to uapsd. -1 if n/a.
+ * @uapsd_queues: bitmap of queues configured for uapsd. Same format
+ * as the AC bitmap in the QoS info field
*
* After being asked to associate via cfg80211_ops::assoc() the driver must
* call either this function or cfg80211_auth_timeout().
@@ -4584,6 +4686,17 @@ void cfg80211_rx_assoc_resp(struct net_device *dev,
void cfg80211_assoc_timeout(struct net_device *dev, struct cfg80211_bss *bss);
/**
+ * cfg80211_abandon_assoc - notify cfg80211 of abandoned association attempt
+ * @dev: network device
+ * @bss: The BSS entry with which association was abandoned.
+ *
+ * Call this whenever an association attempt was abandoned for reasons
+ * reported through other APIs, such as deauth RX.
+ * This function may sleep. The caller must hold the corresponding wdev's mutex.
+ */
+void cfg80211_abandon_assoc(struct net_device *dev, struct cfg80211_bss *bss);
+
+/**
* cfg80211_tx_mlme_mgmt - notification of transmitted deauth/disassoc frame
* @dev: network device
* @buf: 802.11 frame (header + body)
@@ -5598,36 +5711,20 @@ unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy);
* cfg80211_check_combinations - check interface combinations
*
* @wiphy: the wiphy
- * @num_different_channels: the number of different channels we want
- * to use for verification
- * @radar_detect: a bitmap where each bit corresponds to a channel
- * width where radar detection is needed, as in the definition of
- * &struct ieee80211_iface_combination.@radar_detect_widths
- * @iftype_num: array with the numbers of interfaces of each interface
- * type. The index is the interface type as specified in &enum
- * nl80211_iftype.
+ * @params: the interface combinations parameter
*
* This function can be called by the driver to check whether a
* combination of interfaces and their types are allowed according to
* the interface combinations.
*/
int cfg80211_check_combinations(struct wiphy *wiphy,
- const int num_different_channels,
- const u8 radar_detect,
- const int iftype_num[NUM_NL80211_IFTYPES]);
+ struct iface_combination_params *params);
/**
* cfg80211_iter_combinations - iterate over matching combinations
*
* @wiphy: the wiphy
- * @num_different_channels: the number of different channels we want
- * to use for verification
- * @radar_detect: a bitmap where each bit corresponds to a channel
- * width where radar detection is needed, as in the definition of
- * &struct ieee80211_iface_combination.@radar_detect_widths
- * @iftype_num: array with the numbers of interfaces of each interface
- * type. The index is the interface type as specified in &enum
- * nl80211_iftype.
+ * @params: the interface combinations parameter
* @iter: function to call for each matching combination
* @data: pointer to pass to iter function
*
@@ -5636,9 +5733,7 @@ int cfg80211_check_combinations(struct wiphy *wiphy,
* purposes.
*/
int cfg80211_iter_combinations(struct wiphy *wiphy,
- const int num_different_channels,
- const u8 radar_detect,
- const int iftype_num[NUM_NL80211_IFTYPES],
+ struct iface_combination_params *params,
void (*iter)(const struct ieee80211_iface_combination *c,
void *data),
void *data);
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 5c30891e84e5..35d0fabd2782 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -22,7 +22,7 @@
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/checksum.h>
#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 211bd3c37028..d29e5fc82582 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -92,6 +92,8 @@ struct devlink_ops {
int (*eswitch_mode_get)(struct devlink *devlink, u16 *p_mode);
int (*eswitch_mode_set)(struct devlink *devlink, u16 mode);
+ int (*eswitch_inline_mode_get)(struct devlink *devlink, u8 *p_inline_mode);
+ int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode);
};
static inline void *devlink_priv(struct devlink *devlink)
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index 6965c8f68ade..701fc814d0af 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -115,6 +115,7 @@ static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb
static inline struct metadata_dst *__ip_tun_set_dst(__be32 saddr,
__be32 daddr,
__u8 tos, __u8 ttl,
+ __be16 tp_dst,
__be16 flags,
__be64 tunnel_id,
int md_size)
@@ -127,7 +128,7 @@ static inline struct metadata_dst *__ip_tun_set_dst(__be32 saddr,
ip_tunnel_key_init(&tun_dst->u.tun_info.key,
saddr, daddr, tos, ttl,
- 0, 0, 0, tunnel_id, flags);
+ 0, 0, tp_dst, tunnel_id, flags);
return tun_dst;
}
@@ -139,12 +140,13 @@ static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
const struct iphdr *iph = ip_hdr(skb);
return __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl,
- flags, tunnel_id, md_size);
+ 0, flags, tunnel_id, md_size);
}
static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u8 tos, __u8 ttl,
+ __be16 tp_dst,
__be32 label,
__be16 flags,
__be64 tunnel_id,
@@ -162,7 +164,7 @@ static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *sad
info->key.tun_flags = flags;
info->key.tun_id = tunnel_id;
info->key.tp_src = 0;
- info->key.tp_dst = 0;
+ info->key.tp_dst = tp_dst;
info->key.u.ipv6.src = *saddr;
info->key.u.ipv6.dst = *daddr;
@@ -183,7 +185,7 @@ static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
return __ipv6_tun_set_dst(&ip6h->saddr, &ip6h->daddr,
ipv6_get_dsfield(ip6h), ip6h->hop_limit,
- ip6_flowlabel(ip6h), flags, tunnel_id,
+ 0, ip6_flowlabel(ip6h), flags, tunnel_id,
md_size);
}
#endif /* __NET_DST_METADATA_H */
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 456e4a6006ab..8dbfdf728cd8 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -8,6 +8,11 @@
#include <net/flow.h>
#include <net/rtnetlink.h>
+struct fib_kuid_range {
+ kuid_t start;
+ kuid_t end;
+};
+
struct fib_rule {
struct list_head list;
int iifindex;
@@ -30,6 +35,7 @@ struct fib_rule {
int suppress_prefixlen;
char iifname[IFNAMSIZ];
char oifname[IFNAMSIZ];
+ struct fib_kuid_range uid_range;
struct rcu_head rcu;
};
@@ -92,7 +98,8 @@ struct fib_rules_ops {
[FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
[FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, \
[FRA_GOTO] = { .type = NLA_U32 }, \
- [FRA_L3MDEV] = { .type = NLA_U8 }
+ [FRA_L3MDEV] = { .type = NLA_U8 }, \
+ [FRA_UID_RANGE] = { .len = sizeof(struct fib_rule_uid_range) }
static inline void fib_rule_get(struct fib_rule *rule)
{
diff --git a/include/net/flow.h b/include/net/flow.h
index 035aa7716967..6984f1913dc1 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -11,6 +11,7 @@
#include <linux/in6.h>
#include <linux/atomic.h>
#include <net/flow_dissector.h>
+#include <linux/uidgid.h>
/*
* ifindex generation is per-net namespace, and loopback is
@@ -37,6 +38,7 @@ struct flowi_common {
#define FLOWI_FLAG_SKIP_NH_OIF 0x04
__u32 flowic_secid;
struct flowi_tunnel flowic_tun_key;
+ kuid_t flowic_uid;
};
union flowi_uli {
@@ -74,6 +76,7 @@ struct flowi4 {
#define flowi4_flags __fl_common.flowic_flags
#define flowi4_secid __fl_common.flowic_secid
#define flowi4_tun_key __fl_common.flowic_tun_key
+#define flowi4_uid __fl_common.flowic_uid
/* (saddr,daddr) must be grouped, same order as in IP header */
__be32 saddr;
@@ -93,7 +96,8 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
__u32 mark, __u8 tos, __u8 scope,
__u8 proto, __u8 flags,
__be32 daddr, __be32 saddr,
- __be16 dport, __be16 sport)
+ __be16 dport, __be16 sport,
+ kuid_t uid)
{
fl4->flowi4_oif = oif;
fl4->flowi4_iif = LOOPBACK_IFINDEX;
@@ -104,6 +108,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
fl4->flowi4_flags = flags;
fl4->flowi4_secid = 0;
fl4->flowi4_tun_key.tun_id = 0;
+ fl4->flowi4_uid = uid;
fl4->daddr = daddr;
fl4->saddr = saddr;
fl4->fl4_dport = dport;
@@ -131,6 +136,7 @@ struct flowi6 {
#define flowi6_flags __fl_common.flowic_flags
#define flowi6_secid __fl_common.flowic_secid
#define flowi6_tun_key __fl_common.flowic_tun_key
+#define flowi6_uid __fl_common.flowic_uid
struct in6_addr daddr;
struct in6_addr saddr;
/* Note: flowi6_tos is encoded in flowlabel, too. */
@@ -176,6 +182,7 @@ struct flowi {
#define flowi_flags u.__fl_common.flowic_flags
#define flowi_secid u.__fl_common.flowic_secid
#define flowi_tun_key u.__fl_common.flowic_tun_key
+#define flowi_uid u.__fl_common.flowic_uid
} __attribute__((__aligned__(BITS_PER_LONG/8)));
static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
@@ -239,6 +246,7 @@ struct flow_cache_object *flow_cache_lookup(struct net *net,
void *ctx);
int flow_cache_init(struct net *net);
void flow_cache_fini(struct net *net);
+void flow_cache_hp_init(void);
void flow_cache_flush(struct net *net);
void flow_cache_flush_deferred(struct net *net);
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index d9534927d93b..d896a33e00d4 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -104,6 +104,22 @@ struct flow_dissector_key_ports {
};
};
+/**
+ * struct flow_dissector_key_icmp:
+ * @icmp: ICMP type (high byte) and code (low byte) as a single field
+ * @type: ICMP type
+ * @code: ICMP code
+ */
+struct flow_dissector_key_icmp {
+ union {
+ __be16 icmp;
+ struct {
+ u8 type;
+ u8 code;
+ };
+ };
+};
/**
* struct flow_dissector_key_eth_addrs:
@@ -122,12 +138,18 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */
FLOW_DISSECTOR_KEY_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */
FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */
+ FLOW_DISSECTOR_KEY_ICMP, /* struct flow_dissector_key_icmp */
FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */
FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_flow_vlan */
FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */
FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */
FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */
+ FLOW_DISSECTOR_KEY_ENC_KEYID, /* struct flow_dissector_key_keyid */
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */
+ FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */
+ FLOW_DISSECTOR_KEY_ENC_CONTROL, /* struct flow_dissector_key_control */
+ FLOW_DISSECTOR_KEY_ENC_PORTS, /* struct flow_dissector_key_ports */
FLOW_DISSECTOR_KEY_MAX,
};
diff --git a/include/net/flowcache.h b/include/net/flowcache.h
index c8f665ec6e0d..9caf3bfc8d2d 100644
--- a/include/net/flowcache.h
+++ b/include/net/flowcache.h
@@ -17,7 +17,7 @@ struct flow_cache_percpu {
struct flow_cache {
u32 hash_shift;
struct flow_cache_percpu __percpu *percpu;
- struct notifier_block hotcpu_notifier;
+ struct hlist_node node;
int low_watermark;
int high_watermark;
struct timer_list rnd_timer;
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 231e121cc7d9..8b7aa370e7a4 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -11,6 +11,8 @@ struct gnet_stats_basic_cpu {
struct u64_stats_sync syncp;
};
+struct net_rate_estimator;
+
struct gnet_dump {
spinlock_t * lock;
struct sk_buff * skb;
@@ -42,8 +44,7 @@ void __gnet_stats_copy_basic(const seqcount_t *running,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
- const struct gnet_stats_basic_packed *b,
- struct gnet_stats_rate_est64 *r);
+ struct net_rate_estimator __rcu **ptr);
int gnet_stats_copy_queue(struct gnet_dump *d,
struct gnet_stats_queue __percpu *cpu_q,
struct gnet_stats_queue *q, __u32 qlen);
@@ -53,16 +54,16 @@ int gnet_stats_finish_copy(struct gnet_dump *d);
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
- struct gnet_stats_rate_est64 *rate_est,
+ struct net_rate_estimator __rcu **rate_est,
spinlock_t *stats_lock,
seqcount_t *running, struct nlattr *opt);
-void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_rate_est64 *rate_est);
+void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
- struct gnet_stats_rate_est64 *rate_est,
+ struct net_rate_estimator __rcu **ptr,
spinlock_t *stats_lock,
seqcount_t *running, struct nlattr *opt);
-bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
- const struct gnet_stats_rate_est64 *rate_est);
+bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
+bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
+ struct gnet_stats_rate_est64 *sample);
#endif
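A hedged sketch of reading a snapshot from the new RCU-managed estimator; the qdisc-private pointer q->rate_est is an assumption for illustration:

struct gnet_stats_rate_est64 sample;

if (gen_estimator_read(&q->rate_est, &sample))
	pr_debug("estimated rate: %llu bytes/s, %llu packets/s\n",
		 sample.bps, sample.pps);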
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 8d4608ce8716..a34275be3600 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -20,7 +20,7 @@ struct genl_info;
/**
* struct genl_family - generic netlink family
- * @id: protocol family idenfitier
+ * @id: protocol family identifier (private)
* @hdrsize: length of user specific header in bytes
* @name: name of family
* @version: protocol version
@@ -39,16 +39,16 @@ struct genl_info;
* Note that unbind() will not be called symmetrically if the
* generic netlink family is removed while there are still open
* sockets.
- * @attrbuf: buffer to store parsed attributes
- * @family_list: family list
- * @mcgrps: multicast groups used by this family (private)
- * @n_mcgrps: number of multicast groups (private)
+ * @attrbuf: buffer to store parsed attributes (private)
+ * @mcgrps: multicast groups used by this family
+ * @n_mcgrps: number of multicast groups
* @mcgrp_offset: starting number of multicast group IDs in this family
- * @ops: the operations supported by this family (private)
- * @n_ops: number of operations supported by this family (private)
+ * (private)
+ * @ops: the operations supported by this family
+ * @n_ops: number of operations supported by this family
*/
struct genl_family {
- unsigned int id;
+ int id; /* private */
unsigned int hdrsize;
char name[GENL_NAMSIZ];
unsigned int version;
@@ -64,15 +64,16 @@ struct genl_family {
int (*mcast_bind)(struct net *net, int group);
void (*mcast_unbind)(struct net *net, int group);
struct nlattr ** attrbuf; /* private */
- const struct genl_ops * ops; /* private */
- const struct genl_multicast_group *mcgrps; /* private */
- unsigned int n_ops; /* private */
- unsigned int n_mcgrps; /* private */
+ const struct genl_ops * ops;
+ const struct genl_multicast_group *mcgrps;
+ unsigned int n_ops;
+ unsigned int n_mcgrps;
unsigned int mcgrp_offset; /* private */
- struct list_head family_list; /* private */
struct module *module;
};
+struct nlattr **genl_family_attrbuf(const struct genl_family *family);
+
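With the ops, multicast groups and owning module now embedded in struct genl_family, registration collapses to a single call; a hedged sketch (all foo_* names are hypothetical):

static const struct genl_ops foo_genl_ops[] = {
	/* .cmd / .doit / .policy entries go here */
};

static struct genl_family foo_genl_family = {
	.name		= "foo",
	.version	= 1,
	.maxattr	= 3,
	.module		= THIS_MODULE,
	.ops		= foo_genl_ops,
	.n_ops		= ARRAY_SIZE(foo_genl_ops),
};

static int __init foo_init(void)
{
	/* the family id is assigned by the core; no GENL_ID_GENERATE needed */
	return genl_register_family(&foo_genl_family);
}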
/**
* struct genl_info - receiving information
* @snd_seq: sending sequence number
@@ -130,64 +131,13 @@ struct genl_ops {
u8 flags;
};
-int __genl_register_family(struct genl_family *family);
-
-static inline int genl_register_family(struct genl_family *family)
-{
- family->module = THIS_MODULE;
- return __genl_register_family(family);
-}
-
-/**
- * genl_register_family_with_ops - register a generic netlink family with ops
- * @family: generic netlink family
- * @ops: operations to be registered
- * @n_ops: number of elements to register
- *
- * Registers the specified family and operations from the specified table.
- * Only one family may be registered with the same family name or identifier.
- *
- * The family id may equal GENL_ID_GENERATE causing an unique id to
- * be automatically generated and assigned.
- *
- * Either a doit or dumpit callback must be specified for every registered
- * operation or the function will fail. Only one operation structure per
- * command identifier may be registered.
- *
- * See include/net/genetlink.h for more documenation on the operations
- * structure.
- *
- * Return 0 on success or a negative error code.
- */
-static inline int
-_genl_register_family_with_ops_grps(struct genl_family *family,
- const struct genl_ops *ops, size_t n_ops,
- const struct genl_multicast_group *mcgrps,
- size_t n_mcgrps)
-{
- family->module = THIS_MODULE;
- family->ops = ops;
- family->n_ops = n_ops;
- family->mcgrps = mcgrps;
- family->n_mcgrps = n_mcgrps;
- return __genl_register_family(family);
-}
-
-#define genl_register_family_with_ops(family, ops) \
- _genl_register_family_with_ops_grps((family), \
- (ops), ARRAY_SIZE(ops), \
- NULL, 0)
-#define genl_register_family_with_ops_groups(family, ops, grps) \
- _genl_register_family_with_ops_grps((family), \
- (ops), ARRAY_SIZE(ops), \
- (grps), ARRAY_SIZE(grps))
-
-int genl_unregister_family(struct genl_family *family);
-void genl_notify(struct genl_family *family, struct sk_buff *skb,
+int genl_register_family(struct genl_family *family);
+int genl_unregister_family(const struct genl_family *family);
+void genl_notify(const struct genl_family *family, struct sk_buff *skb,
struct genl_info *info, u32 group, gfp_t flags);
void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
- struct genl_family *family, int flags, u8 cmd);
+ const struct genl_family *family, int flags, u8 cmd);
/**
* genlmsg_nlhdr - Obtain netlink header from user specified header
@@ -196,8 +146,8 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
*
* Returns pointer to netlink header.
*/
-static inline struct nlmsghdr *genlmsg_nlhdr(void *user_hdr,
- struct genl_family *family)
+static inline struct nlmsghdr *
+genlmsg_nlhdr(void *user_hdr, const struct genl_family *family)
{
return (struct nlmsghdr *)((char *)user_hdr -
family->hdrsize -
@@ -233,7 +183,7 @@ static inline int genlmsg_parse(const struct nlmsghdr *nlh,
*/
static inline void genl_dump_check_consistent(struct netlink_callback *cb,
void *user_hdr,
- struct genl_family *family)
+ const struct genl_family *family)
{
nl_dump_check_consistent(cb, genlmsg_nlhdr(user_hdr, family));
}
@@ -250,7 +200,7 @@ static inline void genl_dump_check_consistent(struct netlink_callback *cb,
*/
static inline void *genlmsg_put_reply(struct sk_buff *skb,
struct genl_info *info,
- struct genl_family *family,
+ const struct genl_family *family,
int flags, u8 cmd)
{
return genlmsg_put(skb, info->snd_portid, info->snd_seq, family,
@@ -287,7 +237,7 @@ static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr)
* @group: offset of multicast group in groups array
* @flags: allocation flags
*/
-static inline int genlmsg_multicast_netns(struct genl_family *family,
+static inline int genlmsg_multicast_netns(const struct genl_family *family,
struct net *net, struct sk_buff *skb,
u32 portid, unsigned int group, gfp_t flags)
{
@@ -305,7 +255,7 @@ static inline int genlmsg_multicast_netns(struct genl_family *family,
* @group: offset of multicast group in groups array
* @flags: allocation flags
*/
-static inline int genlmsg_multicast(struct genl_family *family,
+static inline int genlmsg_multicast(const struct genl_family *family,
struct sk_buff *skb, u32 portid,
unsigned int group, gfp_t flags)
{
@@ -323,7 +273,7 @@ static inline int genlmsg_multicast(struct genl_family *family,
*
* This function must hold the RTNL or rcu_read_lock().
*/
-int genlmsg_multicast_allns(struct genl_family *family,
+int genlmsg_multicast_allns(const struct genl_family *family,
struct sk_buff *skb, u32 portid,
unsigned int group, gfp_t flags);
@@ -407,8 +357,9 @@ static inline struct sk_buff *genlmsg_new(size_t payload, gfp_t flags)
* This function returns the number of broadcast listeners that have set the
* NETLINK_RECV_NO_ENOBUFS socket option.
*/
-static inline int genl_set_err(struct genl_family *family, struct net *net,
- u32 portid, u32 group, int code)
+static inline int genl_set_err(const struct genl_family *family,
+ struct net *net, u32 portid,
+ u32 group, int code)
{
if (WARN_ON_ONCE(group >= family->n_mcgrps))
return -EINVAL;
@@ -416,7 +367,7 @@ static inline int genl_set_err(struct genl_family *family, struct net *net,
return netlink_set_err(net->genl_sock, portid, group, code);
}
-static inline int genl_has_listeners(struct genl_family *family,
+static inline int genl_has_listeners(const struct genl_family *family,
struct net *net, unsigned int group)
{
if (WARN_ON_ONCE(group >= family->n_mcgrps))
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index ba07b9d8ed63..d0e7e3f8e67a 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -333,9 +333,9 @@ enum ieee80211_radiotap_type {
#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_NS 0x0003
#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_MASK 0x00F0
#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_BEGIN_MDPU 0x0000
-#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_MPDU 0x0010
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ 0x0010
#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_PPDU 0x0020
-#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ 0x0030
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_MPDU 0x0030
#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_UNKNOWN 0x00F0
#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT 0x00
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index b0576cb2ab25..0fa4c324b713 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -55,6 +55,7 @@ struct inet6_ifaddr {
__u8 stable_privacy_retry;
__u16 scope;
+ __u64 dad_nonce;
unsigned long cstamp; /* created timestamp */
unsigned long tstamp; /* updated timestamp */
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 954ad6bfb56a..3212b39b5bfc 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -22,7 +22,8 @@ struct sock;
struct sockaddr;
int inet6_csk_bind_conflict(const struct sock *sk,
- const struct inet_bind_bucket *tb, bool relax);
+ const struct inet_bind_bucket *tb, bool relax,
+ bool soreuseport_ok);
struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6,
const struct request_sock *req, u8 proto);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 197a30d221e9..85ee3879499e 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -63,7 +63,8 @@ struct inet_connection_sock_af_ops {
#endif
void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
int (*bind_conflict)(const struct sock *sk,
- const struct inet_bind_bucket *tb, bool relax);
+ const struct inet_bind_bucket *tb,
+ bool relax, bool soreuseport_ok);
void (*mtu_reduced)(struct sock *sk);
};
@@ -261,7 +262,8 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
int inet_csk_bind_conflict(const struct sock *sk,
- const struct inet_bind_bucket *tb, bool relax);
+ const struct inet_bind_bucket *tb, bool relax,
+ bool soreuseport_ok);
int inet_csk_get_port(struct sock *sk, unsigned short snum);
struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
@@ -289,11 +291,6 @@ static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
}
-static inline int inet_csk_reqsk_queue_young(const struct sock *sk)
-{
- return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);
-}
-
static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
{
return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 236a81034fef..c9cff977a7fb 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -228,6 +228,7 @@ struct inet_sock {
#define IP_CMSG_PASSSEC BIT(5)
#define IP_CMSG_ORIGDSTADDR BIT(6)
#define IP_CMSG_CHECKSUM BIT(7)
+#define IP_CMSG_RECVFRAGSIZE BIT(8)
/**
* sk_to_full_sk - Access to a full socket
diff --git a/include/net/ip.h b/include/net/ip.h
index d3a107850a41..ab6761a7c883 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -178,6 +178,7 @@ struct ip_reply_arg {
/* -1 if not needed */
int bound_dev_if;
u8 tos;
+ kuid_t uid;
};
#define IP_REPLY_ARG_NOSRCCHECK 1
@@ -577,7 +578,8 @@ int ip_options_rcv_srr(struct sk_buff *skb);
*/
void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
-void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset);
+void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
@@ -599,7 +601,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
- ip_cmsg_recv_offset(msg, skb, 0, 0);
+ ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}
bool icmp_global_allow(void);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index f83e78d071a3..9dc2c182a263 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -140,9 +140,10 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
const struct in6_addr *gwaddr);
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif,
- u32 mark);
+ u32 mark, kuid_t uid);
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
-void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
+void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
+ kuid_t uid);
void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
u32 mark);
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index f390c3bb05c5..5f376af377c7 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -221,7 +221,8 @@ enum fib_event_type {
FIB_EVENT_RULE_DEL,
};
-int register_fib_notifier(struct notifier_block *nb);
+int register_fib_notifier(struct notifier_block *nb,
+ void (*cb)(struct notifier_block *nb));
int unregister_fib_notifier(struct notifier_block *nb);
int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
struct fib_notifier_info *info);
@@ -397,6 +398,11 @@ static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
void free_fib_info(struct fib_info *fi);
+static inline void fib_info_hold(struct fib_info *fi)
+{
+ atomic_inc(&fi->fib_clntref);
+}
+
static inline void fib_info_put(struct fib_info *fi)
{
if (atomic_dec_and_test(&fi->fib_clntref))
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 59557c07904b..e893fe43dd13 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -129,7 +129,7 @@ struct ip_tunnel {
#endif
struct ip_tunnel_prl_entry __rcu *prl; /* potential router list */
unsigned int prl_count; /* # of entries in PRL */
- int ip_tnl_net_id;
+ unsigned int ip_tnl_net_id;
struct gro_cells gro_cells;
bool collect_md;
bool ignore_df;
@@ -248,7 +248,7 @@ void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
struct net *ip_tunnel_get_link_net(const struct net_device *dev);
int ip_tunnel_get_iflink(const struct net_device *dev);
-int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
+int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname);
void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
@@ -275,7 +275,7 @@ int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p);
-void ip_tunnel_setup(struct net_device *dev, int net_id);
+void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
struct ip_tunnel_encap_ops {
size_t (*encap_hlen)(struct ip_tunnel_encap *e);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index f11ca837361b..487e57391664 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -932,7 +932,8 @@ int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
*/
void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
- u8 *proto, struct in6_addr **daddr_p);
+ u8 *proto, struct in6_addr **daddr_p,
+ struct in6_addr *saddr);
void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
u8 *proto);
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index ea3f80f58fd6..d4c1c75b8862 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -24,11 +24,11 @@ enum {
struct lwtunnel_state {
__u16 type;
__u16 flags;
+ __u16 headroom;
atomic_t refcnt;
int (*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb);
int (*orig_input)(struct sk_buff *);
- int len;
- __u16 headroom;
+ struct rcu_head rcu;
__u8 data[0];
};
@@ -36,6 +36,7 @@ struct lwtunnel_encap_ops {
int (*build_state)(struct net_device *dev, struct nlattr *encap,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts);
+ void (*destroy_state)(struct lwtunnel_state *lws);
int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
int (*input)(struct sk_buff *skb);
int (*fill_encap)(struct sk_buff *skb,
@@ -46,10 +47,7 @@ struct lwtunnel_encap_ops {
};
#ifdef CONFIG_LWTUNNEL
-static inline void lwtstate_free(struct lwtunnel_state *lws)
-{
- kfree(lws);
-}
+void lwtstate_free(struct lwtunnel_state *lws);
static inline struct lwtunnel_state *
lwtstate_get(struct lwtunnel_state *lws)
@@ -96,7 +94,8 @@ static inline bool lwtunnel_xmit_redirect(struct lwtunnel_state *lwtstate)
static inline unsigned int lwtunnel_headroom(struct lwtunnel_state *lwtstate,
unsigned int mtu)
{
- if (lwtunnel_xmit_redirect(lwtstate) && lwtstate->headroom < mtu)
+ if ((lwtunnel_xmit_redirect(lwtstate) ||
+ lwtunnel_output_redirect(lwtstate)) && lwtstate->headroom < mtu)
return lwtstate->headroom;
return 0;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index e2dba93e374f..5345d358a510 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1442,7 +1442,7 @@ enum ieee80211_vif_flags {
struct ieee80211_vif {
enum nl80211_iftype type;
struct ieee80211_bss_conf bss_conf;
- u8 addr[ETH_ALEN];
+ u8 addr[ETH_ALEN] __aligned(2);
bool p2p;
bool csa_active;
bool mu_mimo_owner;
@@ -1749,7 +1749,8 @@ struct ieee80211_sta_rates {
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void \*), size is determined in hw information.
* @uapsd_queues: bitmap of queues configured for uapsd. Only valid
- * if wme is supported.
+ * if wme is supported. The bits order is like in
+ * IEEE80211_WMM_IE_STA_QOSINFO_AC_*.
* @max_sp: max Service Period. Only valid if wme is supported.
* @bandwidth: current bandwidth the station can receive with
* @rx_nss: in HT/VHT, the maximum number of spatial streams the
@@ -2029,6 +2030,10 @@ struct ieee80211_txq {
* drivers, mac80211 packet loss mechanism will not be triggered and driver
* is completely depending on firmware event for station kickout.
*
+ * @IEEE80211_HW_SUPPORTS_TX_FRAG: Hardware does fragmentation by itself.
+ * The stack will not do fragmentation.
+ * The callback for @set_frag_threshold should be set as well.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2070,6 +2075,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_TX_AMSDU,
IEEE80211_HW_TX_FRAG_LIST,
IEEE80211_HW_REPORTS_LOW_ACK,
+ IEEE80211_HW_SUPPORTS_TX_FRAG,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -3098,8 +3104,9 @@ enum ieee80211_reconfig_type {
* The callback must be atomic.
*
* @set_frag_threshold: Configuration of fragmentation threshold. Assign this
- * if the device does fragmentation by itself; if this callback is
- * implemented then the stack will not do fragmentation.
+ * if the device does fragmentation by itself. Note that to prevent the
+ * stack from doing fragmentation IEEE80211_HW_SUPPORTS_TX_FRAG
+ * should be set as well.
* The callback can sleep.
*
* @set_rts_threshold: Configuration of RTS threshold (if device needs it)
@@ -4092,6 +4099,10 @@ void ieee80211_sta_pspoll(struct ieee80211_sta *sta);
* This must be used in conjunction with ieee80211_sta_ps_transition()
* and possibly ieee80211_sta_pspoll(); calls to all three must be
* serialized.
+ * %IEEE80211_NUM_TIDS can be passed as the tid if the tid is unknown.
+ * In this case, mac80211 will not check that this tid maps to an AC
+ * that is trigger-enabled, and will assume that the caller did the
+ * proper checks.
*/
void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, u8 tid);
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index be1fe2283254..d562a2fe4860 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -31,6 +31,7 @@ enum {
ND_OPT_PREFIX_INFO = 3, /* RFC2461 */
ND_OPT_REDIRECT_HDR = 4, /* RFC2461 */
ND_OPT_MTU = 5, /* RFC2461 */
+ ND_OPT_NONCE = 14, /* RFC7527 */
__ND_OPT_ARRAY_MAX,
ND_OPT_ROUTE_INFO = 24, /* RFC4191 */
ND_OPT_RDNSS = 25, /* RFC5006 */
@@ -121,6 +122,7 @@ struct ndisc_options {
#define nd_opts_pi_end nd_opt_array[__ND_OPT_PREFIX_INFO_END]
#define nd_opts_rh nd_opt_array[ND_OPT_REDIRECT_HDR]
#define nd_opts_mtu nd_opt_array[ND_OPT_MTU]
+#define nd_opts_nonce nd_opt_array[ND_OPT_NONCE]
#define nd_802154_opts_src_lladdr nd_802154_opt_array[ND_OPT_SOURCE_LL_ADDR]
#define nd_802154_opts_tgt_lladdr nd_802154_opt_array[ND_OPT_TARGET_LL_ADDR]
@@ -398,7 +400,8 @@ void ndisc_cleanup(void);
int ndisc_rcv(struct sk_buff *skb);
void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
- const struct in6_addr *daddr, const struct in6_addr *saddr);
+ const struct in6_addr *daddr, const struct in6_addr *saddr,
+ u64 nonce);
void ndisc_send_rs(struct net_device *dev,
const struct in6_addr *saddr, const struct in6_addr *daddr);
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 0940598c002f..af8fe8a909dc 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -291,7 +291,7 @@ struct pernet_operations {
int (*init)(struct net *net);
void (*exit)(struct net *net);
void (*exit_batch)(struct list_head *net_exit_list);
- int *id;
+ unsigned int *id;
size_t size;
};
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 981c327374da..919e4e8af327 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -15,6 +15,15 @@ extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4;
+#endif
int nf_conntrack_ipv4_compat_init(void);
void nf_conntrack_ipv4_compat_fini(void);
diff --git a/include/net/netfilter/ipv4/nf_defrag_ipv4.h b/include/net/netfilter/ipv4/nf_defrag_ipv4.h
index f01ef208dff6..db405f70e538 100644
--- a/include/net/netfilter/ipv4/nf_defrag_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_defrag_ipv4.h
@@ -1,6 +1,7 @@
#ifndef _NF_DEFRAG_IPV4_H
#define _NF_DEFRAG_IPV4_H
-void nf_defrag_ipv4_enable(void);
+struct net;
+int nf_defrag_ipv4_enable(struct net *);
#endif /* _NF_DEFRAG_IPV4_H */
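Since enabling defragmentation is now per netns and can fail, callers are expected to propagate the error; a hedged sketch from an x_tables checkentry-style context, where par->net is the match's netns:

	int err = nf_defrag_ipv4_enable(par->net);

	if (err)
		return err;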
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
index a4c993685795..eaea968f8657 100644
--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
@@ -6,6 +6,15 @@ extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6;
+#endif
#include <linux/sysctl.h>
extern struct ctl_table nf_ct_ipv6_sysctl_table[];
diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
index ddf162f7966f..7664efe37974 100644
--- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
@@ -1,7 +1,8 @@
#ifndef _NF_DEFRAG_IPV6_H
#define _NF_DEFRAG_IPV6_H
-void nf_defrag_ipv6_enable(void);
+struct net;
+int nf_defrag_ipv6_enable(struct net *);
int nf_ct_frag6_init(void);
void nf_ct_frag6_cleanup(void);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index d9d52c020a70..5916aa9ab3f0 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -181,6 +181,10 @@ static inline void nf_ct_put(struct nf_conn *ct)
int nf_ct_l3proto_try_module_get(unsigned short l3proto);
void nf_ct_l3proto_module_put(unsigned short l3proto);
+/* load module; enable/disable conntrack in this namespace */
+int nf_ct_netns_get(struct net *net, u8 nfproto);
+void nf_ct_netns_put(struct net *net, u8 nfproto);
+
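A hedged sketch of the expected pairing in an x_tables extension, taking the per-netns conntrack reference at check time and dropping it on destroy (foo_* names are placeholders):

static int foo_mt_check(const struct xt_mtchk_param *par)
{
	/* loads the l3/l4 trackers and enables conntrack in this netns */
	return nf_ct_netns_get(par->net, par->family);
}

static void foo_mt_destroy(const struct xt_mtdtor_param *par)
{
	nf_ct_netns_put(par->net, par->family);
}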
/*
* Allocate a hashtable of hlist_head (if nulls == 0),
* or hlist_nulls_head (if nulls == 1)
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index 8992e4229da9..e01559b4d781 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -52,6 +52,10 @@ struct nf_conntrack_l3proto {
int (*tuple_to_nlattr)(struct sk_buff *skb,
const struct nf_conntrack_tuple *t);
+ /* Called when netns wants to use connection tracking */
+ int (*net_ns_get)(struct net *);
+ void (*net_ns_put)(struct net *);
+
/*
* Calculate size of tuple nlattr
*/
@@ -63,18 +67,24 @@ struct nf_conntrack_l3proto {
size_t nla_size;
- /* Init l3proto pernet data */
- int (*init_net)(struct net *net);
-
/* Module (if any) which this is connected to. */
struct module *me;
};
extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX];
+#ifdef CONFIG_SYSCTL
/* Protocol pernet registration. */
int nf_ct_l3proto_pernet_register(struct net *net,
struct nf_conntrack_l3proto *proto);
+#else
+static inline int nf_ct_l3proto_pernet_register(struct net *n,
+ struct nf_conntrack_l3proto *p)
+{
+ return 0;
+}
+#endif
+
void nf_ct_l3proto_pernet_unregister(struct net *net,
struct nf_conntrack_l3proto *proto);
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index de629f1520df..e7b836590f0b 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -98,7 +98,7 @@ struct nf_conntrack_l4proto {
const struct nla_policy *nla_policy;
} ctnl_timeout;
#endif
- int *net_id;
+ unsigned int *net_id;
/* Init l4proto pernet data */
int (*init_net)(struct net *net, u_int16_t proto);
@@ -125,14 +125,24 @@ struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u_int16_t l3proto,
void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p);
/* Protocol pernet registration. */
+int nf_ct_l4proto_pernet_register_one(struct net *net,
+ struct nf_conntrack_l4proto *proto);
+void nf_ct_l4proto_pernet_unregister_one(struct net *net,
+ struct nf_conntrack_l4proto *proto);
int nf_ct_l4proto_pernet_register(struct net *net,
- struct nf_conntrack_l4proto *proto);
+ struct nf_conntrack_l4proto *proto[],
+ unsigned int num_proto);
void nf_ct_l4proto_pernet_unregister(struct net *net,
- struct nf_conntrack_l4proto *proto);
+ struct nf_conntrack_l4proto *proto[],
+ unsigned int num_proto);
/* Protocol global registration. */
-int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto);
-void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto);
+int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *proto);
+void nf_ct_l4proto_unregister_one(struct nf_conntrack_l4proto *proto);
+int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto[],
+ unsigned int num_proto);
+void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto[],
+ unsigned int num_proto);
/* Generic netlink helpers */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h
index e6937318546c..b0ca402c1f72 100644
--- a/include/net/netfilter/nf_conntrack_synproxy.h
+++ b/include/net/netfilter/nf_conntrack_synproxy.h
@@ -54,7 +54,7 @@ struct synproxy_net {
struct synproxy_stats __percpu *stats;
};
-extern int synproxy_net_id;
+extern unsigned int synproxy_net_id;
static inline struct synproxy_net *synproxy_pernet(struct net *net)
{
return net_generic(net, synproxy_net_id);
diff --git a/include/net/netfilter/nf_dup_netdev.h b/include/net/netfilter/nf_dup_netdev.h
index 397dcae349f9..3e919356bedf 100644
--- a/include/net/netfilter/nf_dup_netdev.h
+++ b/include/net/netfilter/nf_dup_netdev.h
@@ -2,5 +2,6 @@
#define _NF_DUP_NETDEV_H_
void nf_dup_netdev_egress(const struct nft_pktinfo *pkt, int oif);
+void nf_fwd_netdev_egress(const struct nft_pktinfo *pkt, int oif);
#endif
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index 309cd267be4f..450f87f95415 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -109,5 +109,12 @@ void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *prefix);
+void nf_log_l2packet(struct net *net, u_int8_t pf,
+ __be16 protocol,
+ unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct nf_loginfo *loginfo, const char *prefix);
#endif /* _NF_LOG_H */
diff --git a/include/net/netfilter/nf_nat_l4proto.h b/include/net/netfilter/nf_nat_l4proto.h
index 12f4cc841b6e..3923150f2a1e 100644
--- a/include/net/netfilter/nf_nat_l4proto.h
+++ b/include/net/netfilter/nf_nat_l4proto.h
@@ -54,6 +54,15 @@ extern const struct nf_nat_l4proto nf_nat_l4proto_udp;
extern const struct nf_nat_l4proto nf_nat_l4proto_icmp;
extern const struct nf_nat_l4proto nf_nat_l4proto_icmpv6;
extern const struct nf_nat_l4proto nf_nat_l4proto_unknown;
+#ifdef CONFIG_NF_NAT_PROTO_DCCP
+extern const struct nf_nat_l4proto nf_nat_l4proto_dccp;
+#endif
+#ifdef CONFIG_NF_NAT_PROTO_SCTP
+extern const struct nf_nat_l4proto nf_nat_l4proto_sctp;
+#endif
+#ifdef CONFIG_NF_NAT_PROTO_UDPLITE
+extern const struct nf_nat_l4proto nf_nat_l4proto_udplite;
+#endif
bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype,
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 2280cfe86c56..09948d10e38e 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -12,6 +12,7 @@ struct nf_queue_entry {
unsigned int id;
struct nf_hook_state state;
+ struct nf_hook_entry *hook;
u16 size; /* sizeof(entry) + saved route keys */
/* extra space to store route keys */
diff --git a/include/net/netfilter/nf_socket.h b/include/net/netfilter/nf_socket.h
new file mode 100644
index 000000000000..f2fc39c97d43
--- /dev/null
+++ b/include/net/netfilter/nf_socket.h
@@ -0,0 +1,27 @@
+#ifndef _NF_SOCK_H_
+#define _NF_SOCK_H_
+
+struct net_device;
+struct sk_buff;
+struct sock;
+struct net;
+
+static inline bool nf_sk_is_transparent(struct sock *sk)
+{
+ switch (sk->sk_state) {
+ case TCP_TIME_WAIT:
+ return inet_twsk(sk)->tw_transparent;
+ case TCP_NEW_SYN_RECV:
+ return inet_rsk(inet_reqsk(sk))->no_srccheck;
+ default:
+ return inet_sk(sk)->transparent;
+ }
+}
+
+struct sock *nf_sk_lookup_slow_v4(struct net *net, const struct sk_buff *skb,
+ const struct net_device *indev);
+
+struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
+ const struct net_device *indev);
+
+#endif
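A hedged sketch of how a caller might combine the two helpers above. Only nf_sk_lookup_slow_v4() and nf_sk_is_transparent() come from this header; the wrapper itself and the sock_gen_put() release convention are assumptions modelled on existing xt_socket-style users:

#include <net/netfilter/nf_socket.h>
#include <net/sock.h>

/* Hypothetical helper: does this IPv4 skb map to a transparent local socket? */
static bool example_skb_has_transparent_sk(struct net *net,
                                           const struct sk_buff *skb,
                                           const struct net_device *indev)
{
        struct sock *sk = nf_sk_lookup_slow_v4(net, skb, indev);
        bool transparent = false;

        if (sk) {
                transparent = nf_sk_is_transparent(sk);
                sock_gen_put(sk);       /* assumed release, as in xt_socket */
        }
        return transparent;
}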
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index b02af0bf5777..924325c46aab 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -14,27 +14,43 @@
struct nft_pktinfo {
struct sk_buff *skb;
- struct net *net;
- const struct net_device *in;
- const struct net_device *out;
- u8 pf;
- u8 hook;
bool tprot_set;
u8 tprot;
/* for x_tables compatibility */
struct xt_action_param xt;
};
+static inline struct net *nft_net(const struct nft_pktinfo *pkt)
+{
+ return pkt->xt.state->net;
+}
+
+static inline unsigned int nft_hook(const struct nft_pktinfo *pkt)
+{
+ return pkt->xt.state->hook;
+}
+
+static inline u8 nft_pf(const struct nft_pktinfo *pkt)
+{
+ return pkt->xt.state->pf;
+}
+
+static inline const struct net_device *nft_in(const struct nft_pktinfo *pkt)
+{
+ return pkt->xt.state->in;
+}
+
+static inline const struct net_device *nft_out(const struct nft_pktinfo *pkt)
+{
+ return pkt->xt.state->out;
+}
+
static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
pkt->skb = skb;
- pkt->net = pkt->xt.net = state->net;
- pkt->in = pkt->xt.in = state->in;
- pkt->out = pkt->xt.out = state->out;
- pkt->hook = pkt->xt.hooknum = state->hook;
- pkt->pf = pkt->xt.family = state->pf;
+ pkt->xt.state = state;
}
static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt,
@@ -243,7 +259,8 @@ struct nft_expr;
* @lookup: look up an element within the set
* @insert: insert new element into set
* @activate: activate new element in the next generation
- * @deactivate: deactivate element in the next generation
+ * @deactivate: look up an element and deactivate it in the next generation
+ * @deactivate_one: deactivate element in the next generation
* @remove: remove element from set
* @walk: iterate over all set elements
* @privsize: function to return size of set private data
@@ -278,6 +295,9 @@ struct nft_set_ops {
void * (*deactivate)(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem);
+ bool (*deactivate_one)(const struct net *net,
+ const struct nft_set *set,
+ void *priv);
void (*remove)(const struct nft_set *set,
const struct nft_set_elem *elem);
void (*walk)(const struct nft_ctx *ctx,
@@ -310,6 +330,7 @@ void nft_unregister_set(struct nft_set_ops *ops);
* @name: name of the set
* @ktype: key type (numeric type defined by userspace, not used in the kernel)
* @dtype: data type (verdict or numeric type defined by userspace)
+ * @objtype: object type (see NFT_OBJECT_* definitions)
* @size: maximum set size
* @nelems: number of elements
* @ndeact: number of deactivated elements queued for removal
@@ -331,6 +352,7 @@ struct nft_set {
char name[NFT_SET_MAXNAMELEN];
u32 ktype;
u32 dtype;
+ u32 objtype;
u32 size;
atomic_t nelems;
u32 ndeact;
@@ -400,6 +422,7 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
* @NFT_SET_EXT_EXPIRATION: element expiration time
* @NFT_SET_EXT_USERDATA: user data associated with the element
* @NFT_SET_EXT_EXPR: expression associated with the element
+ * @NFT_SET_EXT_OBJREF: stateful object reference associated with element
* @NFT_SET_EXT_NUM: number of extension types
*/
enum nft_set_extensions {
@@ -410,6 +433,7 @@ enum nft_set_extensions {
NFT_SET_EXT_EXPIRATION,
NFT_SET_EXT_USERDATA,
NFT_SET_EXT_EXPR,
+ NFT_SET_EXT_OBJREF,
NFT_SET_EXT_NUM
};
@@ -538,6 +562,11 @@ static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set,
return elem + set->ops->elemsize;
}
+static inline struct nft_object **nft_set_ext_obj(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_OBJREF);
+}
+
void *nft_set_elem_init(const struct nft_set *set,
const struct nft_set_ext_tmpl *tmpl,
const u32 *key, const u32 *data,
@@ -859,6 +888,7 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
* @list: used internally
* @chains: chains in the table
* @sets: sets in the table
+ * @objects: stateful objects in the table
* @hgenerator: handle generator state
* @use: number of chain references to this table
* @flags: table flag (see enum nft_table_flags)
@@ -869,6 +899,7 @@ struct nft_table {
struct list_head list;
struct list_head chains;
struct list_head sets;
+ struct list_head objects;
u64 hgenerator;
u32 use;
u16 flags:14,
@@ -919,6 +950,80 @@ int nft_verdict_dump(struct sk_buff *skb, int type,
const struct nft_verdict *v);
/**
+ * struct nft_object - nf_tables stateful object
+ *
+ * @list: table stateful object list node
+ * @table: table this object belongs to
+ * @type: pointer to object type
+ * @name: name of this stateful object
+ * @genmask: generation mask
+ * @use: number of references to this stateful object
+ * @data: object data, layout depends on type
+ */
+struct nft_object {
+ struct list_head list;
+ char name[NFT_OBJ_MAXNAMELEN];
+ struct nft_table *table;
+ u32 genmask:2,
+ use:30;
+ /* runtime data below here */
+ const struct nft_object_type *type ____cacheline_aligned;
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(u64))));
+};
+
+static inline void *nft_obj_data(const struct nft_object *obj)
+{
+ return (void *)obj->data;
+}
+
+#define nft_expr_obj(expr) *((struct nft_object **)nft_expr_priv(expr))
+
+struct nft_object *nf_tables_obj_lookup(const struct nft_table *table,
+ const struct nlattr *nla, u32 objtype,
+ u8 genmask);
+
+int nft_obj_notify(struct net *net, struct nft_table *table,
+ struct nft_object *obj, u32 portid, u32 seq,
+ int event, int family, int report, gfp_t gfp);
+
+/**
+ * struct nft_object_type - stateful object type
+ *
+ * @eval: stateful object evaluation function
+ * @list: list node in list of object types
+ * @type: stateful object numeric type
+ * @size: stateful object size
+ * @owner: module owner
+ * @maxattr: maximum netlink attribute
+ * @policy: netlink attribute policy
+ * @init: initialize object from netlink attributes
+ * @destroy: release existing stateful object
+ * @dump: netlink dump stateful object
+ */
+struct nft_object_type {
+ void (*eval)(struct nft_object *obj,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
+ struct list_head list;
+ u32 type;
+ unsigned int size;
+ unsigned int maxattr;
+ struct module *owner;
+ const struct nla_policy *policy;
+ int (*init)(const struct nlattr * const tb[],
+ struct nft_object *obj);
+ void (*destroy)(struct nft_object *obj);
+ int (*dump)(struct sk_buff *skb,
+ struct nft_object *obj,
+ bool reset);
+};
+
+int nft_register_obj(struct nft_object_type *obj_type);
+void nft_unregister_obj(struct nft_object_type *obj_type);
+
+/**
* struct nft_traceinfo - nft tracing information and state
*
* @pkt: pktinfo currently processed
@@ -965,6 +1070,9 @@ void nft_trace_notify(struct nft_traceinfo *info);
#define MODULE_ALIAS_NFT_SET() \
MODULE_ALIAS("nft-set")
+#define MODULE_ALIAS_NFT_OBJ(type) \
+ MODULE_ALIAS("nft-obj-" __stringify(type))
+
/*
* The gencursor defines two generations, the currently active and the
* next one. Objects contain a bitmask of 2 bits specifying the generations
@@ -1141,4 +1249,11 @@ struct nft_trans_elem {
#define nft_trans_elem(trans) \
(((struct nft_trans_elem *)trans->data)->elem)
+struct nft_trans_obj {
+ struct nft_object *obj;
+};
+
+#define nft_trans_obj(trans) \
+ (((struct nft_trans_obj *)trans->data)->obj)
+
#endif /* _NET_NF_TABLES_H */
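Since struct nft_pktinfo now keeps only the hook state pointer, expression code reads namespace and device information through the accessors added above instead of the removed pkt->net/pkt->in fields. A sketch of an eval callback using them — the expression itself is hypothetical; nft_in(), struct nft_regs and the eval signature are the existing API:

#include <net/netfilter/nf_tables.h>

/* Hypothetical expression: write the input interface index into register 0. */
static void example_iif_eval(const struct nft_expr *expr,
                             struct nft_regs *regs,
                             const struct nft_pktinfo *pkt)
{
        const struct net_device *dev = nft_in(pkt);     /* previously pkt->in */

        regs->data[0] = dev ? dev->ifindex : 0;
}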
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 00f4f6b1b1ba..8f690effec37 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -1,12 +1,18 @@
#ifndef _NET_NF_TABLES_CORE_H
#define _NET_NF_TABLES_CORE_H
+extern struct nft_expr_type nft_imm_type;
+extern struct nft_expr_type nft_cmp_type;
+extern struct nft_expr_type nft_lookup_type;
+extern struct nft_expr_type nft_bitwise_type;
+extern struct nft_expr_type nft_byteorder_type;
+extern struct nft_expr_type nft_payload_type;
+extern struct nft_expr_type nft_dynset_type;
+extern struct nft_expr_type nft_range_type;
+
int nf_tables_core_module_init(void);
void nf_tables_core_module_exit(void);
-int nft_immediate_module_init(void);
-void nft_immediate_module_exit(void);
-
struct nft_cmp_fast_expr {
u32 data;
enum nft_registers sreg:8;
@@ -25,24 +31,6 @@ static inline u32 nft_cmp_fast_mask(unsigned int len)
extern const struct nft_expr_ops nft_cmp_fast_ops;
-int nft_cmp_module_init(void);
-void nft_cmp_module_exit(void);
-
-int nft_range_module_init(void);
-void nft_range_module_exit(void);
-
-int nft_lookup_module_init(void);
-void nft_lookup_module_exit(void);
-
-int nft_dynset_module_init(void);
-void nft_dynset_module_exit(void);
-
-int nft_bitwise_module_init(void);
-void nft_bitwise_module_exit(void);
-
-int nft_byteorder_module_init(void);
-void nft_byteorder_module_exit(void);
-
struct nft_payload {
enum nft_payload_bases base:8;
u8 offset;
@@ -57,12 +45,10 @@ struct nft_payload_set {
enum nft_registers sreg:8;
u8 csum_type;
u8 csum_offset;
+ u8 csum_flags;
};
extern const struct nft_expr_ops nft_payload_fast_ops;
extern struct static_key_false nft_trace_enabled;
-int nft_payload_module_init(void);
-void nft_payload_module_exit(void);
-
#endif /* _NET_NF_TABLES_CORE_H */
diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
new file mode 100644
index 000000000000..cbedda077db2
--- /dev/null
+++ b/include/net/netfilter/nft_fib.h
@@ -0,0 +1,31 @@
+#ifndef _NFT_FIB_H_
+#define _NFT_FIB_H_
+
+struct nft_fib {
+ enum nft_registers dreg:8;
+ u8 result;
+ u32 flags;
+};
+
+extern const struct nla_policy nft_fib_policy[];
+
+int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr);
+int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ const struct nlattr * const tb[]);
+int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ const struct nft_data **data);
+
+
+void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
+void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
+
+void nft_fib6_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
+void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt);
+
+void nft_fib_store_result(void *reg, enum nft_fib_result r,
+ const struct nft_pktinfo *pkt, int index);
+#endif
diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h
index 79f45e19f31e..130e58361f99 100644
--- a/include/net/netfilter/xt_rateest.h
+++ b/include/net/netfilter/xt_rateest.h
@@ -1,19 +1,23 @@
#ifndef _XT_RATEEST_H
#define _XT_RATEEST_H
+#include <net/gen_stats.h>
+
struct xt_rateest {
/* keep lock and bstats on same cache line to speedup xt_rateest_tg() */
struct gnet_stats_basic_packed bstats;
spinlock_t lock;
- /* keep rstats and lock on same cache line to speedup xt_rateest_mt() */
- struct gnet_stats_rate_est64 rstats;
+
/* following fields not accessed in hot path */
+ unsigned int refcnt;
struct hlist_node list;
char name[IFNAMSIZ];
- unsigned int refcnt;
struct gnet_estimator params;
struct rcu_head rcu;
+
+ /* keep this field on a separate cache line to speed up xt_rateest_mt() */
+ struct net_rate_estimator __rcu *rate_est;
};
struct xt_rateest *xt_rateest_lookup(const char *name);
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 254a0fc01800..d3938f11ae52 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -713,7 +713,7 @@ static inline int nla_ok(const struct nlattr *nla, int remaining)
*/
static inline struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
{
- int totlen = NLA_ALIGN(nla->nla_len);
+ unsigned int totlen = NLA_ALIGN(nla->nla_len);
*remaining -= totlen;
return (struct nlattr *) ((char *) nla + totlen);
@@ -1191,6 +1191,16 @@ static inline struct in6_addr nla_get_in6_addr(const struct nlattr *nla)
}
/**
+ * nla_memdup - duplicate attribute memory (kmemdup)
+ * @src: netlink attribute to duplicate from
+ * @gfp: GFP mask
+ */
+static inline void *nla_memdup(const struct nlattr *src, gfp_t gfp)
+{
+ return kmemdup(nla_data(src), nla_len(src), gfp);
+}
+
+/**
* nla_nest_start - Start a new level of nested attributes
* @skb: socket buffer to add attributes to
* @attrtype: attribute type of container
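nla_memdup() above is simply a kmemdup() of the attribute payload; a short sketch of the intended use (the surrounding setter and its field names are hypothetical):

#include <net/netlink.h>
#include <linux/slab.h>

/* Hypothetical parse step: keep a private copy of a variable-length attribute.
 * The caller owns the copy and must kfree() it later.
 */
static int example_set_secret(const struct nlattr *attr, void **secret,
                              unsigned int *secret_len)
{
        void *copy = nla_memdup(attr, GFP_KERNEL);

        if (!copy)
                return -ENOMEM;
        kfree(*secret);                 /* replace any previous copy */
        *secret = copy;
        *secret_len = nla_len(attr);
        return 0;
}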
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index e469e85de3f9..cf799fc3fdec 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -6,6 +6,12 @@
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+#include <linux/netfilter/nf_conntrack_dccp.h>
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+#include <linux/netfilter/nf_conntrack_sctp.h>
+#endif
#include <linux/seqlock.h>
struct ctl_table_header;
@@ -48,12 +54,49 @@ struct nf_icmp_net {
unsigned int timeout;
};
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+struct nf_dccp_net {
+ struct nf_proto_net pn;
+ int dccp_loose;
+ unsigned int dccp_timeout[CT_DCCP_MAX + 1];
+};
+#endif
+
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+struct nf_sctp_net {
+ struct nf_proto_net pn;
+ unsigned int timeouts[SCTP_CONNTRACK_MAX];
+};
+#endif
+
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+enum udplite_conntrack {
+ UDPLITE_CT_UNREPLIED,
+ UDPLITE_CT_REPLIED,
+ UDPLITE_CT_MAX
+};
+
+struct nf_udplite_net {
+ struct nf_proto_net pn;
+ unsigned int timeouts[UDPLITE_CT_MAX];
+};
+#endif
+
struct nf_ip_net {
struct nf_generic_net generic;
struct nf_tcp_net tcp;
struct nf_udp_net udp;
struct nf_icmp_net icmp;
struct nf_icmp_net icmpv6;
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ struct nf_dccp_net dccp;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ struct nf_sctp_net sctp;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+ struct nf_udplite_net udplite;
+#endif
};
struct ct_pcpu {
@@ -91,7 +134,6 @@ struct netns_ct {
struct nf_ip_net nf_ct_proto;
#if defined(CONFIG_NF_CONNTRACK_LABELS)
unsigned int labels_used;
- u8 label_words;
#endif
};
#endif
diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h
index 70e158551704..f15daaa89385 100644
--- a/include/net/netns/generic.h
+++ b/include/net/netns/generic.h
@@ -25,20 +25,24 @@
*/
struct net_generic {
- unsigned int len;
- struct rcu_head rcu;
-
- void *ptr[0];
+ union {
+ struct {
+ unsigned int len;
+ struct rcu_head rcu;
+ } s;
+
+ void *ptr[0];
+ };
};
-static inline void *net_generic(const struct net *net, int id)
+static inline void *net_generic(const struct net *net, unsigned int id)
{
struct net_generic *ng;
void *ptr;
rcu_read_lock();
ng = rcu_dereference(net->gen);
- ptr = ng->ptr[id - 1];
+ ptr = ng->ptr[id];
rcu_read_unlock();
return ptr;
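net_generic() now takes an unsigned int id and indexes ptr[] directly; the caller side usually looks like the hedged sketch below. The example subsystem and its fields are hypothetical; register_pernet_subsys() and struct pernet_operations are the existing pernet API:

#include <net/net_namespace.h>
#include <net/netns/generic.h>

/* Hypothetical per-namespace state. */
struct example_net {
        unsigned int pkt_count;
};

static unsigned int example_net_id;     /* unsigned, matching net_generic() */

static inline struct example_net *example_pernet(struct net *net)
{
        return net_generic(net, example_net_id);
}

static struct pernet_operations example_net_ops = {
        .id   = &example_net_id,
        .size = sizeof(struct example_net),
};

static int __init example_pernet_init(void)
{
        /* The core allocates ->size bytes per netns and fills in *ops->id. */
        return register_pernet_subsys(&example_net_ops);
}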
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 7adf4386ac8f..0378e88f6fd3 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -110,6 +110,7 @@ struct netns_ipv4 {
int sysctl_tcp_orphan_retries;
int sysctl_tcp_fin_timeout;
unsigned int sysctl_tcp_notsent_lowat;
+ int sysctl_tcp_tw_reuse;
int sysctl_igmp_max_memberships;
int sysctl_igmp_max_msf;
@@ -135,6 +136,9 @@ struct netns_ipv4 {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int sysctl_fib_multipath_use_neigh;
#endif
+
+ unsigned int fib_seq; /* protected by rtnl_mutex */
+
atomic_t rt_genid;
};
#endif
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 10d0848f5b8a..de7745e2edcc 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -85,6 +85,7 @@ struct netns_ipv6 {
#endif
atomic_t dev_addr_genid;
atomic_t fib6_sernum;
+ struct seg6_pernet_data *seg6_data;
};
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 58487b1cc99a..cea396b53a60 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -17,5 +17,11 @@ struct netns_nf {
struct ctl_table_header *nf_log_dir_header;
#endif
struct nf_hook_entry __rcu *hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
+ bool defrag_ipv4;
+#endif
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+ bool defrag_ipv6;
+#endif
};
#endif
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 767b03a3fe67..f0a051480c6c 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -171,6 +171,8 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
+int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
+ struct net_device **hw_dev);
/**
* struct tcf_pkt_info - packet information
@@ -425,16 +427,14 @@ struct tc_cls_u32_offload {
};
};
-static inline bool tc_should_offload(const struct net_device *dev,
- const struct tcf_proto *tp, u32 flags)
+static inline bool tc_can_offload(const struct net_device *dev,
+ const struct tcf_proto *tp)
{
const struct Qdisc *sch = tp->q;
const struct Qdisc_class_ops *cops = sch->ops->cl_ops;
if (!(dev->features & NETIF_F_HW_TC))
return false;
- if (flags & TCA_CLS_FLAGS_SKIP_HW)
- return false;
if (!dev->netdev_ops->ndo_setup_tc)
return false;
if (cops && cops->tcf_cl_offload)
@@ -443,6 +443,19 @@ static inline bool tc_should_offload(const struct net_device *dev,
return true;
}
+static inline bool tc_skip_hw(u32 flags)
+{
+ return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
+}
+
+static inline bool tc_should_offload(const struct net_device *dev,
+ const struct tcf_proto *tp, u32 flags)
+{
+ if (tc_skip_hw(flags))
+ return false;
+ return tc_can_offload(dev, tp);
+}
+
static inline bool tc_skip_sw(u32 flags)
{
return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
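With the split above, tc_should_offload() is just tc_skip_hw() plus the device capability test tc_can_offload(). A classifier typically branches as in this hedged sketch (the install function and its return values are illustrative only):

#include <linux/errno.h>
#include <net/pkt_cls.h>

/* Hypothetical classifier step: decide where a rule may be installed. */
static int example_install_rule(struct net_device *dev, struct tcf_proto *tp,
                                u32 flags)
{
        if (tc_should_offload(dev, tp, flags)) {
                /* SKIP_HW not set and the device can offload */
                /* ... program the NIC here ... */
        } else if (tc_skip_sw(flags)) {
                /* hardware-only was requested but is unavailable */
                return -EINVAL;
        }
        /* ... otherwise (or additionally) keep the software rule ... */
        return 0;
}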
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index cd334c9584e9..f1b76b8e6d2d 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -6,6 +6,8 @@
#include <linux/if_vlan.h>
#include <net/sch_generic.h>
+#define DEFAULT_TX_QUEUE_LEN 1000
+
struct qdisc_walker {
int stop;
int skip;
diff --git a/include/net/raw.h b/include/net/raw.h
index 3e789008394d..57c33dd22ec4 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -23,6 +23,12 @@
extern struct proto raw_prot;
+extern struct raw_hashinfo raw_v4_hashinfo;
+struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
+ unsigned short num, __be32 raddr,
+ __be32 laddr, int dif);
+
+int raw_abort(struct sock *sk, int err);
void raw_icmp_error(struct sk_buff *, int, u32);
int raw_local_deliver(struct sk_buff *, int);
diff --git a/include/net/rawv6.h b/include/net/rawv6.h
index 87783dea0791..cbe4e9de1894 100644
--- a/include/net/rawv6.h
+++ b/include/net/rawv6.h
@@ -3,6 +3,13 @@
#include <net/protocol.h>
+extern struct raw_hashinfo raw_v6_hashinfo;
+struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
+ unsigned short num, const struct in6_addr *loc_addr,
+ const struct in6_addr *rmt_addr, int dif);
+
+int raw_abort(struct sock *sk, int err);
+
void raw6_icmp_error(struct sk_buff *, int nexthdr,
u8 type, u8 code, int inner_offset, __be32);
bool raw6_local_deliver(struct sk_buff *, int);
diff --git a/include/net/red.h b/include/net/red.h
index 76e0b5f922c6..208e718e16b9 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -207,7 +207,7 @@ static inline void red_set_parms(struct red_parms *p,
static inline int red_is_idling(const struct red_vars *v)
{
- return v->qidlestart.tv64 != 0;
+ return v->qidlestart != 0;
}
static inline void red_start_of_idle_period(struct red_vars *v)
@@ -217,7 +217,7 @@ static inline void red_start_of_idle_period(struct red_vars *v)
static inline void red_end_of_idle_period(struct red_vars *v)
{
- v->qidlestart.tv64 = 0;
+ v->qidlestart = 0;
}
static inline void red_restart(struct red_vars *v)
diff --git a/include/net/route.h b/include/net/route.h
index 0429d47cad25..c0874c87c173 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -153,7 +153,7 @@ static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi
flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos,
RT_SCOPE_UNIVERSE, proto,
sk ? inet_sk_flowi_flags(sk) : 0,
- daddr, saddr, dport, sport);
+ daddr, saddr, dport, sport, sock_net_uid(net, sk));
if (sk)
security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
return ip_route_output_flow(net, fl4, sk);
@@ -269,7 +269,8 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32
flow_flags |= FLOWI_FLAG_ANYSRC;
flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
- protocol, flow_flags, dst, src, dport, sport);
+ protocol, flow_flags, dst, src, dport, sport,
+ sk->sk_uid);
}
static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index e6aa0a249672..498f81b229a4 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -76,7 +76,7 @@ struct Qdisc {
struct netdev_queue *dev_queue;
- struct gnet_stats_rate_est64 rate_est;
+ struct net_rate_estimator __rcu *rate_est;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 31acc3f4f132..d8833a86cd7e 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -69,7 +69,7 @@
#include <net/ip6_route.h>
#endif
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/page.h>
#include <net/sock.h>
#include <net/snmp.h>
@@ -164,7 +164,7 @@ void sctp_backlog_migrate(struct sctp_association *assoc,
struct sock *oldsk, struct sock *newsk);
int sctp_transport_hashtable_init(void);
void sctp_transport_hashtable_destroy(void);
-void sctp_hash_transport(struct sctp_transport *t);
+int sctp_hash_transport(struct sctp_transport *t);
void sctp_unhash_transport(struct sctp_transport *t);
struct sctp_transport *sctp_addrs_lookup_transport(
struct net *net,
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 11c3bf262a85..92daabdc007d 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -124,7 +124,7 @@ extern struct sctp_globals {
/* This is the sctp port control hash. */
struct sctp_bind_hashbucket *port_hashtable;
/* This is the hash of all transports. */
- struct rhashtable transport_hashtable;
+ struct rhltable transport_hashtable;
/* Sizes of above hashtables. */
int ep_hashsize;
@@ -530,7 +530,6 @@ struct sctp_datamsg {
/* Did the message fail to send? */
int send_error;
u8 send_failed:1,
- can_abandon:1, /* can chunks from this message can be abandoned. */
can_delay; /* should this message be Nagle delayed */
};
@@ -641,7 +640,6 @@ struct sctp_chunk {
#define SCTP_NEED_FRTX 0x1
#define SCTP_DONT_FRTX 0x2
__u16 rtt_in_progress:1, /* This chunk used for RTT calc? */
- resent:1, /* Has this chunk ever been resent. */
has_tsn:1, /* Does this chunk have a TSN yet? */
has_ssn:1, /* Does this chunk have a SSN yet? */
singleton:1, /* Only chunk in the packet? */
@@ -656,6 +654,7 @@ struct sctp_chunk {
fast_retransmit:2; /* Is this chunk fast retransmitted? */
};
+#define sctp_chunk_retransmitted(chunk) (chunk->sent_count > 1)
void sctp_chunk_hold(struct sctp_chunk *);
void sctp_chunk_put(struct sctp_chunk *);
int sctp_user_addto_chunk(struct sctp_chunk *chunk, int len,
@@ -762,7 +761,7 @@ static inline int sctp_packet_empty(struct sctp_packet *packet)
struct sctp_transport {
/* A list of transports. */
struct list_head transports;
- struct rhash_head node;
+ struct rhlist_head node;
/* Reference counting. */
atomic_t refcnt;
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index 3f36d45b714a..0caee631a836 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -6,10 +6,10 @@
u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
__be16 dport);
-__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
- __be16 sport, __be16 dport);
-__u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
- __be16 sport, __be16 dport);
+u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport, u32 *tsoff);
+u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
+ __be16 sport, __be16 dport, u32 *tsoff);
u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport);
u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
diff --git a/include/net/seg6.h b/include/net/seg6.h
new file mode 100644
index 000000000000..4e0357517d79
--- /dev/null
+++ b/include/net/seg6.h
@@ -0,0 +1,62 @@
+/*
+ * SR-IPv6 implementation
+ *
+ * Author:
+ * David Lebrun <david.lebrun@uclouvain.be>
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _NET_SEG6_H
+#define _NET_SEG6_H
+
+#include <linux/net.h>
+#include <linux/ipv6.h>
+#include <net/lwtunnel.h>
+#include <linux/seg6.h>
+#include <linux/rhashtable.h>
+
+static inline void update_csum_diff4(struct sk_buff *skb, __be32 from,
+ __be32 to)
+{
+ __be32 diff[] = { ~from, to };
+
+ skb->csum = ~csum_partial((char *)diff, sizeof(diff), ~skb->csum);
+}
+
+static inline void update_csum_diff16(struct sk_buff *skb, __be32 *from,
+ __be32 *to)
+{
+ __be32 diff[] = {
+ ~from[0], ~from[1], ~from[2], ~from[3],
+ to[0], to[1], to[2], to[3],
+ };
+
+ skb->csum = ~csum_partial((char *)diff, sizeof(diff), ~skb->csum);
+}
+
+struct seg6_pernet_data {
+ struct mutex lock;
+ struct in6_addr __rcu *tun_src;
+#ifdef CONFIG_IPV6_SEG6_HMAC
+ struct rhashtable hmac_infos;
+#endif
+};
+
+static inline struct seg6_pernet_data *seg6_pernet(struct net *net)
+{
+ return net->ipv6.seg6_data;
+}
+
+extern int seg6_init(void);
+extern void seg6_exit(void);
+extern int seg6_iptunnel_init(void);
+extern void seg6_iptunnel_exit(void);
+
+extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len);
+
+#endif
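The update_csum_diff16() helper above keeps skb->csum consistent when sixteen bytes are rewritten in place; a hedged sketch of the kind of caller it is meant for. The surrounding segment-advance logic and the CHECKSUM_COMPLETE guard are assumptions; only the helper itself comes from this header:

#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <net/seg6.h>

/* Hypothetical step: replace the IPv6 destination with the next segment while
 * keeping a CHECKSUM_COMPLETE skb consistent.
 */
static void example_advance_segment(struct sk_buff *skb, struct ipv6hdr *hdr,
                                    const struct in6_addr *next_seg)
{
        if (skb->ip_summed == CHECKSUM_COMPLETE)
                update_csum_diff16(skb, (__be32 *)&hdr->daddr,
                                   (__be32 *)next_seg);
        hdr->daddr = *next_seg;
}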
diff --git a/include/net/seg6_hmac.h b/include/net/seg6_hmac.h
new file mode 100644
index 000000000000..69c3a106056b
--- /dev/null
+++ b/include/net/seg6_hmac.h
@@ -0,0 +1,62 @@
+/*
+ * SR-IPv6 implementation
+ *
+ * Author:
+ * David Lebrun <david.lebrun@uclouvain.be>
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _NET_SEG6_HMAC_H
+#define _NET_SEG6_HMAC_H
+
+#include <net/flow.h>
+#include <net/ip6_fib.h>
+#include <net/sock.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/route.h>
+#include <net/seg6.h>
+#include <linux/seg6_hmac.h>
+#include <linux/rhashtable.h>
+
+#define SEG6_HMAC_MAX_DIGESTSIZE 160
+#define SEG6_HMAC_RING_SIZE 256
+
+struct seg6_hmac_info {
+ struct rhash_head node;
+ struct rcu_head rcu;
+
+ u32 hmackeyid;
+ char secret[SEG6_HMAC_SECRET_LEN];
+ u8 slen;
+ u8 alg_id;
+};
+
+struct seg6_hmac_algo {
+ u8 alg_id;
+ char name[64];
+ struct crypto_shash * __percpu *tfms;
+ struct shash_desc * __percpu *shashs;
+};
+
+extern int seg6_hmac_compute(struct seg6_hmac_info *hinfo,
+ struct ipv6_sr_hdr *hdr, struct in6_addr *saddr,
+ u8 *output);
+extern struct seg6_hmac_info *seg6_hmac_info_lookup(struct net *net, u32 key);
+extern int seg6_hmac_info_add(struct net *net, u32 key,
+ struct seg6_hmac_info *hinfo);
+extern int seg6_hmac_info_del(struct net *net, u32 key);
+extern int seg6_push_hmac(struct net *net, struct in6_addr *saddr,
+ struct ipv6_sr_hdr *srh);
+extern bool seg6_hmac_validate_skb(struct sk_buff *skb);
+extern int seg6_hmac_init(void);
+extern void seg6_hmac_exit(void);
+extern int seg6_hmac_net_init(struct net *net);
+extern void seg6_hmac_net_exit(struct net *net);
+
+#endif
diff --git a/include/net/sock.h b/include/net/sock.h
index 92b269709b9a..f0e867f58722 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -343,6 +343,9 @@ struct sock {
#define sk_rxhash __sk_common.skc_rxhash
socket_lock_t sk_lock;
+ atomic_t sk_drops;
+ int sk_rcvlowat;
+ struct sk_buff_head sk_error_queue;
struct sk_buff_head sk_receive_queue;
/*
* The backlog queue is special, it is always used with
@@ -359,14 +362,13 @@ struct sock {
struct sk_buff *tail;
} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc
- int sk_forward_alloc;
- __u32 sk_txhash;
+ int sk_forward_alloc;
#ifdef CONFIG_NET_RX_BUSY_POLL
- unsigned int sk_napi_id;
unsigned int sk_ll_usec;
+ /* ===== mostly read cache line ===== */
+ unsigned int sk_napi_id;
#endif
- atomic_t sk_drops;
int sk_rcvbuf;
struct sk_filter __rcu *sk_filter;
@@ -379,16 +381,50 @@ struct sock {
#endif
struct dst_entry *sk_rx_dst;
struct dst_entry __rcu *sk_dst_cache;
- /* Note: 32bit hole on 64bit arches */
- atomic_t sk_wmem_alloc;
atomic_t sk_omem_alloc;
int sk_sndbuf;
+
+ /* ===== cache line for TX ===== */
+ int sk_wmem_queued;
+ atomic_t sk_wmem_alloc;
+ unsigned long sk_tsq_flags;
+ struct sk_buff *sk_send_head;
struct sk_buff_head sk_write_queue;
+ __s32 sk_peek_off;
+ int sk_write_pending;
+ long sk_sndtimeo;
+ struct timer_list sk_timer;
+ __u32 sk_priority;
+ __u32 sk_mark;
+ u32 sk_pacing_rate; /* bytes per second */
+ u32 sk_max_pacing_rate;
+ struct page_frag sk_frag;
+ netdev_features_t sk_route_caps;
+ netdev_features_t sk_route_nocaps;
+ int sk_gso_type;
+ unsigned int sk_gso_max_size;
+ gfp_t sk_allocation;
+ __u32 sk_txhash;
/*
* Because of non atomicity rules, all
* changes are protected by socket lock.
*/
+ unsigned int __sk_flags_offset[0];
+#ifdef __BIG_ENDIAN_BITFIELD
+#define SK_FL_PROTO_SHIFT 16
+#define SK_FL_PROTO_MASK 0x00ff0000
+
+#define SK_FL_TYPE_SHIFT 0
+#define SK_FL_TYPE_MASK 0x0000ffff
+#else
+#define SK_FL_PROTO_SHIFT 8
+#define SK_FL_PROTO_MASK 0x0000ff00
+
+#define SK_FL_TYPE_SHIFT 16
+#define SK_FL_TYPE_MASK 0xffff0000
+#endif
+
kmemcheck_bitfield_begin(flags);
unsigned int sk_padding : 2,
sk_no_check_tx : 1,
@@ -399,41 +435,24 @@ struct sock {
#define SK_PROTOCOL_MAX U8_MAX
kmemcheck_bitfield_end(flags);
- int sk_wmem_queued;
- gfp_t sk_allocation;
- u32 sk_pacing_rate; /* bytes per second */
- u32 sk_max_pacing_rate;
- netdev_features_t sk_route_caps;
- netdev_features_t sk_route_nocaps;
- int sk_gso_type;
- unsigned int sk_gso_max_size;
u16 sk_gso_max_segs;
- int sk_rcvlowat;
unsigned long sk_lingertime;
- struct sk_buff_head sk_error_queue;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err,
sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
- __u32 sk_priority;
- __u32 sk_mark;
+ kuid_t sk_uid;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long sk_rcvtimeo;
- long sk_sndtimeo;
- struct timer_list sk_timer;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
struct socket *sk_socket;
void *sk_user_data;
- struct page_frag sk_frag;
- struct sk_buff *sk_send_head;
- __s32 sk_peek_off;
- int sk_write_pending;
#ifdef CONFIG_SECURITY
void *sk_security;
#endif
@@ -894,7 +913,20 @@ static inline void sock_rps_record_flow_hash(__u32 hash)
static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
- sock_rps_record_flow_hash(sk->sk_rxhash);
+ if (static_key_false(&rfs_needed)) {
+ /* Reading sk->sk_rxhash might incur an expensive cache line
+ * miss.
+ *
+ * TCP_ESTABLISHED does cover almost all states where RFS
+ * might be useful, and is cheaper [1] than testing :
+ * IPv4: inet_sk(sk)->inet_daddr
+ * IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
+ * OR an additional socket flag
+ * [1] : sk_state and sk_prot are in the same cache line.
+ */
+ if (sk->sk_state == TCP_ESTABLISHED)
+ sock_rps_record_flow_hash(sk->sk_rxhash);
+ }
#endif
}
@@ -914,14 +946,16 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
#endif
}
-#define sk_wait_event(__sk, __timeo, __condition) \
+#define sk_wait_event(__sk, __timeo, __condition, __wait) \
({ int __rc; \
release_sock(__sk); \
__rc = __condition; \
if (!__rc) { \
- *(__timeo) = schedule_timeout(*(__timeo)); \
+ *(__timeo) = wait_woken(__wait, \
+ TASK_INTERRUPTIBLE, \
+ *(__timeo)); \
} \
- sched_annotate_sleep(); \
+ sched_annotate_sleep(); \
lock_sock(__sk); \
__rc = __condition; \
__rc; \
@@ -1162,11 +1196,6 @@ static inline void sk_enter_memory_pressure(struct sock *sk)
sk->sk_prot->enter_memory_pressure(sk);
}
-static inline long sk_prot_mem_limits(const struct sock *sk, int index)
-{
- return sk->sk_prot->sysctl_mem[index];
-}
-
static inline long
sk_memory_allocated(const struct sock *sk)
{
@@ -1276,14 +1305,32 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
/*
* Functions for memory accounting
*/
+int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
int __sk_mem_schedule(struct sock *sk, int size, int kind);
+void __sk_mem_reduce_allocated(struct sock *sk, int amount);
void __sk_mem_reclaim(struct sock *sk, int amount);
-#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
+/* We used to have PAGE_SIZE here, but systems with 64KB pages
+ * do not necessarily have 16 times more memory than 4KB ones.
+ */
+#define SK_MEM_QUANTUM 4096
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
#define SK_MEM_SEND 0
#define SK_MEM_RECV 1
+/* sysctl_mem values are in pages, we convert them in SK_MEM_QUANTUM units */
+static inline long sk_prot_mem_limits(const struct sock *sk, int index)
+{
+ long val = sk->sk_prot->sysctl_mem[index];
+
+#if PAGE_SIZE > SK_MEM_QUANTUM
+ val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT;
+#elif PAGE_SIZE < SK_MEM_QUANTUM
+ val >>= SK_MEM_QUANTUM_SHIFT - PAGE_SHIFT;
+#endif
+ return val;
+}
+
static inline int sk_mem_pages(int amt)
{
return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
@@ -1651,6 +1698,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
sk->sk_wq = parent->wq;
parent->sk = sk;
sk_set_socket(sk, parent);
+ sk->sk_uid = SOCK_INODE(parent)->i_uid;
security_sock_graft(sk, parent);
write_unlock_bh(&sk->sk_callback_lock);
}
@@ -1658,6 +1706,11 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
kuid_t sock_i_uid(struct sock *sk);
unsigned long sock_i_ino(struct sock *sk);
+static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
+{
+ return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
+}
+
static inline u32 net_tx_rndhash(void)
{
u32 v = prandom_u32();
@@ -1783,13 +1836,13 @@ static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
{
if (skb->ip_summed == CHECKSUM_NONE) {
__wsum csum = 0;
- if (csum_and_copy_from_iter(to, copy, &csum, from) != copy)
+ if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
return -EFAULT;
skb->csum = csum_block_add(skb->csum, csum, offset);
} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
- if (copy_from_iter_nocache(to, copy, from) != copy)
+ if (!copy_from_iter_full_nocache(to, copy, from))
return -EFAULT;
- } else if (copy_from_iter(to, copy, from) != copy)
+ } else if (!copy_from_iter_full(to, copy, from))
return -EFAULT;
return 0;
@@ -1952,6 +2005,8 @@ void sk_reset_timer(struct sock *sk, struct timer_list *timer,
void sk_stop_timer(struct sock *sk, struct timer_list *timer);
+int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
+ unsigned int flags);
int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
@@ -2108,7 +2163,8 @@ struct sock_skb_cb {
static inline void
sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
{
- SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops);
+ SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
+ atomic_read(&sk->sk_drops) : 0;
}
static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
@@ -2137,8 +2193,8 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
*/
if (sock_flag(sk, SOCK_RCVTSTAMP) ||
(sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
- (kt.tv64 && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
- (hwtstamps->hwtstamp.tv64 &&
+ (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
+ (hwtstamps->hwtstamp &&
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
__sock_recv_timestamp(msg, sk, skb);
else
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index 62770add15bd..604bc31e23ab 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -8,13 +8,13 @@ struct tcf_mirred {
struct tc_action common;
int tcfm_eaction;
int tcfm_ifindex;
- int tcfm_ok_push;
+ bool tcfm_mac_header_xmit;
struct net_device __rcu *tcfm_dev;
struct list_head tcfm_list;
};
#define to_mirred(a) ((struct tcf_mirred *)a)
-static inline bool is_tcf_mirred_redirect(const struct tc_action *a)
+static inline bool is_tcf_mirred_egress_redirect(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
if (a->ops && a->ops->type == TCA_ACT_MIRRED)
@@ -23,7 +23,7 @@ static inline bool is_tcf_mirred_redirect(const struct tc_action *a)
return false;
}
-static inline bool is_tcf_mirred_mirror(const struct tc_action *a)
+static inline bool is_tcf_mirred_egress_mirror(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
if (a->ops && a->ops->type == TCA_ACT_MIRRED)
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index 5767e9dbcf92..19cd3d345804 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -27,6 +27,7 @@ struct tcf_skbedit {
u32 flags;
u32 priority;
u32 mark;
+ u32 mask;
u16 queue_mapping;
u16 ptype;
};
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
index 253f8da6c2a6..efef0b4b1b2b 100644
--- a/include/net/tc_act/tc_tunnel_key.h
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -12,6 +12,8 @@
#define __NET_TC_TUNNEL_KEY_H
#include <net/act_api.h>
+#include <linux/tc_act/tc_tunnel_key.h>
+#include <net/dst_metadata.h>
struct tcf_tunnel_key_params {
struct rcu_head rcu;
@@ -27,4 +29,39 @@ struct tcf_tunnel_key {
#define to_tunnel_key(a) ((struct tcf_tunnel_key *)a)
+static inline bool is_tcf_tunnel_set(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ struct tcf_tunnel_key *t = to_tunnel_key(a);
+ struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
+
+ if (a->ops && a->ops->type == TCA_ACT_TUNNEL_KEY)
+ return params->tcft_action == TCA_TUNNEL_KEY_ACT_SET;
+#endif
+ return false;
+}
+
+static inline bool is_tcf_tunnel_release(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ struct tcf_tunnel_key *t = to_tunnel_key(a);
+ struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
+
+ if (a->ops && a->ops->type == TCA_ACT_TUNNEL_KEY)
+ return params->tcft_action == TCA_TUNNEL_KEY_ACT_RELEASE;
+#endif
+ return false;
+}
+
+static inline struct ip_tunnel_info *tcf_tunnel_info(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ struct tcf_tunnel_key *t = to_tunnel_key(a);
+ struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
+
+ return &params->tcft_enc_metadata->u.tun_info;
+#else
+ return NULL;
+#endif
+}
#endif /* __NET_TC_TUNNEL_KEY_H */
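Offload code can now inspect a tunnel_key action with the helpers defined just above. A hedged sketch of a driver-side parse step — the function and its return values are illustrative, and it assumes RTNL is held, as the rtnl_dereference() inside the helpers implies:

#include <linux/errno.h>
#include <net/tc_act/tc_tunnel_key.h>

/* Hypothetical offload step: pull encap parameters from a "tunnel_key set". */
static int example_parse_tunnel_action(const struct tc_action *a)
{
        if (is_tcf_tunnel_set(a)) {
                struct ip_tunnel_info *info = tcf_tunnel_info(a);

                /* ... program encapsulation from info->key ... */
                (void)info;
                return 0;
        }
        if (is_tcf_tunnel_release(a))
                return 0;               /* decapsulation request */
        return -EOPNOTSUPP;
}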
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 123979fe12bf..6061963cca98 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -252,7 +252,6 @@ extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
-extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_nometrics_save;
@@ -958,6 +957,7 @@ u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
u32 tcp_reno_ssthresh(struct sock *sk);
+u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
@@ -1515,11 +1515,26 @@ struct tcp_fastopen_context {
struct rcu_head rcu;
};
+/* Latencies incurred by various limits for a sender. They are
+ * chronograph-like stats that are mutually exclusive.
+ */
+enum tcp_chrono {
+ TCP_CHRONO_UNSPEC,
+ TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
+ TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
+ TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
+ __TCP_CHRONO_MAX,
+};
+
+void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
+void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
+
/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
struct sk_buff *skb;
+ tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
sk_wmem_free_skb(sk, skb);
sk_mem_reclaim(sk);
@@ -1578,8 +1593,10 @@ static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *
static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
- if (sk->sk_send_head == skb_unlinked)
+ if (sk->sk_send_head == skb_unlinked) {
sk->sk_send_head = NULL;
+ tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
+ }
if (tcp_sk(sk)->highest_sack == skb_unlinked)
tcp_sk(sk)->highest_sack = NULL;
}
@@ -1601,6 +1618,7 @@ static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb
/* Queue it, remembering where we must start sending. */
if (sk->sk_send_head == NULL) {
sk->sk_send_head = skb;
+ tcp_chrono_start(sk, TCP_CHRONO_BUSY);
if (tcp_sk(sk)->highest_sack == NULL)
tcp_sk(sk)->highest_sack = skb;
@@ -1808,7 +1826,7 @@ struct tcp_request_sock_ops {
struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
const struct request_sock *req,
bool *strict);
- __u32 (*init_seq)(const struct sk_buff *skb);
+ __u32 (*init_seq)(const struct sk_buff *skb, u32 *tsoff);
int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl, struct request_sock *req,
struct tcp_fastopen_cookie *foc,
diff --git a/include/net/udp.h b/include/net/udp.h
index 4948790d393d..1661791e8ca1 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -246,6 +246,25 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
}
/* net/ipv4/udp.c */
+void udp_destruct_sock(struct sock *sk);
+void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
+int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
+void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
+static inline struct sk_buff *
+__skb_recv_udp(struct sock *sk, unsigned int flags, int noblock, int *peeked,
+ int *off, int *err)
+{
+ return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
+ udp_skb_destructor, peeked, off, err);
+}
+static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
+ int noblock, int *err)
+{
+ int peeked, off = 0;
+
+ return __skb_recv_udp(sk, flags, noblock, &peeked, &off, err);
+}
+
void udp_v4_early_demux(struct sk_buff *skb);
int udp_get_port(struct sock *sk, unsigned short snum,
int (*saddr_cmp)(const struct sock *,
@@ -258,6 +277,7 @@ void udp_flush_pending_frames(struct sock *sk);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int udp_init_sock(struct sock *sk);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
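skb_recv_udp() above wraps __skb_recv_datagram() with the UDP destructor; a hedged sketch of a receive path built on it. The function and its error handling are illustrative, and the skb_consume_udp() length-argument convention shown in the comment is an assumption:

#include <net/udp.h>

/* Hypothetical receive step: dequeue one datagram and release it. */
static int example_recv_one(struct sock *sk, int noblock)
{
        int err;
        struct sk_buff *skb = skb_recv_udp(sk, 0, noblock, &err);

        if (!skb)
                return err;     /* e.g. -EAGAIN when noblock and queue empty */
        /* ... copy skb->data to the caller here ... */
        skb_consume_udp(sk, skb, 0);    /* assumption: 0 = no peek-offset rewind */
        return 0;
}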
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 80761938b9a7..ea340524f99b 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -20,13 +20,14 @@ static __inline__ int udplite_getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb)
{
struct msghdr *msg = from;
- return copy_from_iter(to, len, &msg->msg_iter) != len ? -EFAULT : 0;
+ return copy_from_iter_full(to, len, &msg->msg_iter) ? 0 : -EFAULT;
}
/* Designate sk as UDP-Lite socket */
static inline int udplite_sk_init(struct sock *sk)
{
udp_sk(sk)->pcflag = UDPLITE_BIT;
+ sk->sk_destruct = udp_destruct_sock;
return 0;
}
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 308adc4154f4..49a59202f85e 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -281,16 +281,6 @@ struct vxlan_dev {
struct net_device *vxlan_dev_create(struct net *net, const char *name,
u8 name_assign_type, struct vxlan_config *conf);
-static inline __be16 vxlan_dev_dst_port(struct vxlan_dev *vxlan,
- unsigned short family)
-{
-#if IS_ENABLED(CONFIG_IPV6)
- if (family == AF_INET6)
- return inet_sk(vxlan->vn6_sock->sock->sk)->inet_sport;
-#endif
- return inet_sk(vxlan->vn4_sock->sock->sk)->inet_sport;
-}
-
static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
netdev_features_t features)
{
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 931a47ba4571..1beab5532035 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -205,10 +205,12 @@ static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
if (dev) {
- ip4 = (struct in_device *)dev->ip_ptr;
- if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address)
+ ip4 = in_dev_get(dev);
+ if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address) {
ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address,
(struct in6_addr *)gid);
+ in_dev_put(ip4);
+ }
dev_put(dev);
}
}
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 92a7d85917b4..b49258b16f4e 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -603,4 +603,10 @@ struct ib_cm_sidr_rep_param {
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
struct ib_cm_sidr_rep_param *param);
+/**
+ * ibcm_reject_msg - return a pointer to a reject message string.
+ * @reason: Value returned in the REJECT event status field.
+ */
+const char *__attribute_const__ ibcm_reject_msg(int reason);
+
#endif /* IB_CM_H */
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index c8a773ffe23b..981214b3790c 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -46,7 +46,7 @@
#define IB_MGMT_BASE_VERSION 1
#define OPA_MGMT_BASE_VERSION 0x80
-#define OPA_SMP_CLASS_VERSION 0x80
+#define OPA_SM_CLASS_VERSION 0x80
/* Management classes */
#define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 5ad43a487745..958a24d8fae7 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -59,7 +59,7 @@
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
@@ -1102,6 +1102,7 @@ enum ib_qp_attr_mask {
IB_QP_RESERVED2 = (1<<22),
IB_QP_RESERVED3 = (1<<23),
IB_QP_RESERVED4 = (1<<24),
+ IB_QP_RATE_LIMIT = (1<<25),
};
enum ib_qp_state {
@@ -1151,6 +1152,7 @@ struct ib_qp_attr {
u8 rnr_retry;
u8 alt_port_num;
u8 alt_timeout;
+ u32 rate_limit;
};
enum ib_wr_opcode {
@@ -1592,17 +1594,19 @@ enum ib_flow_attr_type {
/* Supported steering header types */
enum ib_flow_spec_type {
/* L2 headers*/
- IB_FLOW_SPEC_ETH = 0x20,
- IB_FLOW_SPEC_IB = 0x22,
+ IB_FLOW_SPEC_ETH = 0x20,
+ IB_FLOW_SPEC_IB = 0x22,
/* L3 header*/
- IB_FLOW_SPEC_IPV4 = 0x30,
- IB_FLOW_SPEC_IPV6 = 0x31,
+ IB_FLOW_SPEC_IPV4 = 0x30,
+ IB_FLOW_SPEC_IPV6 = 0x31,
/* L4 headers*/
- IB_FLOW_SPEC_TCP = 0x40,
- IB_FLOW_SPEC_UDP = 0x41
+ IB_FLOW_SPEC_TCP = 0x40,
+ IB_FLOW_SPEC_UDP = 0x41,
+ IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
+ IB_FLOW_SPEC_INNER = 0x100,
};
#define IB_FLOW_SPEC_LAYER_MASK 0xF0
-#define IB_FLOW_SPEC_SUPPORT_LAYERS 4
+#define IB_FLOW_SPEC_SUPPORT_LAYERS 8
/* Flow steering rule priority is set according to it's domain.
* Lower domain value means higher priority.
@@ -1630,7 +1634,7 @@ struct ib_flow_eth_filter {
};
struct ib_flow_spec_eth {
- enum ib_flow_spec_type type;
+ u32 type;
u16 size;
struct ib_flow_eth_filter val;
struct ib_flow_eth_filter mask;
@@ -1644,7 +1648,7 @@ struct ib_flow_ib_filter {
};
struct ib_flow_spec_ib {
- enum ib_flow_spec_type type;
+ u32 type;
u16 size;
struct ib_flow_ib_filter val;
struct ib_flow_ib_filter mask;
@@ -1669,7 +1673,7 @@ struct ib_flow_ipv4_filter {
};
struct ib_flow_spec_ipv4 {
- enum ib_flow_spec_type type;
+ u32 type;
u16 size;
struct ib_flow_ipv4_filter val;
struct ib_flow_ipv4_filter mask;
@@ -1687,7 +1691,7 @@ struct ib_flow_ipv6_filter {
};
struct ib_flow_spec_ipv6 {
- enum ib_flow_spec_type type;
+ u32 type;
u16 size;
struct ib_flow_ipv6_filter val;
struct ib_flow_ipv6_filter mask;
@@ -1701,15 +1705,30 @@ struct ib_flow_tcp_udp_filter {
};
struct ib_flow_spec_tcp_udp {
- enum ib_flow_spec_type type;
+ u32 type;
u16 size;
struct ib_flow_tcp_udp_filter val;
struct ib_flow_tcp_udp_filter mask;
};
+struct ib_flow_tunnel_filter {
+ __be32 tunnel_id;
+ u8 real_sz[0];
+};
+
+/* ib_flow_spec_tunnel describes the VXLAN tunnel;
+ * the tunnel_id field in val carries the VNI value.
+ */
+struct ib_flow_spec_tunnel {
+ u32 type;
+ u16 size;
+ struct ib_flow_tunnel_filter val;
+ struct ib_flow_tunnel_filter mask;
+};
+
union ib_flow_spec {
struct {
- enum ib_flow_spec_type type;
+ u32 type;
u16 size;
};
struct ib_flow_spec_eth eth;
@@ -1717,6 +1736,7 @@ union ib_flow_spec {
struct ib_flow_spec_ipv4 ipv4;
struct ib_flow_spec_tcp_udp tcp_udp;
struct ib_flow_spec_ipv6 ipv6;
+ struct ib_flow_spec_tunnel tunnel;
};
struct ib_flow_attr {
@@ -1933,7 +1953,8 @@ struct ib_device {
struct ib_udata *udata);
int (*dealloc_pd)(struct ib_pd *pd);
struct ib_ah * (*create_ah)(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr);
+ struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata);
int (*modify_ah)(struct ib_ah *ah,
struct ib_ah_attr *ah_attr);
int (*query_ah)(struct ib_ah *ah,
@@ -2581,6 +2602,24 @@ void ib_dealloc_pd(struct ib_pd *pd);
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
/**
+ * ib_get_gids_from_rdma_hdr - Get sgid and dgid from a GRH or IPv4 header.
+ * @hdr: the L3 header to parse
+ * @net_type: type of header to parse
+ * @sgid: place to store source gid
+ * @dgid: place to store destination gid
+ */
+int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
+ enum rdma_network_type net_type,
+ union ib_gid *sgid, union ib_gid *dgid);
+
+/**
+ * ib_get_rdma_header_version - Get the header version
+ * @hdr: the L3 header to parse
+ */
+int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
+
+/**
* ib_init_ah_from_wc - Initializes address handle attributes from a
* work completion.
* @device: Device on which the received message arrived.
@@ -3357,4 +3396,7 @@ int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);
+
+int ib_resolve_eth_dmac(struct ib_device *device,
+ struct ib_ah_attr *ah_attr);
#endif /* IB_VERBS_H */
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index 6d0065c322b7..5cd7701db148 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -253,4 +253,10 @@ int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt);
int iw_cm_init_qp_attr(struct iw_cm_id *cm_id, struct ib_qp_attr *qp_attr,
int *qp_attr_mask);
+/**
+ * iwcm_reject_msg - return a pointer to a reject message string.
+ * @reason: Value returned in the REJECT event status field.
+ */
+const char *__attribute_const__ iwcm_reject_msg(int reason);
+
#endif /* IW_CM_H */
diff --git a/include/rdma/opa_smi.h b/include/rdma/opa_smi.h
index 4a529ef47995..f7896117936e 100644
--- a/include/rdma/opa_smi.h
+++ b/include/rdma/opa_smi.h
@@ -44,8 +44,6 @@
#define OPA_MAX_SLS 32
#define OPA_MAX_SCS 32
-#define OPA_SMI_CLASS_VERSION 0x80
-
#define OPA_LID_PERMISSIVE cpu_to_be32(0xFFFFFFFF)
struct opa_smp {
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 81fb1d15e8bb..d3968b561f86 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -388,4 +388,29 @@ int rdma_set_afonly(struct rdma_cm_id *id, int afonly);
*/
__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr);
+/**
+ * rdma_reject_msg - return a pointer to a reject message string.
+ * @id: Communication identifier that received the REJECT event.
+ * @reason: Value returned in the REJECT event status field.
+ */
+const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
+ int reason);
+/**
+ * rdma_is_consumer_reject - return true if the consumer rejected the connect
+ * request.
+ * @id: Communication identifier that received the REJECT event.
+ * @reason: Value returned in the REJECT event status field.
+ */
+bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason);
+
+/**
+ * rdma_consumer_reject_data - return the consumer reject private data and
+ * length, if any.
+ * @id: Communication identifier that received the REJECT event.
+ * @ev: RDMA CM reject event.
+ * @data_len: Pointer to the resulting length of the consumer data.
+ */
+const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
+ struct rdma_cm_event *ev, u8 *data_len);
+
#endif /* RDMA_CM_H */
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index e31502107a58..861e23eaebda 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -185,6 +185,27 @@ struct rvt_driver_provided {
* check_support() for details.
*/
+ /* hot path calldowns in a single cacheline */
+
+ /*
+ * Give the driver notice that there is send work to do. It is up to
+ * the driver to actually push the packets out; this just queues the
+ * work with the driver. There are two variants: the no_lock version
+ * requires the s_lock not to be held, while the other assumes the
+ * s_lock is held.
+ */
+ void (*schedule_send)(struct rvt_qp *qp);
+ void (*schedule_send_no_lock)(struct rvt_qp *qp);
+
+ /* Driver specific work request checking */
+ int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe);
+
+ /*
+ * Sometimes rdmavt needs to kick the driver's send progress. That is
+ * done by this callback.
+ */
+ void (*do_send)(struct rvt_qp *qp);
+
 /* Passed to ib core registration. Callback to create sysfs files */
int (*port_callback)(struct ib_device *, u8, struct kobject *);
@@ -223,22 +244,6 @@ struct rvt_driver_provided {
void (*notify_qp_reset)(struct rvt_qp *qp);
/*
- * Give the driver a notice that there is send work to do. It is up to
- * the driver to generally push the packets out, this just queues the
- * work with the driver. There are two variants here. The no_lock
- * version requires the s_lock not to be held. The other assumes the
- * s_lock is held.
- */
- void (*schedule_send)(struct rvt_qp *qp);
- void (*schedule_send_no_lock)(struct rvt_qp *qp);
-
- /*
- * Sometimes rdmavt needs to kick the driver's send progress. That is
- * done by this call back.
- */
- void (*do_send)(struct rvt_qp *qp);
-
- /*
* Get a path mtu from the driver based on qp attributes.
*/
int (*get_pmtu_from_attr)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
@@ -324,9 +329,6 @@ struct rvt_driver_provided {
void (*modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
- /* Driver specific work request checking */
- int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe);
-
/* Notify driver a mad agent has been created */
void (*notify_create_mad_agent)(struct rvt_dev_info *rdi, int port_idx);
@@ -355,12 +357,12 @@ struct rvt_dev_info {
/* post send table */
const struct rvt_operation_params *post_parms;
- struct rvt_mregion __rcu *dma_mr;
- struct rvt_lkey_table lkey_table;
-
/* Driver specific helper functions */
struct rvt_driver_provided driver_f;
+ struct rvt_mregion __rcu *dma_mr;
+ struct rvt_lkey_table lkey_table;
+
/* Internal use */
int n_pds_allocated;
spinlock_t n_pds_lock; /* Protect pd allocated count */
diff --git a/include/rdma/rdmavt_mr.h b/include/rdma/rdmavt_mr.h
index 6b3c6c8b6b77..de59de28b6a2 100644
--- a/include/rdma/rdmavt_mr.h
+++ b/include/rdma/rdmavt_mr.h
@@ -90,11 +90,15 @@ struct rvt_mregion {
#define RVT_MAX_LKEY_TABLE_BITS 23
struct rvt_lkey_table {
- spinlock_t lock; /* protect changes in this struct */
- u32 next; /* next unused index (speeds search) */
- u32 gen; /* generation count */
+ /* read mostly fields */
u32 max; /* size of the table */
+ u32 shift; /* lkey/rkey shift */
struct rvt_mregion __rcu **table;
+ /* writeable fields */
+ /* protect changes in this struct */
+ spinlock_t lock ____cacheline_aligned_in_smp;
+ u32 next; /* next unused index (speeds search) */
+ u32 gen; /* generation count */
};
/*
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 2c5183ef0243..f3dbd157ae5c 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -51,6 +51,7 @@
#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
+#include <rdma/rdmavt_cq.h>
/*
* Atomic bit definitions for r_aflags.
*/
@@ -485,6 +486,23 @@ static inline void rvt_put_qp(struct rvt_qp *qp)
}
/**
+ * rvt_put_swqe - drop mr refs held by swqe
+ * @wqe - the send wqe
+ *
+ * This drops any mr references held by the swqe
+ */
+static inline void rvt_put_swqe(struct rvt_swqe *wqe)
+{
+ int i;
+
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct rvt_sge *sge = &wqe->sg_list[i];
+
+ rvt_put_mr(sge->mr);
+ }
+}
+
+/**
* rvt_qp_wqe_reserve - reserve operation
* @qp - the rvt qp
* @wqe - the send wqe
@@ -527,6 +545,65 @@ static inline void rvt_qp_wqe_unreserve(
}
}
+extern const enum ib_wc_opcode ib_rvt_wc_opcode[];
+
+/**
+ * rvt_qp_swqe_complete() - insert send completion
+ * @qp - the qp
+ * @wqe - the send wqe
+ * @status - completion status
+ *
+ * Insert a send completion into the completion
+ * queue if the qp indicates it should be done.
+ *
+ * See IBTA 10.7.3.1 for info on completion
+ * control.
+ */
+static inline void rvt_qp_swqe_complete(
+ struct rvt_qp *qp,
+ struct rvt_swqe *wqe,
+ enum ib_wc_status status)
+{
+ if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
+ return;
+ if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+ status != IB_WC_SUCCESS) {
+ struct ib_wc wc;
+
+ memset(&wc, 0, sizeof(wc));
+ wc.wr_id = wqe->wr.wr_id;
+ wc.status = status;
+ wc.opcode = ib_rvt_wc_opcode[wqe->wr.opcode];
+ wc.qp = &qp->ibqp;
+ wc.byte_len = wqe->length;
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
+ status != IB_WC_SUCCESS);
+ }
+}
+
+/**
+ * rvt_div_round_up_mtu - perform a shift-based mtu round-up divide
+ * @qp - the queue pair
+ * @len - the length to divide
+ */
+static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
+{
+ return (len + qp->pmtu - 1) >> qp->log_pmtu;
+}
+
+/**
+ * rvt_div_mtu - perform a shift-based mtu divide
+ * @qp - the queue pair
+ * @len - the length to divide
+ */
+static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
+{
+ return len >> qp->log_pmtu;
+}
+
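Both helpers above assume the path MTU is a power of two, with qp->log_pmtu holding log2(qp->pmtu); a minimal usage sketch in a send path (qp and wqe are assumed to be valid, names are illustrative):

	u32 npkts = rvt_div_round_up_mtu(qp, wqe->length);	/* packets needed for wqe->length bytes */
	u32 nfull = rvt_div_mtu(qp, wqe->length);		/* full-MTU segments only */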
extern const int ib_rvt_state_ops[];
struct rvt_dev_info;
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index c1260d80ef30..df156f1d50b2 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -74,7 +74,7 @@ static inline int iscsi_sna_gte(u32 n1, u32 n2)
#define zero_data(p) {p[0]=0;p[1]=0;p[2]=0;}
/* initiator tags; opaque for target */
-typedef uint32_t __bitwise__ itt_t;
+typedef uint32_t __bitwise itt_t;
/* below makes sense only for initiator that created this tag */
#define build_itt(itt, age) ((__force itt_t)\
((itt) | ((age) << ISCSI_AGE_SHIFT)))
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 7428a53257ca..96dd0b3f70d7 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -44,6 +44,11 @@
#define FC_NO_ERR 0 /* no error */
#define FC_EX_TIMEOUT 1 /* Exchange timeout */
#define FC_EX_CLOSED 2 /* Exchange closed */
+#define FC_EX_ALLOC_ERR 3 /* Exchange allocation failed */
+#define FC_EX_XMIT_ERR 4 /* Exchange transmit failed */
+#define FC_EX_ELS_RJT 5 /* ELS rejected */
+#define FC_EX_INV_LOGIN 6 /* Login not completed */
+#define FC_EX_SEQ_ERR 6 /* Exchange sequence error */
/**
* enum fc_lport_state - Local port states
@@ -350,7 +355,8 @@ struct fc_fcp_pkt {
/* Timeout/error related information */
struct timer_list timer;
- int wait_for_comp;
+ int wait_for_comp;
+ int timer_delay;
u32 recov_retry;
struct fc_seq *recov_seq;
struct completion tm_done;
@@ -385,6 +391,7 @@ struct fc_seq {
#define FC_EX_DONE (1 << 0) /* ep is completed */
#define FC_EX_RST_CLEANUP (1 << 1) /* reset is forcing completion */
+#define FC_EX_QUARANTINE (1 << 2) /* exch is quarantined */
/**
* struct fc_exch - Fibre Channel Exchange
@@ -478,37 +485,6 @@ struct libfc_function_template {
void *arg, u32 timer_msec);
/*
- * Send the FC frame payload using a new exchange and sequence.
- *
- * The exchange response handler is set in this routine to resp()
- * function pointer. It can be called in two scenarios: if a timeout
- * occurs or if a response frame is received for the exchange. The
- * fc_frame pointer in response handler will also indicate timeout
- * as error using IS_ERR related macros.
- *
- * The exchange destructor handler is also set in this routine.
- * The destructor handler is invoked by EM layer when exchange
- * is about to free, this can be used by caller to free its
- * resources along with exchange free.
- *
- * The arg is passed back to resp and destructor handler.
- *
- * The timeout value (in msec) for an exchange is set if non zero
- * timer_msec argument is specified. The timer is canceled when
- * it fires or when the exchange is done. The exchange timeout handler
- * is registered by EM layer.
- *
- * STATUS: OPTIONAL
- */
- struct fc_seq *(*exch_seq_send)(struct fc_lport *, struct fc_frame *,
- void (*resp)(struct fc_seq *,
- struct fc_frame *,
- void *),
- void (*destructor)(struct fc_seq *,
- void *),
- void *, unsigned int timer_msec);
-
- /*
* Sets up the DDP context for a given exchange id on the given
* scatterlist if LLD supports DDP for large receive.
*
@@ -537,73 +513,6 @@ struct libfc_function_template {
* STATUS: OPTIONAL
*/
void (*get_lesb)(struct fc_lport *, struct fc_els_lesb *lesb);
- /*
- * Send a frame using an existing sequence and exchange.
- *
- * STATUS: OPTIONAL
- */
- int (*seq_send)(struct fc_lport *, struct fc_seq *,
- struct fc_frame *);
-
- /*
- * Send an ELS response using information from the received frame.
- *
- * STATUS: OPTIONAL
- */
- void (*seq_els_rsp_send)(struct fc_frame *, enum fc_els_cmd,
- struct fc_seq_els_data *);
-
- /*
- * Abort an exchange and sequence. Generally called because of a
- * exchange timeout or an abort from the upper layer.
- *
- * A timer_msec can be specified for abort timeout, if non-zero
- * timer_msec value is specified then exchange resp handler
- * will be called with timeout error if no response to abort.
- *
- * STATUS: OPTIONAL
- */
- int (*seq_exch_abort)(const struct fc_seq *,
- unsigned int timer_msec);
-
- /*
- * Indicate that an exchange/sequence tuple is complete and the memory
- * allocated for the related objects may be freed.
- *
- * STATUS: OPTIONAL
- */
- void (*exch_done)(struct fc_seq *);
-
- /*
- * Start a new sequence on the same exchange/sequence tuple.
- *
- * STATUS: OPTIONAL
- */
- struct fc_seq *(*seq_start_next)(struct fc_seq *);
-
- /*
- * Set a response handler for the exchange of the sequence.
- *
- * STATUS: OPTIONAL
- */
- void (*seq_set_resp)(struct fc_seq *sp,
- void (*resp)(struct fc_seq *, struct fc_frame *,
- void *),
- void *arg);
-
- /*
- * Assign a sequence for an incoming request frame.
- *
- * STATUS: OPTIONAL
- */
- struct fc_seq *(*seq_assign)(struct fc_lport *, struct fc_frame *);
-
- /*
- * Release the reference on the sequence returned by seq_assign().
- *
- * STATUS: OPTIONAL
- */
- void (*seq_release)(struct fc_seq *);
/*
* Reset an exchange manager, completing all sequences and exchanges.
@@ -615,27 +524,6 @@ struct libfc_function_template {
void (*exch_mgr_reset)(struct fc_lport *, u32 s_id, u32 d_id);
/*
- * Flush the rport work queue. Generally used before shutdown.
- *
- * STATUS: OPTIONAL
- */
- void (*rport_flush_queue)(void);
-
- /*
- * Receive a frame for a local port.
- *
- * STATUS: OPTIONAL
- */
- void (*lport_recv)(struct fc_lport *, struct fc_frame *);
-
- /*
- * Reset the local port.
- *
- * STATUS: OPTIONAL
- */
- int (*lport_reset)(struct fc_lport *);
-
- /*
* Set the local port FC_ID.
*
* This may be provided by the LLD to allow it to be
@@ -656,54 +544,6 @@ struct libfc_function_template {
struct fc_frame *);
/*
- * Create a remote port with a given port ID
- *
- * STATUS: OPTIONAL
- */
- struct fc_rport_priv *(*rport_create)(struct fc_lport *, u32);
-
- /*
- * Initiates the RP state machine. It is called from the LP module.
- * This function will issue the following commands to the N_Port
- * identified by the FC ID provided.
- *
- * - PLOGI
- * - PRLI
- * - RTV
- *
- * STATUS: OPTIONAL
- */
- int (*rport_login)(struct fc_rport_priv *);
-
- /*
- * Logoff, and remove the rport from the transport if
- * it had been added. This will send a LOGO to the target.
- *
- * STATUS: OPTIONAL
- */
- int (*rport_logoff)(struct fc_rport_priv *);
-
- /*
- * Receive a request from a remote port.
- *
- * STATUS: OPTIONAL
- */
- void (*rport_recv_req)(struct fc_lport *, struct fc_frame *);
-
- /*
- * lookup an rport by it's port ID.
- *
- * STATUS: OPTIONAL
- */
- struct fc_rport_priv *(*rport_lookup)(const struct fc_lport *, u32);
-
- /*
- * Destroy an rport after final kref_put().
- * The argument is a pointer to the kref inside the fc_rport_priv.
- */
- void (*rport_destroy)(struct kref *);
-
- /*
* Callback routine after the remote port is logged in
*
* STATUS: OPTIONAL
@@ -1068,18 +908,26 @@ void fc_vport_setlink(struct fc_lport *);
void fc_vports_linkchange(struct fc_lport *);
int fc_lport_config(struct fc_lport *);
int fc_lport_reset(struct fc_lport *);
+void fc_lport_recv(struct fc_lport *lport, struct fc_frame *fp);
int fc_set_mfs(struct fc_lport *, u32 mfs);
struct fc_lport *libfc_vport_create(struct fc_vport *, int privsize);
struct fc_lport *fc_vport_id_lookup(struct fc_lport *, u32 port_id);
-int fc_lport_bsg_request(struct fc_bsg_job *);
+int fc_lport_bsg_request(struct bsg_job *);
void fc_lport_set_local_id(struct fc_lport *, u32 port_id);
void fc_lport_iterate(void (*func)(struct fc_lport *, void *), void *);
/*
* REMOTE PORT LAYER
*****************************/
-int fc_rport_init(struct fc_lport *);
void fc_rport_terminate_io(struct fc_rport *);
+struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
+ u32 port_id);
+struct fc_rport_priv *fc_rport_create(struct fc_lport *, u32);
+void fc_rport_destroy(struct kref *kref);
+int fc_rport_login(struct fc_rport_priv *rdata);
+int fc_rport_logoff(struct fc_rport_priv *rdata);
+void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp);
+void fc_rport_flush_queue(void);
/*
* DISCOVERY LAYER
@@ -1131,6 +979,21 @@ void fc_fill_hdr(struct fc_frame *, const struct fc_frame *,
*****************************/
int fc_exch_init(struct fc_lport *);
void fc_exch_update_stats(struct fc_lport *lport);
+struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
+ struct fc_frame *fp,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *fp,
+ void *arg),
+ void (*destructor)(struct fc_seq *, void *),
+ void *arg, u32 timer_msec);
+void fc_seq_els_rsp_send(struct fc_frame *, enum fc_els_cmd,
+ struct fc_seq_els_data *);
+struct fc_seq *fc_seq_start_next(struct fc_seq *sp);
+void fc_seq_set_resp(struct fc_seq *sp,
+ void (*resp)(struct fc_seq *, struct fc_frame *, void *),
+ void *arg);
+struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp);
+void fc_seq_release(struct fc_seq *sp);
struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *,
struct fc_exch_mgr *,
bool (*match)(struct fc_frame *));
@@ -1142,6 +1005,9 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *, enum fc_class class,
void fc_exch_mgr_free(struct fc_lport *);
void fc_exch_recv(struct fc_lport *, struct fc_frame *);
void fc_exch_mgr_reset(struct fc_lport *, u32 s_id, u32 d_id);
+int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp);
+int fc_seq_exch_abort(const struct fc_seq *, unsigned int timer_msec);
+void fc_exch_done(struct fc_seq *sp);
/*
* Functions for fc_functions_template
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 8a9563144890..8990e580b278 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -414,14 +414,14 @@ extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
extern int scsi_execute_req_flags(struct scsi_device *sdev,
const unsigned char *cmd, int data_direction, void *buffer,
unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
- int retries, int *resid, u64 flags);
+ int retries, int *resid, u64 flags, req_flags_t rq_flags);
static inline int scsi_execute_req(struct scsi_device *sdev,
const unsigned char *cmd, int data_direction, void *buffer,
unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
int retries, int *resid)
{
return scsi_execute_req_flags(sdev, cmd, data_direction, buffer,
- bufflen, sshdr, timeout, retries, resid, 0);
+ bufflen, sshdr, timeout, retries, resid, 0, 0);
}
extern void sdev_disable_disk_events(struct scsi_device *sdev);
extern void sdev_enable_disk_events(struct scsi_device *sdev);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 7e4cd53139ed..36680f13270d 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -278,6 +278,14 @@ struct scsi_host_template {
int (* change_queue_depth)(struct scsi_device *, int);
/*
+ * This function lets the driver expose the queue mapping
+ * to the block layer.
+ *
+ * Status: OPTIONAL
+ */
+ int (* map_queues)(struct Scsi_Host *shost);
+
+ /*
* This function determines the BIOS parameters for a given
* harddisk. These tend to be numbers that are made up by
* the host adapter. Parameters:
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index d1defd1ebd95..6ba66e01f6df 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -299,4 +299,21 @@ struct scsi_lun {
#define SCSI_ACCESS_STATE_MASK 0x0f
#define SCSI_ACCESS_STATE_PREFERRED 0x80
+/* Reporting options for REPORT ZONES */
+enum zbc_zone_reporting_options {
+ ZBC_ZONE_REPORTING_OPTION_ALL = 0,
+ ZBC_ZONE_REPORTING_OPTION_EMPTY,
+ ZBC_ZONE_REPORTING_OPTION_IMPLICIT_OPEN,
+ ZBC_ZONE_REPORTING_OPTION_EXPLICIT_OPEN,
+ ZBC_ZONE_REPORTING_OPTION_CLOSED,
+ ZBC_ZONE_REPORTING_OPTION_FULL,
+ ZBC_ZONE_REPORTING_OPTION_READONLY,
+ ZBC_ZONE_REPORTING_OPTION_OFFLINE,
+ ZBC_ZONE_REPORTING_OPTION_NEED_RESET_WP = 0x10,
+ ZBC_ZONE_REPORTING_OPTION_NON_SEQWRITE,
+ ZBC_ZONE_REPORTING_OPTION_NON_WP = 0x3f,
+};
+
+#define ZBC_REPORT_ZONE_PARTIAL 0x80
+
#endif /* _SCSI_PROTO_H_ */
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index bf66ea6bed2b..924c8e614b45 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -28,9 +28,11 @@
#define SCSI_TRANSPORT_FC_H
#include <linux/sched.h>
+#include <linux/bsg-lib.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_netlink.h>
+#include <scsi/scsi_host.h>
struct scsi_transport_template;
@@ -624,48 +626,6 @@ struct fc_host_attrs {
#define fc_host_dev_loss_tmo(x) \
(((struct fc_host_attrs *)(x)->shost_data)->dev_loss_tmo)
-
-struct fc_bsg_buffer {
- unsigned int payload_len;
- int sg_cnt;
- struct scatterlist *sg_list;
-};
-
-/* Values for fc_bsg_job->state_flags (bitflags) */
-#define FC_RQST_STATE_INPROGRESS 0
-#define FC_RQST_STATE_DONE 1
-
-struct fc_bsg_job {
- struct Scsi_Host *shost;
- struct fc_rport *rport;
- struct device *dev;
- struct request *req;
- spinlock_t job_lock;
- unsigned int state_flags;
- unsigned int ref_cnt;
- void (*job_done)(struct fc_bsg_job *);
-
- struct fc_bsg_request *request;
- struct fc_bsg_reply *reply;
- unsigned int request_len;
- unsigned int reply_len;
- /*
- * On entry : reply_len indicates the buffer size allocated for
- * the reply.
- *
- * Upon completion : the message handler must set reply_len
- * to indicates the size of the reply to be returned to the
- * caller.
- */
-
- /* DMA payloads for the request/response */
- struct fc_bsg_buffer request_payload;
- struct fc_bsg_buffer reply_payload;
-
- void *dd_data; /* Used for driver-specific storage */
-};
-
-
/* The functions by which the transport class and the driver communicate */
struct fc_function_template {
void (*get_rport_dev_loss_tmo)(struct fc_rport *);
@@ -702,8 +662,8 @@ struct fc_function_template {
int (* it_nexus_response)(struct Scsi_Host *, u64, int);
/* bsg support */
- int (*bsg_request)(struct fc_bsg_job *);
- int (*bsg_timeout)(struct fc_bsg_job *);
+ int (*bsg_request)(struct bsg_job *);
+ int (*bsg_timeout)(struct bsg_job *);
/* allocation lengths for host-specific data */
u32 dd_fcrport_size;
@@ -849,4 +809,18 @@ struct fc_vport *fc_vport_create(struct Scsi_Host *shost, int channel,
int fc_vport_terminate(struct fc_vport *vport);
int fc_block_scsi_eh(struct scsi_cmnd *cmnd);
+static inline struct Scsi_Host *fc_bsg_to_shost(struct bsg_job *job)
+{
+ if (scsi_is_host_device(job->dev))
+ return dev_to_shost(job->dev);
+ return rport_to_shost(dev_to_rport(job->dev));
+}
+
+static inline struct fc_rport *fc_bsg_to_rport(struct bsg_job *job)
+{
+ if (scsi_is_fc_rport(job->dev))
+ return dev_to_rport(job->dev);
+ return NULL;
+}
+
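A sketch of how an LLD's bsg handler might use these helpers (the handler and the two dispatch functions are hypothetical, not part of the transport class):

static int example_bsg_request(struct bsg_job *job)
{
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct fc_rport *rport = fc_bsg_to_rport(job);	/* NULL for host-directed jobs */

	if (rport)
		return example_rport_bsg(shost, rport, job);	/* hypothetical helper */
	return example_host_bsg(shost, job);			/* hypothetical helper */
}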
#endif /* SCSI_TRANSPORT_FC_H */
diff --git a/include/soc/arc/aux.h b/include/soc/arc/aux.h
new file mode 100644
index 000000000000..8c3fb13e0452
--- /dev/null
+++ b/include/soc/arc/aux.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2016-2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __SOC_ARC_AUX_H__
+#define __SOC_ARC_AUX_H__
+
+#ifdef CONFIG_ARC
+
+#define read_aux_reg(r) __builtin_arc_lr(r)
+
+/* gcc builtin sr needs reg param to be long immediate */
+#define write_aux_reg(r, v) __builtin_arc_sr((unsigned int)(v), r)
+
+#else /* !CONFIG_ARC */
+
+static inline int read_aux_reg(u32 r)
+{
+ return 0;
+}
+
+/*
+ * This empty stub helps elide an unused variable warning;
+ * see: http://lists.infradead.org/pipermail/linux-snps-arc/2016-November/001748.html
+ */
+static inline void write_aux_reg(u32 r, u32 v)
+{
+ ;
+}
+
+#endif
+
+#define READ_BCR(reg, into) \
+{ \
+ unsigned int tmp; \
+ tmp = read_aux_reg(reg); \
+ if (sizeof(tmp) == sizeof(into)) { \
+ into = *((typeof(into) *)&tmp); \
+ } else { \
+ extern void bogus_undefined(void); \
+ bogus_undefined(); \
+ } \
+}
+
+#define WRITE_AUX(reg, into) \
+{ \
+ unsigned int tmp; \
+ if (sizeof(tmp) == sizeof(into)) { \
+ tmp = (*(unsigned int *)&(into)); \
+ write_aux_reg(reg, tmp); \
+ } else { \
+ extern void bogus_undefined(void); \
+ bogus_undefined(); \
+ } \
+}
+
+
+#endif
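An illustrative use of READ_BCR; struct bcr_timer and ARC_REG_TIMERS_BCR come from soc/arc/timers.h, added later in this patch, and the surrounding probe context is assumed:

	struct bcr_timer timer_bcr;

	READ_BCR(ARC_REG_TIMERS_BCR, timer_bcr);
	if (timer_bcr.rtc)
		pr_info("local 64-bit RTC present\n");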
diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h
new file mode 100644
index 000000000000..6902c2a8bd23
--- /dev/null
+++ b/include/soc/arc/mcip.h
@@ -0,0 +1,103 @@
+/*
+ * ARConnect IP Support (Multi core enabler: Cross core IPI, RTC ...)
+ *
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SOC_ARC_MCIP_H
+#define __SOC_ARC_MCIP_H
+
+#include <soc/arc/aux.h>
+
+#define ARC_REG_MCIP_BCR 0x0d0
+#define ARC_REG_MCIP_CMD 0x600
+#define ARC_REG_MCIP_WDATA 0x601
+#define ARC_REG_MCIP_READBACK 0x602
+
+struct mcip_cmd {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:8, param:16, cmd:8;
+#else
+ unsigned int cmd:8, param:16, pad:8;
+#endif
+
+#define CMD_INTRPT_GENERATE_IRQ 0x01
+#define CMD_INTRPT_GENERATE_ACK 0x02
+#define CMD_INTRPT_READ_STATUS 0x03
+#define CMD_INTRPT_CHECK_SOURCE 0x04
+
+/* Semaphore Commands */
+#define CMD_SEMA_CLAIM_AND_READ 0x11
+#define CMD_SEMA_RELEASE 0x12
+
+#define CMD_DEBUG_SET_MASK 0x34
+#define CMD_DEBUG_SET_SELECT 0x36
+
+#define CMD_GFRC_READ_LO 0x42
+#define CMD_GFRC_READ_HI 0x43
+
+#define CMD_IDU_ENABLE 0x71
+#define CMD_IDU_DISABLE 0x72
+#define CMD_IDU_SET_MODE 0x74
+#define CMD_IDU_SET_DEST 0x76
+#define CMD_IDU_SET_MASK 0x7C
+
+#define IDU_M_TRIG_LEVEL 0x0
+#define IDU_M_TRIG_EDGE 0x1
+
+#define IDU_M_DISTRI_RR 0x0
+#define IDU_M_DISTRI_DEST 0x2
+};
+
+struct mcip_bcr {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad3:8,
+ idu:1, llm:1, num_cores:6,
+ iocoh:1, gfrc:1, dbg:1, pad2:1,
+ msg:1, sem:1, ipi:1, pad:1,
+ ver:8;
+#else
+ unsigned int ver:8,
+ pad:1, ipi:1, sem:1, msg:1,
+ pad2:1, dbg:1, gfrc:1, iocoh:1,
+ num_cores:6, llm:1, idu:1,
+ pad3:8;
+#endif
+};
+
+/*
+ * MCIP programming model
+ *
+ * - Simple commands write {cmd:8,param:16} to MCIP_CMD aux reg
+ * (param could be irq, common_irq, core_id ...)
+ * - More involved commands setup MCIP_WDATA with cmd specific data
+ * before invoking the simple command
+ */
+static inline void __mcip_cmd(unsigned int cmd, unsigned int param)
+{
+ struct mcip_cmd buf;
+
+ buf.pad = 0;
+ buf.cmd = cmd;
+ buf.param = param;
+
+ WRITE_AUX(ARC_REG_MCIP_CMD, buf);
+}
+
+/*
+ * Set up additional data for a cmd
+ * Callers need to lock to ensure atomicity
+ */
+static inline void __mcip_cmd_data(unsigned int cmd, unsigned int param,
+ unsigned int data)
+{
+ write_aux_reg(ARC_REG_MCIP_WDATA, data);
+
+ __mcip_cmd(cmd, param);
+}
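A minimal sketch of firing a cross-core interrupt through the command interface above (the function name is hypothetical; real callers also serialize access to the CMD register):

static void mcip_ipi_send_sketch(int cpu)
{
	__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
}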
+
+#endif
diff --git a/include/soc/arc/timers.h b/include/soc/arc/timers.h
new file mode 100644
index 000000000000..a20ed2fbc432
--- /dev/null
+++ b/include/soc/arc/timers.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SOC_ARC_TIMERS_H
+#define __SOC_ARC_TIMERS_H
+
+#include <soc/arc/aux.h>
+
+/* Timer related Aux registers */
+#define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */
+#define ARC_REG_TIMER0_CTRL 0x22 /* timer 0 control */
+#define ARC_REG_TIMER0_CNT 0x21 /* timer 0 count */
+#define ARC_REG_TIMER1_LIMIT 0x102 /* timer 1 limit */
+#define ARC_REG_TIMER1_CTRL 0x101 /* timer 1 control */
+#define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */
+
+/* CTRL reg bits */
+#define TIMER_CTRL_IE (1 << 0) /* Interrupt when Count reaches limit */
+#define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */
+
+#define ARC_TIMERN_MAX 0xFFFFFFFF
+
+#define ARC_REG_TIMERS_BCR 0x75
+
+struct bcr_timer {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad2:15, rtsc:1, pad1:5, rtc:1, t1:1, t0:1, ver:8;
+#else
+ unsigned int ver:8, t0:1, t1:1, rtc:1, pad1:5, rtsc:1, pad2:15;
+#endif
+};
+
+#endif
diff --git a/include/soc/at91/atmel-secumod.h b/include/soc/at91/atmel-secumod.h
new file mode 100644
index 000000000000..22cd5d506926
--- /dev/null
+++ b/include/soc/at91/atmel-secumod.h
@@ -0,0 +1,19 @@
+/*
+ * Atmel Security Module register offsets and bit definitions.
+ *
+ * Copyright (C) 2016 Atmel
+ *
+ * Author: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_SOC_AT91_ATMEL_SECUMOD_H
+#define _LINUX_SOC_AT91_ATMEL_SECUMOD_H
+
+#define AT91_SECUMOD_RAMRDY 0x14
+#define AT91_SECUMOD_RAMRDY_READY BIT(0)
+
+#endif /* _LINUX_SOC_AT91_ATMEL_SECUMOD_H */
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index 3fb357193f09..cb979ad90401 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -109,14 +109,35 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_FRAMEBUFFER_SET_OVERSCAN = 0x0004800a,
RPI_FIRMWARE_FRAMEBUFFER_SET_PALETTE = 0x0004800b,
+ RPI_FIRMWARE_VCHIQ_INIT = 0x00048010,
+
RPI_FIRMWARE_GET_COMMAND_LINE = 0x00050001,
RPI_FIRMWARE_GET_DMA_CHANNELS = 0x00060001,
};
+#if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)
int rpi_firmware_property(struct rpi_firmware *fw,
u32 tag, void *data, size_t len);
int rpi_firmware_property_list(struct rpi_firmware *fw,
void *data, size_t tag_size);
struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node);
+#else
+static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag,
+ void *data, size_t len)
+{
+ return 0;
+}
+
+static inline int rpi_firmware_property_list(struct rpi_firmware *fw,
+ void *data, size_t tag_size)
+{
+ return 0;
+}
+
+static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
+{
+ return NULL;
+}
+#endif
#endif /* __SOC_RASPBERRY_FIRMWARE_H__ */
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index 37f3eb001a16..3d4df74a96de 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -244,11 +244,11 @@ static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg)
struct qm_dqrr_entry {
u8 verb;
u8 stat;
- u16 seqnum; /* 15-bit */
+ __be16 seqnum; /* 15-bit */
u8 tok;
u8 __reserved2[3];
- u32 fqid; /* 24-bit */
- u32 contextB;
+ __be32 fqid; /* 24-bit */
+ __be32 context_b;
struct qm_fd fd;
u8 __reserved4[32];
} __packed;
@@ -262,6 +262,11 @@ struct qm_dqrr_entry {
#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/
+/* 'fqid' is a 24-bit field in every h/w descriptor */
+#define QM_FQID_MASK GENMASK(23, 0)
+#define qm_fqid_set(p, v) ((p)->fqid = cpu_to_be32((v) & QM_FQID_MASK))
+#define qm_fqid_get(p) (be32_to_cpu((p)->fqid) & QM_FQID_MASK)
+
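A minimal sketch of how these accessors might be used together (the helper is illustrative, not part of the header):

static void copy_fqid_sketch(const struct qm_dqrr_entry *dq,
			     struct qm_mcc_initfq *initfq)
{
	/* qm_fqid_get() returns a host-order value masked to 24 bits;
	 * qm_fqid_set() stores it back big-endian, masked the same way. */
	qm_fqid_set(initfq, qm_fqid_get(dq));
}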
/* "ERN Message Response" */
/* "FQ State Change Notification" */
union qm_mr_entry {
@@ -272,12 +277,11 @@ union qm_mr_entry {
struct {
u8 verb;
u8 dca;
- u16 seqnum;
+ __be16 seqnum;
u8 rc; /* Rej Code: 8-bit */
- u8 orp_hi; /* ORP: 24-bit */
- u16 orp_lo;
- u32 fqid; /* 24-bit */
- u32 tag;
+ u8 __reserved[3];
+ __be32 fqid; /* 24-bit */
+ __be32 tag;
struct qm_fd fd;
u8 __reserved1[32];
} __packed ern;
@@ -285,8 +289,8 @@ union qm_mr_entry {
u8 verb;
u8 fqs; /* Frame Queue Status */
u8 __reserved1[6];
- u32 fqid; /* 24-bit */
- u32 contextB;
+ __be32 fqid; /* 24-bit */
+ __be32 context_b;
u8 __reserved2[48];
} __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
};
@@ -405,13 +409,13 @@ static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
{
- fqd->context_a.context_hi = upper_32_bits(addr);
- fqd->context_a.context_lo = lower_32_bits(addr);
+ fqd->context_a.context_hi = cpu_to_be16(upper_32_bits(addr));
+ fqd->context_a.context_lo = cpu_to_be32(lower_32_bits(addr));
}
static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
{
- fqd->context_a.hi = cpu_to_be16(upper_32_bits(addr));
+ fqd->context_a.hi = cpu_to_be32(upper_32_bits(addr));
fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr));
}
@@ -521,7 +525,7 @@ static inline int qm_fqd_get_wq(const struct qm_fqd *fqd)
*/
struct qm_cgr_wr_parm {
/* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */
- u32 word;
+ __be32 word;
};
/*
* This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
@@ -532,7 +536,7 @@ struct qm_cgr_wr_parm {
*/
struct qm_cgr_cs_thres {
/* _res[13-15], TA[5-12], Tn[0-4] */
- u16 word;
+ __be16 word;
};
/*
* This identical structure of CGR fields is present in the "Init/Modify CGR"
@@ -549,10 +553,10 @@ struct __qm_mc_cgr {
u8 cscn_en; /* boolean, use QM_CGR_EN */
union {
struct {
- u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
- u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
+ __be16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_* */
+ __be16 cscn_targ_dcp_low;
};
- u32 cscn_targ; /* use QM_CGR_TARG_* */
+ __be32 cscn_targ; /* use QM_CGR_TARG_* */
};
u8 cstd_en; /* boolean, use QM_CGR_EN */
u8 cs; /* boolean, only used in query response */
@@ -568,7 +572,9 @@ struct __qm_mc_cgr {
/* Convert CGR thresholds to/from "cs_thres" format */
static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
{
- return ((th->word >> 5) & 0xff) << (th->word & 0x1f);
+ int thres = be16_to_cpu(th->word);
+
+ return ((thres >> 5) & 0xff) << (thres & 0x1f);
}
static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
@@ -584,23 +590,23 @@ static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
if (roundup && oddbit)
val++;
}
- th->word = ((val & 0xff) << 5) | (e & 0x1f);
+ th->word = cpu_to_be16(((val & 0xff) << 5) | (e & 0x1f));
return 0;
}
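A worked decode example for the cs_thres format, based on qm_cgr_cs_thres_get64() above (the packed value is chosen only for illustration):

	/* be16 word 0x0804: TA = (0x0804 >> 5) & 0xff = 64, Tn = 0x0804 & 0x1f = 4,
	 * so qm_cgr_cs_thres_get64() returns 64 << 4 == 1024. */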
/* "Initialize FQ" */
struct qm_mcc_initfq {
u8 __reserved1[2];
- u16 we_mask; /* Write Enable Mask */
- u32 fqid; /* 24-bit */
- u16 count; /* Initialises 'count+1' FQDs */
+ __be16 we_mask; /* Write Enable Mask */
+ __be32 fqid; /* 24-bit */
+ __be16 count; /* Initialises 'count+1' FQDs */
struct qm_fqd fqd; /* the FQD fields go here */
u8 __reserved2[30];
} __packed;
/* "Initialize/Modify CGR" */
struct qm_mcc_initcgr {
u8 __reserve1[2];
- u16 we_mask; /* Write Enable Mask */
+ __be16 we_mask; /* Write Enable Mask */
struct __qm_mc_cgr cgr; /* CGR fields */
u8 __reserved2[2];
u8 cgid;
@@ -654,7 +660,7 @@ struct qman_cgr;
/*
* This enum, and the callback type that returns it, are used when handling
* dequeued frames via DQRR. Note that for "null" callbacks registered with the
- * portal object (for handling dequeues that do not demux because contextB is
+ * portal object (for handling dequeues that do not demux because context_b is
* NULL), the return value *MUST* be qman_cb_dqrr_consume.
*/
enum qman_cb_dqrr_result {
@@ -859,11 +865,11 @@ void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
* qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
* pre-existing frame-queues that aren't to be otherwise interfered with, it
* prevents all other modifications to the frame queue. The TO_DCPORTAL flag
- * causes the driver to honour any contextB modifications requested in the
+ * causes the driver to honour any context_b modifications requested in the
* qm_init_fq() API, as this indicates the frame queue will be consumed by a
* direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
- * software portals, the contextB field is controlled by the driver and can't be
- * modified by the caller.
+ * software portals, the context_b field is controlled by the driver and can't
+ * be modified by the caller.
*/
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
diff --git a/include/soc/nps/mtm.h b/include/soc/nps/mtm.h
new file mode 100644
index 000000000000..d2f5e7e3703e
--- /dev/null
+++ b/include/soc/nps/mtm.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef SOC_NPS_MTM_H
+#define SOC_NPS_MTM_H
+
+#define CTOP_INST_HWSCHD_OFF_R3 0x3B6F00BF
+#define CTOP_INST_HWSCHD_RESTORE_R3 0x3E6F70C3
+
+static inline void hw_schd_save(unsigned int *flags)
+{
+ __asm__ __volatile__(
+ " .word %1\n"
+ " st r3,[%0]\n"
+ :
+ : "r"(flags), "i"(CTOP_INST_HWSCHD_OFF_R3)
+ : "r3", "memory");
+}
+
+static inline void hw_schd_restore(unsigned int flags)
+{
+ __asm__ __volatile__(
+ " mov r3, %0\n"
+ " .word %1\n"
+ :
+ : "r"(flags), "i"(CTOP_INST_HWSCHD_RESTORE_R3)
+ : "r3");
+}
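An illustrative save/restore pattern around code that must not be migrated by the hardware scheduler (the critical section is a placeholder):

	unsigned int hw_flags;

	hw_schd_save(&hw_flags);
	/* ... work that must stay on this hardware thread ... */
	hw_schd_restore(hw_flags);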
+
+#endif /* SOC_NPS_MTM_H */
diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h
new file mode 100644
index 000000000000..0aaef5960e29
--- /dev/null
+++ b/include/soc/tegra/bpmp-abi.h
@@ -0,0 +1,1601 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ABI_BPMP_ABI_H_
+#define _ABI_BPMP_ABI_H_
+
+#ifdef LK
+#include <stdint.h>
+#endif
+
+#ifndef __ABI_PACKED
+#define __ABI_PACKED __attribute__((packed))
+#endif
+
+#ifdef NO_GCC_EXTENSIONS
+#define EMPTY char empty;
+#define EMPTY_ARRAY 1
+#else
+#define EMPTY
+#define EMPTY_ARRAY 0
+#endif
+
+#ifndef __UNION_ANON
+#define __UNION_ANON
+#endif
+/**
+ * @file
+ */
+
+
+/**
+ * @defgroup MRQ MRQ Messages
+ * @brief Messages sent to/from BPMP via IPC
+ * @{
+ * @defgroup MRQ_Format Message Format
+ * @defgroup MRQ_Codes Message Request (MRQ) Codes
+ * @defgroup MRQ_Payloads Message Payloads
+ * @defgroup Error_Codes Error Codes
+ * @}
+ */
+
+/**
+ * @addtogroup MRQ_Format Message Format
+ * @{
+ * The CPU requests the BPMP to perform a particular service by
+ * sending it an IVC frame containing a single MRQ message. An MRQ
+ * message consists of a @ref mrq_request followed by a payload whose
+ * format depends on mrq_request::mrq.
+ *
+ * The BPMP processes the data and replies with an IVC frame (on the
+ * same IVC channel) containing an MRQ response. An MRQ response
+ * consists of a @ref mrq_response followed by a payload whose format
+ * depends on the associated mrq_request::mrq.
+ *
+ * A well-defined subset of the MRQ messages that the CPU sends to the
+ * BPMP can lead to BPMP eventually sending an MRQ message to the
+ * CPU. For example, when the CPU uses an #MRQ_THERMAL message to set
+ * a thermal trip point, the BPMP may eventually send a single
+ * #MRQ_THERMAL message of its own to the CPU indicating that the trip
+ * point has been crossed.
+ * @}
+ */
+
+/**
+ * @ingroup MRQ_Format
+ * @brief header for an MRQ message
+ *
+ * Provides the MRQ number for the MRQ message: #mrq. The remainder of
+ * the MRQ message is a payload (immediately following the
+ * mrq_request) whose format depends on mrq.
+ *
+ * @todo document the flags
+ */
+struct mrq_request {
+ /** @brief MRQ number of the request */
+ uint32_t mrq;
+ /** @brief flags for the request */
+ uint32_t flags;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Format
+ * @brief header for an MRQ response
+ *
+ * Provides an error code for the associated MRQ message. The
+ * remainder of the MRQ response is a payload (immediately following
+ * the mrq_response) whose format depends on the associated
+ * mrq_request::mrq
+ *
+ * @todo document the flags
+ */
+struct mrq_response {
+ /** @brief error code for the MRQ request itself */
+ int32_t err;
+ /** @brief flags for the response */
+ uint32_t flags;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Format
+ * Minimum needed size for an IPC message buffer
+ */
+#define MSG_MIN_SZ 128
+/**
+ * @ingroup MRQ_Format
+ * Minimum size guaranteed for data in an IPC message buffer
+ */
+#define MSG_DATA_MIN_SZ 120
+
+/**
+ * @ingroup MRQ_Codes
+ * @name Legal MRQ codes
+ * These are the legal values for mrq_request::mrq
+ * @{
+ */
+
+#define MRQ_PING 0
+#define MRQ_QUERY_TAG 1
+#define MRQ_MODULE_LOAD 4
+#define MRQ_MODULE_UNLOAD 5
+#define MRQ_TRACE_MODIFY 7
+#define MRQ_WRITE_TRACE 8
+#define MRQ_THREADED_PING 9
+#define MRQ_MODULE_MAIL 11
+#define MRQ_DEBUGFS 19
+#define MRQ_RESET 20
+#define MRQ_I2C 21
+#define MRQ_CLK 22
+#define MRQ_QUERY_ABI 23
+#define MRQ_PG_READ_STATE 25
+#define MRQ_PG_UPDATE_STATE 26
+#define MRQ_THERMAL 27
+#define MRQ_CPU_VHINT 28
+#define MRQ_ABI_RATCHET 29
+#define MRQ_EMC_DVFS_LATENCY 31
+#define MRQ_TRACE_ITER 64
+
+/** @} */
+
+/**
+ * @ingroup MRQ_Codes
+ * @brief Maximum MRQ code to be sent by CPU software to
+ * BPMP. Subject to change in future
+ */
+#define MAX_CPU_MRQ_ID 64
+
+/**
+ * @addtogroup MRQ_Payloads Message Payloads
+ * @{
+ * @defgroup Ping
+ * @defgroup Query_Tag Query Tag
+ * @defgroup Module Loadable Modules
+ * @defgroup Trace
+ * @defgroup Debugfs
+ * @defgroup Reset
+ * @defgroup I2C
+ * @defgroup Clocks
+ * @defgroup ABI_info ABI Info
+ * @defgroup MC_Flush MC Flush
+ * @defgroup Powergating
+ * @defgroup Thermal
+ * @defgroup Vhint CPU Voltage hint
+ * @defgroup MRQ_Deprecated Deprecated MRQ messages
+ * @defgroup EMC
+ * @}
+ */
+
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_PING
+ * @brief A simple ping
+ *
+ * * Platforms: All
+ * * Initiators: Any
+ * * Targets: Any
+ * * Request Payload: @ref mrq_ping_request
+ * * Response Payload: @ref mrq_ping_response
+ *
+ * @ingroup MRQ_Codes
+ * @def MRQ_THREADED_PING
+ * @brief A deeper ping
+ *
+ * * Platforms: All
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_ping_request
+ * * Response Payload: @ref mrq_ping_response
+ *
+ * Behavior is equivalent to a simple #MRQ_PING except that BPMP
+ * responds from a thread context (providing a slightly more robust
+ * sign of life).
+ *
+ */
+
+/**
+ * @ingroup Ping
+ * @brief request with #MRQ_PING
+ *
+ * Used by the sender of an #MRQ_PING message to request a pong from
+ * recipient. The response from the recipient is computed based on
+ * #challenge.
+ */
+struct mrq_ping_request {
+/** @brief arbitrarily chosen value */
+ uint32_t challenge;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Ping
+ * @brief response to #MRQ_PING
+ *
+ * Sent in response to an #MRQ_PING message. #reply should be the
+ * mrq_ping_request challenge left shifted by 1 with the carry-bit
+ * dropped.
+ *
+ */
+struct mrq_ping_response {
+ /** @brief response to the MRQ_PING challenge */
+ uint32_t reply;
+} __ABI_PACKED;
+
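A minimal sketch of computing the expected reply on the CPU side, per the description above (the helper name is illustrative, not part of the ABI):

static inline uint32_t bpmp_expected_pong(uint32_t challenge)
{
	/* left shift by one; the carry bit is naturally dropped in 32 bits */
	return challenge << 1;
}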
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_QUERY_TAG
+ * @brief Query BPMP firmware's tag (i.e. version information)
+ *
+ * * Platforms: All
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_query_tag_request
+ * * Response Payload: N/A
+ *
+ */
+
+/**
+ * @ingroup Query_Tag
+ * @brief request with #MRQ_QUERY_TAG
+ *
+ * Used by #MRQ_QUERY_TAG call to ask BPMP to fill in the memory
+ * pointed by #addr with BPMP firmware header.
+ *
+ * The sender is responsible for ensuring that #addr is mapped into
+ * the recipient's address map.
+ */
+struct mrq_query_tag_request {
+ /** @brief base address to store the firmware header */
+ uint32_t addr;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_MODULE_LOAD
+ * @brief dynamically load a BPMP code module
+ *
+ * * Platforms: All
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_module_load_request
+ * * Response Payload: @ref mrq_module_load_response
+ *
+ * @note This MRQ is disabled on production systems
+ *
+ */
+
+/**
+ * @ingroup Module
+ * @brief request with #MRQ_MODULE_LOAD
+ *
+ * Used by #MRQ_MODULE_LOAD calls to ask the recipient to dynamically
+ * load the code located at #phys_addr and having size #size
+ * bytes. #phys_addr is treated as a void pointer.
+ *
+ * The recipient copies the code from #phys_addr to locally allocated
+ * memory prior to responding to this message.
+ *
+ * @todo document the module header format
+ *
+ * The sender is responsible for ensuring that the code is mapped in
+ * the recipient's address map.
+ *
+ */
+struct mrq_module_load_request {
+ /** @brief base address of the code to load. Treated as (void *) */
+ uint32_t phys_addr; /* (void *) */
+ /** @brief size in bytes of code to load */
+ uint32_t size;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Module
+ * @brief response to #MRQ_MODULE_LOAD
+ *
+ * @todo document mrq_response::err
+ */
+struct mrq_module_load_response {
+ /** @brief handle to the loaded module */
+ uint32_t base;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_MODULE_UNLOAD
+ * @brief unload a previously loaded code module
+ *
+ * * Platforms: All
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_module_unload_request
+ * * Response Payload: N/A
+ *
+ * @note This MRQ is disabled on production systems
+ */
+
+/**
+ * @ingroup Module
+ * @brief request with #MRQ_MODULE_UNLOAD
+ *
+ * Used by #MRQ_MODULE_UNLOAD calls to request that a previously loaded
+ * module be unloaded.
+ */
+struct mrq_module_unload_request {
+ /** @brief handle of the module to unload */
+ uint32_t base;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_TRACE_MODIFY
+ * @brief modify the set of enabled trace events
+ *
+ * * Platforms: All
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_trace_modify_request
+ * * Response Payload: @ref mrq_trace_modify_response
+ *
+ * @note This MRQ is disabled on production systems
+ */
+
+/**
+ * @ingroup Trace
+ * @brief request with #MRQ_TRACE_MODIFY
+ *
+ * Used by %MRQ_TRACE_MODIFY calls to enable or disable specific trace
+ * events. #set takes precedence for any bit set in both #set and
+ * #clr.
+ */
+struct mrq_trace_modify_request {
+ /** @brief bit mask of trace events to disable */
+ uint32_t clr;
+ /** @brief bit mask of trace events to enable */
+ uint32_t set;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Trace
+ * @brief response to #MRQ_TRACE_MODIFY
+ *
+ * Sent in response to an #MRQ_TRACE_MODIFY message. #mask reflects the
+ * state of which events are enabled after the recipient acted on the
+ * message.
+ *
+ */
+struct mrq_trace_modify_response {
+ /** @brief bit mask of trace event enable states */
+ uint32_t mask;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_WRITE_TRACE
+ * @brief Write trace data to a buffer
+ *
+ * * Platforms: All
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_write_trace_request
+ * * Response Payload: @ref mrq_write_trace_response
+ *
+ * mrq_response::err depends on the @ref mrq_write_trace_request field
+ * values. err is -#BPMP_EINVAL if size is zero or area is NULL or
+ * area is in an illegal range. A positive value for err indicates the
+ * number of bytes written to area.
+ *
+ * @note This MRQ is disabled on production systems
+ */
+
+/**
+ * @ingroup Trace
+ * @brief request with #MRQ_WRITE_TRACE
+ *
+ * Used by MRQ_WRITE_TRACE calls to ask the recipient to copy trace
+ * data from the recipient's local buffer to the output buffer. #area
+ * is treated as a byte-aligned pointer in the recipient's address
+ * space.
+ *
+ * The sender is responsible for ensuring that the output
+ * buffer is mapped in the recipient's address map. The recipient is
+ * responsible for protecting its own code and data from accidental
+ * overwrites.
+ */
+struct mrq_write_trace_request {
+ /** @brief base address of output buffer */
+ uint32_t area;
+ /** @brief size in bytes of the output buffer */
+ uint32_t size;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Trace
+ * @brief response to #MRQ_WRITE_TRACE
+ *
+ * Once this response is sent, the respondent will not access the
+ * output buffer further.
+ */
+struct mrq_write_trace_response {
+ /**
+ * @brief flag whether more data remains in local buffer
+ *
+ * Value is 1 if the entire local trace buffer has been
+ * drained to the output buffer. Value is 0 otherwise.
+ */
+ uint32_t eof;
+} __ABI_PACKED;
+
+/** @private */
+struct mrq_threaded_ping_request {
+ uint32_t challenge;
+} __ABI_PACKED;
+
+/** @private */
+struct mrq_threaded_ping_response {
+ uint32_t reply;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_MODULE_MAIL
+ * @brief send a message to a loadable module
+ *
+ * * Platforms: All
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_module_mail_request
+ * * Response Payload: @ref mrq_module_mail_response
+ *
+ * @note This MRQ is disabled on production systems
+ */
+
+/**
+ * @ingroup Module
+ * @brief request with #MRQ_MODULE_MAIL
+ */
+struct mrq_module_mail_request {
+ /** @brief handle to the previously loaded module */
+ uint32_t base;
+ /** @brief module-specific mail payload
+ *
+ * The length of data[ ] is unknown to the BPMP core firmware
+ * but it is limited to the size of an IPC message.
+ */
+ uint8_t data[EMPTY_ARRAY];
+} __ABI_PACKED;
+
+/**
+ * @ingroup Module
+ * @brief response to #MRQ_MODULE_MAIL
+ */
+struct mrq_module_mail_response {
+ /** @brief module-specific mail payload
+ *
+ * The length of data[ ] is unknown to the BPMP core firmware
+ * but it is limited to the size of an IPC message.
+ */
+ uint8_t data[EMPTY_ARRAY];
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_DEBUGFS
+ * @brief Interact with BPMP's debugfs file nodes
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_debugfs_request
+ * * Response Payload: @ref mrq_debugfs_response
+ */
+
+/**
+ * @addtogroup Debugfs
+ * @{
+ *
+ * The BPMP firmware implements a pseudo-filesystem called
+ * debugfs. Any driver within the firmware may register with debugfs
+ * to expose an arbitrary set of "files" in the filesystem. When
+ * software on the CPU writes to a debugfs file, debugfs passes the
+ * written data to a callback provided by the driver. When software on
+ * the CPU reads a debugfs file, debugfs queries the driver for the
+ * data to return to the CPU. The intention of the debugfs filesystem
+ * is to provide information useful for debugging the system at
+ * runtime.
+ *
+ * @note The files exposed via debugfs are not part of the
+ * BPMP firmware's ABI. debugfs files may be added or removed in any
+ * given version of the firmware. Typically the semantics of a debugfs
+ * file are consistent from version to version but even that is not
+ * guaranteed.
+ *
+ * @}
+ */
+/** @ingroup Debugfs */
+enum mrq_debugfs_commands {
+ CMD_DEBUGFS_READ = 1,
+ CMD_DEBUGFS_WRITE = 2,
+ CMD_DEBUGFS_DUMPDIR = 3,
+ CMD_DEBUGFS_MAX
+};
+
+/**
+ * @ingroup Debugfs
+ * @brief parameters for CMD_DEBUGFS_READ/WRITE command
+ */
+struct cmd_debugfs_fileop_request {
+ /** @brief physical address pointing at filename */
+ uint32_t fnameaddr;
+ /** @brief length in bytes of filename buffer */
+ uint32_t fnamelen;
+ /** @brief physical address pointing to data buffer */
+ uint32_t dataaddr;
+ /** @brief length in bytes of data buffer */
+ uint32_t datalen;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief parameters for CMD_DEBUGFS_READ/WRITE command
+ */
+struct cmd_debugfs_dumpdir_request {
+ /** @brief physical address pointing to data buffer */
+ uint32_t dataaddr;
+ /** @brief length in bytes of data buffer */
+ uint32_t datalen;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief response data for CMD_DEBUGFS_READ/WRITE command
+ */
+struct cmd_debugfs_fileop_response {
+ /** @brief always 0 */
+ uint32_t reserved;
+ /** @brief number of bytes read from or written to data buffer */
+ uint32_t nbytes;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief response data for CMD_DEBUGFS_DUMPDIR command
+ */
+struct cmd_debugfs_dumpdir_response {
+ /** @brief always 0 */
+ uint32_t reserved;
+ /** @brief number of bytes read from or written to data buffer */
+ uint32_t nbytes;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief request with #MRQ_DEBUGFS.
+ *
+ * The sender of an MRQ_DEBUGFS message uses #cmd to specify a debugfs
+ * command to execute. Legal commands are the values of @ref
+ * mrq_debugfs_commands. Each command requires a specific additional
+ * payload of data.
+ *
+ * |command |payload|
+ * |-------------------|-------|
+ * |CMD_DEBUGFS_READ |fop |
+ * |CMD_DEBUGFS_WRITE |fop |
+ * |CMD_DEBUGFS_DUMPDIR|dumpdir|
+ */
+struct mrq_debugfs_request {
+ uint32_t cmd;
+ union {
+ struct cmd_debugfs_fileop_request fop;
+ struct cmd_debugfs_dumpdir_request dumpdir;
+ } __UNION_ANON;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ */
+struct mrq_debugfs_response {
+ /** @brief always 0 */
+ int32_t reserved;
+ union {
+ /** @brief response data for CMD_DEBUGFS_READ OR
+ * CMD_DEBUGFS_WRITE command
+ */
+ struct cmd_debugfs_fileop_response fop;
+ /** @brief response data for CMD_DEBUGFS_DUMPDIR command */
+ struct cmd_debugfs_dumpdir_response dumpdir;
+ } __UNION_ANON;
+} __ABI_PACKED;
+
+/**
+ * @addtogroup Debugfs
+ * @{
+ */
+#define DEBUGFS_S_ISDIR (1 << 9)
+#define DEBUGFS_S_IRUSR (1 << 8)
+#define DEBUGFS_S_IWUSR (1 << 7)
+/** @} */
+
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_RESET
+ * @brief reset an IP block
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_reset_request
+ * * Response Payload: N/A
+ */
+
+/**
+ * @ingroup Reset
+ */
+enum mrq_reset_commands {
+ CMD_RESET_ASSERT = 1,
+ CMD_RESET_DEASSERT = 2,
+ CMD_RESET_MODULE = 3,
+ CMD_RESET_MAX, /* not part of ABI and subject to change */
+};
+
+/**
+ * @ingroup Reset
+ * @brief request with MRQ_RESET
+ *
+ * Used by the sender of an #MRQ_RESET message to request BPMP to
+ * assert or deassert a given reset line.
+ */
+struct mrq_reset_request {
+ /** @brief reset action to perform (@enum mrq_reset_commands) */
+ uint32_t cmd;
+ /** @brief id of the reset to be affected */
+ uint32_t reset_id;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_I2C
+ * @brief issue an i2c transaction
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_i2c_request
+ * * Response Payload: @ref mrq_i2c_response
+ */
+
+/**
+ * @addtogroup I2C
+ * @{
+ */
+#define TEGRA_I2C_IPC_MAX_IN_BUF_SIZE (MSG_DATA_MIN_SZ - 12)
+#define TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE (MSG_DATA_MIN_SZ - 4)
+/** @} */
+
+/**
+ * @ingroup I2C
+ * @name Serial I2C flags
+ * Use these flags with serial_i2c_request::flags
+ * @{
+ */
+#define SERIALI2C_TEN 0x0010
+#define SERIALI2C_RD 0x0001
+#define SERIALI2C_STOP 0x8000
+#define SERIALI2C_NOSTART 0x4000
+#define SERIALI2C_REV_DIR_ADDR 0x2000
+#define SERIALI2C_IGNORE_NAK 0x1000
+#define SERIALI2C_NO_RD_ACK 0x0800
+#define SERIALI2C_RECV_LEN 0x0400
+/** @} */
+/** @ingroup I2C */
+enum {
+ CMD_I2C_XFER = 1
+};
+
+/**
+ * @ingroup I2C
+ * @brief serializable i2c request
+ *
+ * Instances of this structure are packed (little-endian) into
+ * cmd_i2c_xfer_request::data_buf. Each instance represents a single
+ * transaction (or a portion of a transaction with repeated starts) on
+ * an i2c bus.
+ *
+ * Because these structures are packed, some instances are likely to
+ * be misaligned. Additionally because #data is variable length, it is
+ * not possible to iterate through a serialized list of these
+ * structures without inspecting #len in each instance. It may be
+ * easier to serialize or deserialize cmd_i2c_xfer_request::data_buf
+ * manually rather than using this structure definition.
+*/
+struct serial_i2c_request {
+ /** @brief I2C slave address */
+ uint16_t addr;
+ /** @brief bitmask of SERIALI2C_ flags */
+ uint16_t flags;
+ /** @brief length of I2C transaction in bytes */
+ uint16_t len;
+ /** @brief for write transactions only, #len bytes of data */
+ uint8_t data[];
+} __ABI_PACKED;
+
+/**
+ * @ingroup I2C
+ * @brief trigger one or more i2c transactions
+ */
+struct cmd_i2c_xfer_request {
+ /** @brief valid bus number from mach-t186/i2c-t186.h*/
+ uint32_t bus_id;
+
+ /** @brief count of valid bytes in #data_buf*/
+ uint32_t data_size;
+
+ /** @brief serialized packed instances of @ref serial_i2c_request*/
+ uint8_t data_buf[TEGRA_I2C_IPC_MAX_IN_BUF_SIZE];
+} __ABI_PACKED;
+
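As the comment on struct serial_i2c_request suggests, manual packing can be simpler than using the struct directly; a sketch of serializing one 2-byte write to a hypothetical slave address 0x50 (all names and values are illustrative):

static void pack_one_write_sketch(struct cmd_i2c_xfer_request *req)
{
	uint8_t *p = req->data_buf;

	p[0] = 0x50; p[1] = 0x00;	/* addr  = 0x0050, little-endian */
	p[2] = 0x00; p[3] = 0x00;	/* flags = 0 (plain write) */
	p[4] = 0x02; p[5] = 0x00;	/* len   = 2 */
	p[6] = 0xaa; p[7] = 0xbb;	/* the two payload bytes */
	req->data_size = 8;		/* bus_id must also be set by the caller */
}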
+/**
+ * @ingroup I2C
+ * @brief container for data read from the i2c bus
+ *
+ * Processing a cmd_i2c_xfer_request::data_buf causes BPMP to execute
+ * zero or more I2C reads. The data read from the bus is serialized
+ * into #data_buf.
+ */
+struct cmd_i2c_xfer_response {
+ /** @brief count of valid bytes in #data_buf*/
+ uint32_t data_size;
+ /** @brief i2c read data */
+ uint8_t data_buf[TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE];
+} __ABI_PACKED;
+
+/**
+ * @ingroup I2C
+ * @brief request with #MRQ_I2C
+ */
+struct mrq_i2c_request {
+ /** @brief always CMD_I2C_XFER (i.e. 1) */
+ uint32_t cmd;
+ /** @brief parameters of the transfer request */
+ struct cmd_i2c_xfer_request xfer;
+} __ABI_PACKED;
+
+/**
+ * @ingroup I2C
+ * @brief response to #MRQ_I2C
+ */
+struct mrq_i2c_response {
+ struct cmd_i2c_xfer_response xfer;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_CLK
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_clk_request
+ * * Response Payload: @ref mrq_clk_response
+ * @addtogroup Clocks
+ * @{
+ */
+
+/**
+ * @name MRQ_CLK sub-commands
+ * @{
+ */
+enum {
+ CMD_CLK_GET_RATE = 1,
+ CMD_CLK_SET_RATE = 2,
+ CMD_CLK_ROUND_RATE = 3,
+ CMD_CLK_GET_PARENT = 4,
+ CMD_CLK_SET_PARENT = 5,
+ CMD_CLK_IS_ENABLED = 6,
+ CMD_CLK_ENABLE = 7,
+ CMD_CLK_DISABLE = 8,
+ CMD_CLK_GET_ALL_INFO = 14,
+ CMD_CLK_GET_MAX_CLK_ID = 15,
+ CMD_CLK_MAX,
+};
+/** @} */
+
+#define MRQ_CLK_NAME_MAXLEN 40
+#define MRQ_CLK_MAX_PARENTS 16
+
+/** @private */
+struct cmd_clk_get_rate_request {
+ EMPTY
+} __ABI_PACKED;
+
+struct cmd_clk_get_rate_response {
+ int64_t rate;
+} __ABI_PACKED;
+
+struct cmd_clk_set_rate_request {
+ int32_t unused;
+ int64_t rate;
+} __ABI_PACKED;
+
+struct cmd_clk_set_rate_response {
+ int64_t rate;
+} __ABI_PACKED;
+
+struct cmd_clk_round_rate_request {
+ int32_t unused;
+ int64_t rate;
+} __ABI_PACKED;
+
+struct cmd_clk_round_rate_response {
+ int64_t rate;
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_get_parent_request {
+ EMPTY
+} __ABI_PACKED;
+
+struct cmd_clk_get_parent_response {
+ uint32_t parent_id;
+} __ABI_PACKED;
+
+struct cmd_clk_set_parent_request {
+ uint32_t parent_id;
+} __ABI_PACKED;
+
+struct cmd_clk_set_parent_response {
+ uint32_t parent_id;
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_is_enabled_request {
+ EMPTY
+} __ABI_PACKED;
+
+struct cmd_clk_is_enabled_response {
+ int32_t state;
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_enable_request {
+ EMPTY
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_enable_response {
+ EMPTY
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_disable_request {
+ EMPTY
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_disable_response {
+ EMPTY
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_get_all_info_request {
+ EMPTY
+} __ABI_PACKED;
+
+struct cmd_clk_get_all_info_response {
+ uint32_t flags;
+ uint32_t parent;
+ uint32_t parents[MRQ_CLK_MAX_PARENTS];
+ uint8_t num_parents;
+ uint8_t name[MRQ_CLK_NAME_MAXLEN];
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_get_max_clk_id_request {
+ EMPTY
+} __ABI_PACKED;
+
+struct cmd_clk_get_max_clk_id_response {
+ uint32_t max_id;
+} __ABI_PACKED;
+/** @} */
+
+/**
+ * @ingroup Clocks
+ * @brief request with #MRQ_CLK
+ *
+ * Used by the sender of an #MRQ_CLK message to control clocks. The
+ * clk_request is split into several sub-commands. Some sub-commands
+ * require no additional data. Others have a sub-command-specific
+ * payload.
+ *
+ * |sub-command |payload |
+ * |----------------------------|-----------------------|
+ * |CMD_CLK_GET_RATE |- |
+ * |CMD_CLK_SET_RATE |clk_set_rate |
+ * |CMD_CLK_ROUND_RATE |clk_round_rate |
+ * |CMD_CLK_GET_PARENT |- |
+ * |CMD_CLK_SET_PARENT |clk_set_parent |
+ * |CMD_CLK_IS_ENABLED |- |
+ * |CMD_CLK_ENABLE |- |
+ * |CMD_CLK_DISABLE |- |
+ * |CMD_CLK_GET_ALL_INFO |- |
+ * |CMD_CLK_GET_MAX_CLK_ID |- |
+ *
+ */
+
+struct mrq_clk_request {
+	/** @brief sub-command and clock id concatenated into a 32-bit word.
+	 * - bits[31..24] are the sub-command.
+	 * - bits[23..0] are the clock id
+ */
+ uint32_t cmd_and_id;
+
+ union {
+ /** @private */
+ struct cmd_clk_get_rate_request clk_get_rate;
+ struct cmd_clk_set_rate_request clk_set_rate;
+ struct cmd_clk_round_rate_request clk_round_rate;
+ /** @private */
+ struct cmd_clk_get_parent_request clk_get_parent;
+ struct cmd_clk_set_parent_request clk_set_parent;
+ /** @private */
+ struct cmd_clk_enable_request clk_enable;
+ /** @private */
+ struct cmd_clk_disable_request clk_disable;
+ /** @private */
+ struct cmd_clk_is_enabled_request clk_is_enabled;
+ /** @private */
+ struct cmd_clk_get_all_info_request clk_get_all_info;
+ /** @private */
+ struct cmd_clk_get_max_clk_id_request clk_get_max_clk_id;
+ } __UNION_ANON;
+} __ABI_PACKED;
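
Editorial sketch (not part of the patch) of how the cmd_and_id encoding documented above might be composed; the helper name and the example clock id are illustrative assumptions.

/* sub-command in bits 31..24, clock id in bits 23..0 */
static inline uint32_t bpmp_clk_cmd_and_id(uint32_t cmd, uint32_t clk_id)
{
	return (cmd << 24) | (clk_id & 0xffffff);
}

/* e.g., inside a request-building function: query the rate of clock id 32 */
struct mrq_clk_request req = {
	.cmd_and_id = bpmp_clk_cmd_and_id(CMD_CLK_GET_RATE, 32),
};
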
+
+/**
+ * @ingroup Clocks
+ * @brief response to MRQ_CLK
+ *
+ * Each sub-command supported by @ref mrq_clk_request may return
+ * sub-command-specific data. Some do and some do not, as indicated in
+ * the following table.
+ *
+ * |sub-command |payload |
+ * |----------------------------|------------------------|
+ * |CMD_CLK_GET_RATE |clk_get_rate |
+ * |CMD_CLK_SET_RATE |clk_set_rate |
+ * |CMD_CLK_ROUND_RATE |clk_round_rate |
+ * |CMD_CLK_GET_PARENT |clk_get_parent |
+ * |CMD_CLK_SET_PARENT |clk_set_parent |
+ * |CMD_CLK_IS_ENABLED |clk_is_enabled |
+ * |CMD_CLK_ENABLE |- |
+ * |CMD_CLK_DISABLE |- |
+ * |CMD_CLK_GET_ALL_INFO |clk_get_all_info |
+ * |CMD_CLK_GET_MAX_CLK_ID      |clk_get_max_clk_id      |
+ *
+ */
+
+struct mrq_clk_response {
+ union {
+ struct cmd_clk_get_rate_response clk_get_rate;
+ struct cmd_clk_set_rate_response clk_set_rate;
+ struct cmd_clk_round_rate_response clk_round_rate;
+ struct cmd_clk_get_parent_response clk_get_parent;
+ struct cmd_clk_set_parent_response clk_set_parent;
+ /** @private */
+ struct cmd_clk_enable_response clk_enable;
+ /** @private */
+ struct cmd_clk_disable_response clk_disable;
+ struct cmd_clk_is_enabled_response clk_is_enabled;
+ struct cmd_clk_get_all_info_response clk_get_all_info;
+ struct cmd_clk_get_max_clk_id_response clk_get_max_clk_id;
+ } __UNION_ANON;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_QUERY_ABI
+ * @brief check if an MRQ is implemented
+ *
+ * * Platforms: All
+ * * Initiators: Any
+ * * Targets: Any
+ * * Request Payload: @ref mrq_query_abi_request
+ * * Response Payload: @ref mrq_query_abi_response
+ */
+
+/**
+ * @ingroup ABI_info
+ * @brief request with MRQ_QUERY_ABI
+ *
+ * Used by #MRQ_QUERY_ABI call to check if MRQ code #mrq is supported
+ * by the recipient.
+ */
+struct mrq_query_abi_request {
+ /** @brief MRQ code to query */
+ uint32_t mrq;
+} __ABI_PACKED;
+
+/**
+ * @ingroup ABI_info
+ * @brief response to MRQ_QUERY_ABI
+ */
+struct mrq_query_abi_response {
+ /** @brief 0 if queried MRQ is supported. Else, -#BPMP_ENODEV */
+ int32_t status;
+} __ABI_PACKED;
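
As an illustration only (assuming the tegra_bpmp_transfer() helper and struct tegra_bpmp_message added in include/soc/tegra/bpmp.h later in this diff), a caller could probe for MRQ support roughly like this:

static bool bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq)
{
	struct mrq_query_abi_request request = { .mrq = mrq };
	struct mrq_query_abi_response response;
	struct tegra_bpmp_message msg = {
		.mrq = MRQ_QUERY_ABI,
		.tx = { .data = &request, .size = sizeof(request) },
		.rx = { .data = &response, .size = sizeof(response) },
	};

	/* a failed transfer is treated as "not supported" in this sketch */
	if (tegra_bpmp_transfer(bpmp, &msg) < 0)
		return false;

	return response.status == 0;
}
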
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_PG_READ_STATE
+ * @brief read the power-gating state of a partition
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_pg_read_state_request
+ * * Response Payload: @ref mrq_pg_read_state_response
+ * @addtogroup Powergating
+ * @{
+ */
+
+/**
+ * @brief request with #MRQ_PG_READ_STATE
+ *
+ * Used by MRQ_PG_READ_STATE call to read the current state of a
+ * partition.
+ */
+struct mrq_pg_read_state_request {
+ /** @brief ID of partition */
+ uint32_t partition_id;
+} __ABI_PACKED;
+
+/**
+ * @brief response to MRQ_PG_READ_STATE
+ * @todo define possible errors.
+ */
+struct mrq_pg_read_state_response {
+ /** @brief read as don't care */
+ uint32_t sram_state;
+ /** @brief state of power partition
+ * * 0 : off
+ * * 1 : on
+ */
+ uint32_t logic_state;
+} __ABI_PACKED;
+
+/** @} */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_PG_UPDATE_STATE
+ * @brief modify the power-gating state of a partition
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_pg_update_state_request
+ * * Response Payload: N/A
+ * @addtogroup Powergating
+ * @{
+ */
+
+/**
+ * @brief request with #MRQ_PG_UPDATE_STATE
+ *
+ * Used by #MRQ_PG_UPDATE_STATE call to request BPMP to change the
+ * state of a power partition #partition_id.
+ */
+struct mrq_pg_update_state_request {
+ /** @brief ID of partition */
+ uint32_t partition_id;
+ /** @brief secondary control of power partition
+ * @details Ignored by many versions of the BPMP
+ * firmware. For maximum compatibility, set the value
+	 * according to @ref logic_state
+ * * 0x1: power ON partition (@ref logic_state == 0x3)
+ * * 0x3: power OFF partition (@ref logic_state == 0x1)
+ */
+ uint32_t sram_state;
+ /** @brief controls state of power partition, legal values are
+ * * 0x1 : power OFF partition
+ * * 0x3 : power ON partition
+ */
+ uint32_t logic_state;
+ /** @brief change state of clocks of the power partition, legal values
+ * * 0x0 : do not change clock state
+ * * 0x1 : disable partition clocks (only applicable when
+ * @ref logic_state == 0x1)
+ * * 0x3 : enable partition clocks (only applicable when
+ * @ref logic_state == 0x3)
+ */
+ uint32_t clock_state;
+} __ABI_PACKED;
+/** @} */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_THERMAL
+ * @brief interact with BPMP thermal framework
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: Any
+ * * Request Payload: TODO
+ * * Response Payload: TODO
+ *
+ * @addtogroup Thermal
+ *
+ * The BPMP firmware includes a thermal framework. Drivers within the
+ * bpmp firmware register with the framework to provide thermal
+ * zones. Each thermal zone corresponds to an entity whose temperature
+ * can be measured. The framework also has a notion of trip points. A
+ * trip point consists of a thermal zone id, a temperature, and a
+ * callback routine. The framework invokes the callback when the zone
+ * hits the indicated temperature. The BPMP firmware uses this thermal
+ * framework internally to implement various temperature-dependent
+ * functions.
+ *
+ * Software on the CPU can use #MRQ_THERMAL (with payload @ref
+ * mrq_thermal_host_to_bpmp_request) to interact with the BPMP thermal
+ * framework. It can query the number of supported zones,
+ * query zone temperatures, and set trip points.
+ *
+ * When a trip point set by the CPU gets crossed, BPMP firmware issues
+ * an IPC to the CPU with mrq_request::mrq = #MRQ_THERMAL and a
+ * payload of @ref mrq_thermal_bpmp_to_host_request.
+ * @{
+ */
+enum mrq_thermal_host_to_bpmp_cmd {
+ /**
+ * @brief Check whether the BPMP driver supports the specified
+ * request type.
+ *
+ * Host needs to supply request parameters.
+ *
+ * mrq_response::err is 0 if the specified request is
+ * supported and -#BPMP_ENODEV otherwise.
+ */
+ CMD_THERMAL_QUERY_ABI = 0,
+
+ /**
+ * @brief Get the current temperature of the specified zone.
+ *
+ * Host needs to supply request parameters.
+ *
+ * mrq_response::err is
+ * * 0: Temperature query succeeded.
+ * * -#BPMP_EINVAL: Invalid request parameters.
+	 * * -#BPMP_ENOENT: No driver registered for thermal zone.
+ * * -#BPMP_EFAULT: Problem reading temperature measurement.
+ */
+ CMD_THERMAL_GET_TEMP = 1,
+
+ /**
+ * @brief Enable or disable and set the lower and upper
+ * thermal limits for a thermal trip point. Each zone has
+ * one trip point.
+ *
+ * Host needs to supply request parameters. Once the
+ * temperature hits a trip point, the BPMP will send a message
+	 * to the CPU with MRQ=MRQ_THERMAL and
+	 * type=CMD_THERMAL_HOST_TRIP_REACHED.
+ *
+ * mrq_response::err is
+ * * 0: Trip successfully set.
+ * * -#BPMP_EINVAL: Invalid request parameters.
+ * * -#BPMP_ENOENT: No driver registered for thermal zone.
+ * * -#BPMP_EFAULT: Problem setting trip point.
+ */
+ CMD_THERMAL_SET_TRIP = 2,
+
+ /**
+ * @brief Get the number of supported thermal zones.
+ *
+ * No request parameters required.
+ *
+ * mrq_response::err is always 0, indicating success.
+ */
+ CMD_THERMAL_GET_NUM_ZONES = 3,
+
+	/** @brief number of supported host-to-bpmp commands. May
+	 * increase in the future
+	 */
+ CMD_THERMAL_HOST_TO_BPMP_NUM
+};
+
+enum mrq_thermal_bpmp_to_host_cmd {
+ /**
+ * @brief Indication that the temperature for a zone has
+ * exceeded the range indicated in the thermal trip point
+ * for the zone.
+ *
+ * BPMP needs to supply request parameters. Host only needs to
+ * acknowledge.
+ */
+ CMD_THERMAL_HOST_TRIP_REACHED = 100,
+
+	/** @brief number of supported bpmp-to-host commands. May
+	 * increase in the future
+	 */
+ CMD_THERMAL_BPMP_TO_HOST_NUM
+};
+
+/*
+ * Host->BPMP request data for request type CMD_THERMAL_QUERY_ABI
+ *
+ * type: Request type for which to check existence.
+ */
+struct cmd_thermal_query_abi_request {
+ uint32_t type;
+} __ABI_PACKED;
+
+/*
+ * Host->BPMP request data for request type CMD_THERMAL_GET_TEMP
+ *
+ * zone: Number of thermal zone.
+ */
+struct cmd_thermal_get_temp_request {
+ uint32_t zone;
+} __ABI_PACKED;
+
+/*
+ * BPMP->Host reply data for request CMD_THERMAL_GET_TEMP
+ *
+ * error: 0 if request succeeded.
+ * -BPMP_EINVAL if request parameters were invalid.
+ * -BPMP_ENOENT if no driver was registered for the specified thermal zone.
+ * -BPMP_EFAULT for other thermal zone driver errors.
+ * temp: Current temperature in millicelsius.
+ */
+struct cmd_thermal_get_temp_response {
+ int32_t temp;
+} __ABI_PACKED;
+
+/*
+ * Host->BPMP request data for request type CMD_THERMAL_SET_TRIP
+ *
+ * zone: Number of thermal zone.
+ * low: Temperature of lower trip point in millicelsius
+ * high: Temperature of upper trip point in millicelsius
+ * enabled: 1 to enable trip point, 0 to disable trip point
+ */
+struct cmd_thermal_set_trip_request {
+ uint32_t zone;
+ int32_t low;
+ int32_t high;
+ uint32_t enabled;
+} __ABI_PACKED;
+
+/*
+ * BPMP->Host request data for request type CMD_THERMAL_HOST_TRIP_REACHED
+ *
+ * zone: Number of thermal zone where trip point was reached.
+ */
+struct cmd_thermal_host_trip_reached_request {
+ uint32_t zone;
+} __ABI_PACKED;
+
+/*
+ * BPMP->Host reply data for request type CMD_THERMAL_GET_NUM_ZONES
+ *
+ * num: Number of supported thermal zones. The thermal zones are indexed
+ * starting from zero.
+ */
+struct cmd_thermal_get_num_zones_response {
+ uint32_t num;
+} __ABI_PACKED;
+
+/*
+ * Host->BPMP request data.
+ *
+ * Reply type is union mrq_thermal_bpmp_to_host_response.
+ *
+ * type: Type of request. Values listed in enum mrq_thermal_host_to_bpmp_cmd.
+ * data: Request type specific parameters.
+ */
+struct mrq_thermal_host_to_bpmp_request {
+ uint32_t type;
+ union {
+ struct cmd_thermal_query_abi_request query_abi;
+ struct cmd_thermal_get_temp_request get_temp;
+ struct cmd_thermal_set_trip_request set_trip;
+ } __UNION_ANON;
+} __ABI_PACKED;
+
+/*
+ * BPMP->Host request data.
+ *
+ * type: Type of request. Values listed in enum mrq_thermal_bpmp_to_host_cmd.
+ * data: Request type specific parameters.
+ */
+struct mrq_thermal_bpmp_to_host_request {
+ uint32_t type;
+ union {
+ struct cmd_thermal_host_trip_reached_request host_trip_reached;
+ } __UNION_ANON;
+} __ABI_PACKED;
+
+/*
+ * Data in reply to a Host->BPMP request.
+ */
+union mrq_thermal_bpmp_to_host_response {
+ struct cmd_thermal_get_temp_response get_temp;
+ struct cmd_thermal_get_num_zones_response get_num_zones;
+} __ABI_PACKED;
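
To make the host-to-BPMP flow concrete, a CMD_THERMAL_GET_TEMP query for zone 0 might be built as sketched below, inside a request-building function; the transfer helpers from include/soc/tegra/bpmp.h and the zone number are assumptions of the sketch.

struct mrq_thermal_host_to_bpmp_request req = {
	.type = CMD_THERMAL_GET_TEMP,
	.get_temp = { .zone = 0 },
};
union mrq_thermal_bpmp_to_host_response resp;
struct tegra_bpmp_message msg = {
	.mrq = MRQ_THERMAL,
	.tx = { .data = &req, .size = sizeof(req) },
	.rx = { .data = &resp, .size = sizeof(resp) },
};
/* after a successful tegra_bpmp_transfer(), resp.get_temp.temp holds the
 * zone temperature in millicelsius */
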
+/** @} */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_CPU_VHINT
+ * @brief Query CPU voltage hint data
+ *
+ * * Platforms: T186
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_cpu_vhint_request
+ * * Response Payload: N/A
+ *
+ * @addtogroup Vhint CPU Voltage hint
+ * @{
+ */
+
+/**
+ * @brief request with #MRQ_CPU_VHINT
+ *
+ * Used by CCPLEX in an #MRQ_CPU_VHINT call to retrieve voltage hint data
+ * from BPMP into the memory pointed to by #addr. CCPLEX is responsible
+ * for allocating a sizeof(cpu_vhint_data) sized block of memory and
+ * mapping it appropriately for BPMP before sending the request.
+ */
+struct mrq_cpu_vhint_request {
+ /** @brief IOVA address for the #cpu_vhint_data */
+ uint32_t addr; /* struct cpu_vhint_data * */
+ /** @brief ID of the cluster whose data is requested */
+ uint32_t cluster_id; /* enum cluster_id */
+} __ABI_PACKED;
+
+/**
+ * @brief description of the CPU v/f relation
+ *
+ * Used by the #MRQ_CPU_VHINT call to carry the data pointed to by #addr
+ * in struct mrq_cpu_vhint_request
+ */
+struct cpu_vhint_data {
+ uint32_t ref_clk_hz; /**< reference frequency in Hz */
+ uint16_t pdiv; /**< post divider value */
+ uint16_t mdiv; /**< input divider value */
+ uint16_t ndiv_max; /**< fMAX expressed with max NDIV value */
+ /** table of ndiv values as a function of vINDEX (voltage index) */
+ uint16_t ndiv[80];
+ /** minimum allowed NDIV value */
+ uint16_t ndiv_min;
+ /** minimum allowed voltage hint value (as in vINDEX) */
+ uint16_t vfloor;
+ /** maximum allowed voltage hint value (as in vINDEX) */
+ uint16_t vceil;
+ /** post-multiplier for vindex value */
+ uint16_t vindex_mult;
+ /** post-divider for vindex value */
+ uint16_t vindex_div;
+ /** reserved for future use */
+ uint16_t reserved[328];
+} __ABI_PACKED;
+
+/** @} */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_ABI_RATCHET
+ * @brief ABI ratchet value query
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_abi_ratchet_request
+ * * Response Payload: @ref mrq_abi_ratchet_response
+ * @addtogroup ABI_info
+ * @{
+ */
+
+/**
+ * @brief an ABI compatibility mechanism
+ *
+ * BPMP_ABI_RATCHET_VALUE may increase in a future revision of this
+ * header file for various reasons, e.g. because
+ * 1. that future revision deprecates some MRQ,
+ * 2. that future revision introduces a breaking change to an existing
+ *    MRQ, or
+ * 3. a bug is discovered in an existing implementation of the BPMP-FW
+ *    (or possibly one of its clients) which warrants deprecating that
+ *    implementation.
+ */
+#define BPMP_ABI_RATCHET_VALUE 3
+
+/**
+ * @brief request with #MRQ_ABI_RATCHET.
+ *
+ * #ratchet should be #BPMP_ABI_RATCHET_VALUE from the ABI header
+ * against which the requester was compiled.
+ *
+ * If ratchet is less than BPMP's #BPMP_ABI_RATCHET_VALUE, BPMP may
+ * reply with mrq_response::err = -#BPMP_ERANGE to indicate that
+ * BPMP-FW cannot interoperate correctly with the requester. Requester
+ * should cease further communication with BPMP.
+ *
+ * Otherwise, err shall be 0.
+ */
+struct mrq_abi_ratchet_request {
+ /** @brief requester's ratchet value */
+ uint16_t ratchet;
+};
+
+/**
+ * @brief response to #MRQ_ABI_RATCHET
+ *
+ * #ratchet shall be #BPMP_ABI_RATCHET_VALUE from the ABI header
+ * against which the BPMP firmware was compiled.
+ *
+ * If #ratchet is less than the requester's #BPMP_ABI_RATCHET_VALUE,
+ * the requester must either interoperate with BPMP according to an ABI
+ * header version with BPMP_ABI_RATCHET_VALUE = ratchet or cease
+ * communication with BPMP.
+ *
+ * If mrq_response::err is 0 and ratchet is greater than or equal to the
+ * requester's BPMP_ABI_RATCHET_VALUE, the requester should continue
+ * normal operation.
+ */
+struct mrq_abi_ratchet_response {
+ /** @brief BPMP's ratchet value */
+ uint16_t ratchet;
+};
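
A brief sketch of how a requester might act on the response (variable names hypothetical, transfer plumbing omitted):

struct mrq_abi_ratchet_response resp;
/* ... resp filled in by an MRQ_ABI_RATCHET transfer ... */
if (resp.ratchet < BPMP_ABI_RATCHET_VALUE) {
	/* fall back to an older ABI revision or stop talking to BPMP */
}
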
+/** @} */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_EMC_DVFS_LATENCY
+ * @brief query frequency dependent EMC DVFS latency
+ *
+ * * Platforms: T186
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: N/A
+ * * Response Payload: @ref mrq_emc_dvfs_latency_response
+ * @addtogroup EMC
+ * @{
+ */
+
+/**
+ * @brief used by @ref mrq_emc_dvfs_latency_response
+ */
+struct emc_dvfs_latency {
+ /** @brief EMC frequency in kHz */
+ uint32_t freq;
+ /** @brief EMC DVFS latency in nanoseconds */
+ uint32_t latency;
+} __ABI_PACKED;
+
+#define EMC_DVFS_LATENCY_MAX_SIZE 14
+/**
+ * @brief response to #MRQ_EMC_DVFS_LATENCY
+ */
+struct mrq_emc_dvfs_latency_response {
+	/** @brief the number of valid entries in #pairs */
+ uint32_t num_pairs;
+ /** @brief EMC <frequency, latency> information */
+ struct emc_dvfs_latency pairs[EMC_DVFS_LATENCY_MAX_SIZE];
+} __ABI_PACKED;
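
Given a filled-in struct mrq_emc_dvfs_latency_response resp, a consumer might walk the valid pairs as in this sketch (the logging call is illustrative):

unsigned int i;

for (i = 0; i < resp.num_pairs && i < EMC_DVFS_LATENCY_MAX_SIZE; i++)
	pr_info("EMC %u kHz -> DVFS latency %u ns\n",
		resp.pairs[i].freq, resp.pairs[i].latency);
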
+
+/** @} */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_TRACE_ITER
+ * @brief manage the trace iterator
+ *
+ * * Platforms: All
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: N/A
+ * * Response Payload: @ref mrq_trace_iter_request
+ * @addtogroup Trace
+ * @{
+ */
+enum {
+ /** @brief (re)start the tracing now. Ignore older events */
+ TRACE_ITER_INIT = 0,
+ /** @brief clobber all events in the trace buffer */
+ TRACE_ITER_CLEAN = 1
+};
+
+/**
+ * @brief request with #MRQ_TRACE_ITER
+ */
+struct mrq_trace_iter_request {
+ /** @brief TRACE_ITER_INIT or TRACE_ITER_CLEAN */
+ uint32_t cmd;
+} __ABI_PACKED;
+
+/** @} */
+
+/*
+ * 4. Enumerations
+ */
+
+/*
+ * 4.1 CPU enumerations
+ *
+ * See <mach-t186/system-t186.h>
+ *
+ * 4.2 CPU Cluster enumerations
+ *
+ * See <mach-t186/system-t186.h>
+ *
+ * 4.3 System low power state enumerations
+ *
+ * See <mach-t186/system-t186.h>
+ */
+
+/*
+ * 4.4 Clock enumerations
+ *
+ * For clock enumerations, see <mach-t186/clk-t186.h>
+ */
+
+/*
+ * 4.5 Reset enumerations
+ *
+ * For reset enumerations, see <mach-t186/reset-t186.h>
+ */
+
+/*
+ * 4.6 Thermal sensor enumerations
+ *
+ * For thermal sensor enumerations, see <mach-t186/thermal-t186.h>
+ */
+
+/**
+ * @defgroup Error_Codes
+ * Negative values for mrq_response::err generally indicate some
+ * error. The ABI defines the following error codes. Negating these
+ * defines is an exercise left to the user.
+ * @{
+ */
+/** @brief No such file or directory */
+#define BPMP_ENOENT 2
+/** @brief No MRQ handler */
+#define BPMP_ENOHANDLER 3
+/** @brief I/O error */
+#define BPMP_EIO 5
+/** @brief Bad sub-MRQ command */
+#define BPMP_EBADCMD 6
+/** @brief Not enough memory */
+#define BPMP_ENOMEM 12
+/** @brief Permission denied */
+#define BPMP_EACCES 13
+/** @brief Bad address */
+#define BPMP_EFAULT 14
+/** @brief No such device */
+#define BPMP_ENODEV 19
+/** @brief Argument is a directory */
+#define BPMP_EISDIR 21
+/** @brief Invalid argument */
+#define BPMP_EINVAL 22
+/** @brief Timeout during operation */
+#define BPMP_ETIMEDOUT 23
+/** @brief Out of range */
+#define BPMP_ERANGE 34
+/** @} */
+/** @} */
+#endif
diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h
new file mode 100644
index 000000000000..13dcd44e91bb
--- /dev/null
+++ b/include/soc/tegra/bpmp.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SOC_TEGRA_BPMP_H
+#define __SOC_TEGRA_BPMP_H
+
+#include <linux/mailbox_client.h>
+#include <linux/reset-controller.h>
+#include <linux/semaphore.h>
+#include <linux/types.h>
+
+#include <soc/tegra/bpmp-abi.h>
+
+struct tegra_bpmp_clk;
+
+struct tegra_bpmp_soc {
+ struct {
+ struct {
+ unsigned int offset;
+ unsigned int count;
+ unsigned int timeout;
+ } cpu_tx, thread, cpu_rx;
+ } channels;
+ unsigned int num_resets;
+};
+
+struct tegra_bpmp_mb_data {
+ u32 code;
+ u32 flags;
+ u8 data[MSG_DATA_MIN_SZ];
+} __packed;
+
+struct tegra_bpmp_channel {
+ struct tegra_bpmp *bpmp;
+ struct tegra_bpmp_mb_data *ib;
+ struct tegra_bpmp_mb_data *ob;
+ struct completion completion;
+ struct tegra_ivc *ivc;
+};
+
+typedef void (*tegra_bpmp_mrq_handler_t)(unsigned int mrq,
+ struct tegra_bpmp_channel *channel,
+ void *data);
+
+struct tegra_bpmp_mrq {
+ struct list_head list;
+ unsigned int mrq;
+ tegra_bpmp_mrq_handler_t handler;
+ void *data;
+};
+
+struct tegra_bpmp {
+ const struct tegra_bpmp_soc *soc;
+ struct device *dev;
+
+ struct {
+ struct gen_pool *pool;
+ dma_addr_t phys;
+ void *virt;
+ } tx, rx;
+
+ struct {
+ struct mbox_client client;
+ struct mbox_chan *channel;
+ } mbox;
+
+ struct tegra_bpmp_channel *channels;
+ unsigned int num_channels;
+
+ struct {
+ unsigned long *allocated;
+ unsigned long *busy;
+ unsigned int count;
+ struct semaphore lock;
+ } threaded;
+
+ struct list_head mrqs;
+ spinlock_t lock;
+
+ struct tegra_bpmp_clk **clocks;
+ unsigned int num_clocks;
+
+ struct reset_controller_dev rstc;
+};
+
+struct tegra_bpmp *tegra_bpmp_get(struct device *dev);
+void tegra_bpmp_put(struct tegra_bpmp *bpmp);
+
+struct tegra_bpmp_message {
+ unsigned int mrq;
+
+ struct {
+ const void *data;
+ size_t size;
+ } tx;
+
+ struct {
+ void *data;
+ size_t size;
+ } rx;
+};
+
+int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
+ struct tegra_bpmp_message *msg);
+int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
+ struct tegra_bpmp_message *msg);
+
+int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
+ tegra_bpmp_mrq_handler_t handler, void *data);
+void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
+ void *data);
+
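As a sketch of how these two functions might be used for BPMP-initiated MRQ_THERMAL notifications (the handler body, and the assumption that the inbound payload starts at channel->ib->data, are illustrative and not mandated by this header):

static void thermal_notify(unsigned int mrq,
			   struct tegra_bpmp_channel *channel, void *data)
{
	const struct mrq_thermal_bpmp_to_host_request *req =
		(const void *)channel->ib->data;

	if (req->type == CMD_THERMAL_HOST_TRIP_REACHED)
		dev_info(data, "trip reached in thermal zone %u\n",
			 req->host_trip_reached.zone);
}

/* in probe, with struct device *dev and struct tegra_bpmp *bpmp in scope */
err = tegra_bpmp_request_mrq(bpmp, MRQ_THERMAL, thermal_notify, dev);
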
+#if IS_ENABLED(CONFIG_CLK_TEGRA_BPMP)
+int tegra_bpmp_init_clocks(struct tegra_bpmp *bpmp);
+#else
+static inline int tegra_bpmp_init_clocks(struct tegra_bpmp *bpmp)
+{
+ return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_RESET_TEGRA_BPMP)
+int tegra_bpmp_init_resets(struct tegra_bpmp *bpmp);
+#else
+static inline int tegra_bpmp_init_resets(struct tegra_bpmp *bpmp)
+{
+ return 0;
+}
+#endif
+
+#endif /* __SOC_TEGRA_BPMP_H */
diff --git a/include/soc/tegra/ivc.h b/include/soc/tegra/ivc.h
new file mode 100644
index 000000000000..b13cc43ad9d8
--- /dev/null
+++ b/include/soc/tegra/ivc.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TEGRA_IVC_H
+#define __TEGRA_IVC_H
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+
+struct tegra_ivc_header;
+
+struct tegra_ivc {
+ struct device *peer;
+
+ struct {
+ struct tegra_ivc_header *channel;
+ unsigned int position;
+ dma_addr_t phys;
+ } rx, tx;
+
+ void (*notify)(struct tegra_ivc *ivc, void *data);
+ void *notify_data;
+
+ unsigned int num_frames;
+ size_t frame_size;
+};
+
+/**
+ * tegra_ivc_read_get_next_frame - Peek at the next frame to receive
+ * @ivc: pointer to the IVC channel
+ *
+ * Peek at the next frame to be received, without removing it from
+ * the queue.
+ *
+ * Returns a pointer to the frame, or an error encoded pointer.
+ */
+void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc);
+
+/**
+ * tegra_ivc_read_advance - Advance the read queue
+ * @ivc: pointer to the IVC channel
+ *
+ * Advance the read queue
+ *
+ * Returns 0, or a negative error value if failed.
+ */
+int tegra_ivc_read_advance(struct tegra_ivc *ivc);
+
+/**
+ * tegra_ivc_write_get_next_frame - Poke at the next frame to transmit
+ * @ivc: pointer to the IVC channel
+ *
+ * Get access to the next frame.
+ *
+ * Returns a pointer to the frame, or an error encoded pointer.
+ */
+void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc);
+
+/**
+ * tegra_ivc_write_advance - Advance the write queue
+ * @ivc: pointer to the IVC channel
+ *
+ * Advance the write queue
+ *
+ * Returns 0, or a negative error value if failed.
+ */
+int tegra_ivc_write_advance(struct tegra_ivc *ivc);
+
+/**
+ * tegra_ivc_notified - handle internal messages
+ * @ivc: pointer to the IVC channel
+ *
+ * This function must be called following every notification.
+ *
+ * Returns 0 if the channel is ready for communication, or -EAGAIN if a channel
+ * reset is in progress.
+ */
+int tegra_ivc_notified(struct tegra_ivc *ivc);
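
Tying the read-side calls together, a receive path might look like this sketch (the function name, min()-based copy and error handling are assumptions, not part of this header):

static int ivc_receive(struct tegra_ivc *ivc, void *buf, size_t size)
{
	void *frame;

	frame = tegra_ivc_read_get_next_frame(ivc);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	memcpy(buf, frame, min(size, ivc->frame_size));

	/* release the frame back to the queue once it has been consumed */
	return tegra_ivc_read_advance(ivc);
}
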
+
+/**
+ * tegra_ivc_reset - initiates a reset of the shared memory state
+ * @ivc: pointer to the IVC channel
+ *
+ * This function must be called after a channel is reserved and before it is
+ * used for communication. The channel becomes ready for use once the remote
+ * end has been notified of the channel reset.
+ */
+void tegra_ivc_reset(struct tegra_ivc *ivc);
+
+size_t tegra_ivc_align(size_t size);
+unsigned tegra_ivc_total_queue_size(unsigned queue_size);
+int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
+ dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
+ unsigned int num_frames, size_t frame_size,
+ void (*notify)(struct tegra_ivc *ivc, void *data),
+ void *data);
+void tegra_ivc_cleanup(struct tegra_ivc *ivc);
+
+#endif /* __TEGRA_IVC_H */
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index e9e53473a63e..2f271d1b9cea 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -76,37 +76,73 @@ int tegra_pmc_cpu_remove_clamping(unsigned int cpuid);
#define TEGRA_POWERGATE_3D0 TEGRA_POWERGATE_3D
-#define TEGRA_IO_RAIL_CSIA 0
-#define TEGRA_IO_RAIL_CSIB 1
-#define TEGRA_IO_RAIL_DSI 2
-#define TEGRA_IO_RAIL_MIPI_BIAS 3
-#define TEGRA_IO_RAIL_PEX_BIAS 4
-#define TEGRA_IO_RAIL_PEX_CLK1 5
-#define TEGRA_IO_RAIL_PEX_CLK2 6
-#define TEGRA_IO_RAIL_USB0 9
-#define TEGRA_IO_RAIL_USB1 10
-#define TEGRA_IO_RAIL_USB2 11
-#define TEGRA_IO_RAIL_USB_BIAS 12
-#define TEGRA_IO_RAIL_NAND 13
-#define TEGRA_IO_RAIL_UART 14
-#define TEGRA_IO_RAIL_BB 15
-#define TEGRA_IO_RAIL_AUDIO 17
-#define TEGRA_IO_RAIL_HSIC 19
-#define TEGRA_IO_RAIL_COMP 22
-#define TEGRA_IO_RAIL_HDMI 28
-#define TEGRA_IO_RAIL_PEX_CNTRL 32
-#define TEGRA_IO_RAIL_SDMMC1 33
-#define TEGRA_IO_RAIL_SDMMC3 34
-#define TEGRA_IO_RAIL_SDMMC4 35
-#define TEGRA_IO_RAIL_CAM 36
-#define TEGRA_IO_RAIL_RES 37
-#define TEGRA_IO_RAIL_HV 38
-#define TEGRA_IO_RAIL_DSIB 39
-#define TEGRA_IO_RAIL_DSIC 40
-#define TEGRA_IO_RAIL_DSID 41
-#define TEGRA_IO_RAIL_CSIE 44
-#define TEGRA_IO_RAIL_LVDS 57
-#define TEGRA_IO_RAIL_SYS_DDC 58
+/**
+ * enum tegra_io_pad - I/O pad group identifier
+ *
+ * I/O pins on Tegra SoCs are grouped into so-called I/O pads. Each such pad
+ * can be used to control the common signal voltage level and power state of
+ * its pins.
+ */
+enum tegra_io_pad {
+ TEGRA_IO_PAD_AUDIO,
+ TEGRA_IO_PAD_AUDIO_HV,
+ TEGRA_IO_PAD_BB,
+ TEGRA_IO_PAD_CAM,
+ TEGRA_IO_PAD_COMP,
+ TEGRA_IO_PAD_CSIA,
+ TEGRA_IO_PAD_CSIB,
+ TEGRA_IO_PAD_CSIC,
+ TEGRA_IO_PAD_CSID,
+ TEGRA_IO_PAD_CSIE,
+ TEGRA_IO_PAD_CSIF,
+ TEGRA_IO_PAD_DBG,
+ TEGRA_IO_PAD_DEBUG_NONAO,
+ TEGRA_IO_PAD_DMIC,
+ TEGRA_IO_PAD_DP,
+ TEGRA_IO_PAD_DSI,
+ TEGRA_IO_PAD_DSIB,
+ TEGRA_IO_PAD_DSIC,
+ TEGRA_IO_PAD_DSID,
+ TEGRA_IO_PAD_EMMC,
+ TEGRA_IO_PAD_EMMC2,
+ TEGRA_IO_PAD_GPIO,
+ TEGRA_IO_PAD_HDMI,
+ TEGRA_IO_PAD_HSIC,
+ TEGRA_IO_PAD_HV,
+ TEGRA_IO_PAD_LVDS,
+ TEGRA_IO_PAD_MIPI_BIAS,
+ TEGRA_IO_PAD_NAND,
+ TEGRA_IO_PAD_PEX_BIAS,
+ TEGRA_IO_PAD_PEX_CLK1,
+ TEGRA_IO_PAD_PEX_CLK2,
+ TEGRA_IO_PAD_PEX_CNTRL,
+ TEGRA_IO_PAD_SDMMC1,
+ TEGRA_IO_PAD_SDMMC3,
+ TEGRA_IO_PAD_SDMMC4,
+ TEGRA_IO_PAD_SPI,
+ TEGRA_IO_PAD_SPI_HV,
+ TEGRA_IO_PAD_SYS_DDC,
+ TEGRA_IO_PAD_UART,
+ TEGRA_IO_PAD_USB0,
+ TEGRA_IO_PAD_USB1,
+ TEGRA_IO_PAD_USB2,
+ TEGRA_IO_PAD_USB3,
+ TEGRA_IO_PAD_USB_BIAS,
+};
+
+/* deprecated, use TEGRA_IO_PAD_{HDMI,LVDS} instead */
+#define TEGRA_IO_RAIL_HDMI TEGRA_IO_PAD_HDMI
+#define TEGRA_IO_RAIL_LVDS TEGRA_IO_PAD_LVDS
+
+/**
+ * enum tegra_io_pad_voltage - voltage level of the I/O pad's source rail
+ * @TEGRA_IO_PAD_1800000UV: 1.8 V
+ * @TEGRA_IO_PAD_3300000UV: 3.3 V
+ */
+enum tegra_io_pad_voltage {
+ TEGRA_IO_PAD_1800000UV,
+ TEGRA_IO_PAD_3300000UV,
+};
#ifdef CONFIG_ARCH_TEGRA
int tegra_powergate_is_powered(unsigned int id);
@@ -118,6 +154,13 @@ int tegra_powergate_remove_clamping(unsigned int id);
int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
struct reset_control *rst);
+int tegra_io_pad_power_enable(enum tegra_io_pad id);
+int tegra_io_pad_power_disable(enum tegra_io_pad id);
+int tegra_io_pad_set_voltage(enum tegra_io_pad id,
+ enum tegra_io_pad_voltage voltage);
+int tegra_io_pad_get_voltage(enum tegra_io_pad id);
+
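For example (the ordering of the two calls is an assumption of this sketch), a driver switching an SDMMC pad to 1.8 V signaling and powering it up might do:

int err;

err = tegra_io_pad_set_voltage(TEGRA_IO_PAD_SDMMC1, TEGRA_IO_PAD_1800000UV);
if (err < 0)
	return err;

err = tegra_io_pad_power_enable(TEGRA_IO_PAD_SDMMC1);
if (err < 0)
	return err;
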
+/* deprecated, use tegra_io_pad_power_{enable,disable}() instead */
int tegra_io_rail_power_on(unsigned int id);
int tegra_io_rail_power_off(unsigned int id);
#else
@@ -148,6 +191,27 @@ static inline int tegra_powergate_sequence_power_up(unsigned int id,
return -ENOSYS;
}
+static inline int tegra_io_pad_power_enable(enum tegra_io_pad id)
+{
+ return -ENOSYS;
+}
+
+static inline int tegra_io_pad_power_disable(enum tegra_io_pad id)
+{
+ return -ENOSYS;
+}
+
+static inline int tegra_io_pad_set_voltage(enum tegra_io_pad id,
+ enum tegra_io_pad_voltage voltage)
+{
+ return -ENOSYS;
+}
+
+static inline int tegra_io_pad_get_voltage(enum tegra_io_pad id)
+{
+ return -ENOSYS;
+}
+
static inline int tegra_io_rail_power_on(unsigned int id)
{
return -ENOSYS;
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index cee8c00f3d3e..9924bc9cbc7c 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -155,6 +155,7 @@ struct snd_compr {
struct mutex lock;
int device;
#ifdef CONFIG_SND_VERBOSE_PROCFS
+ /* private: */
char id[64];
struct snd_info_entry *proc_root;
struct snd_info_entry *proc_info_entry;
diff --git a/include/sound/core.h b/include/sound/core.h
index 31079ea5e484..f7d8c10c4c45 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -308,8 +308,8 @@ __printf(4, 5)
void __snd_printk(unsigned int level, const char *file, int line,
const char *format, ...);
#else
-#define __snd_printk(level, file, line, format, args...) \
- printk(format, ##args)
+#define __snd_printk(level, file, line, format, ...) \
+ printk(format, ##__VA_ARGS__)
#endif
/**
@@ -319,8 +319,8 @@ void __snd_printk(unsigned int level, const char *file, int line,
* Works like printk() but prints the file and the line of the caller
* when configured with CONFIG_SND_VERBOSE_PRINTK.
*/
-#define snd_printk(fmt, args...) \
- __snd_printk(0, __FILE__, __LINE__, fmt, ##args)
+#define snd_printk(fmt, ...) \
+ __snd_printk(0, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
#ifdef CONFIG_SND_DEBUG
/**
@@ -330,10 +330,10 @@ void __snd_printk(unsigned int level, const char *file, int line,
* Works like snd_printk() for debugging purposes.
* Ignored when CONFIG_SND_DEBUG is not set.
*/
-#define snd_printd(fmt, args...) \
- __snd_printk(1, __FILE__, __LINE__, fmt, ##args)
-#define _snd_printd(level, fmt, args...) \
- __snd_printk(level, __FILE__, __LINE__, fmt, ##args)
+#define snd_printd(fmt, ...) \
+ __snd_printk(1, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
+#define _snd_printd(level, fmt, ...) \
+ __snd_printk(level, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
/**
* snd_BUG - give a BUG warning message and stack trace
@@ -383,8 +383,8 @@ static inline bool snd_printd_ratelimit(void) { return false; }
* Works like snd_printk() for debugging purposes.
* Ignored when CONFIG_SND_DEBUG_VERBOSE is not set.
*/
-#define snd_printdd(format, args...) \
- __snd_printk(2, __FILE__, __LINE__, format, ##args)
+#define snd_printdd(format, ...) \
+ __snd_printk(2, __FILE__, __LINE__, format, ##__VA_ARGS__)
#else
__printf(1, 2)
static inline void snd_printdd(const char *format, ...) {}
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index 1c8f9e1ef2a5..67be2445941a 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -71,6 +71,7 @@ struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
* @slave_id: Slave requester id for the DMA channel.
* @filter_data: Custom DMA channel filter data, this will usually be used when
* requesting the DMA channel.
+ * @chan_name: Custom channel name to use when requesting DMA channel.
* @fifo_size: FIFO size of the DAI controller in bytes
* @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now
*/
@@ -80,6 +81,7 @@ struct snd_dmaengine_dai_dma_data {
u32 maxburst;
unsigned int slave_id;
void *filter_data;
+ const char *chan_name;
unsigned int fifo_size;
unsigned int flags;
};
@@ -105,6 +107,10 @@ void snd_dmaengine_pcm_set_config_from_dai_data(
* playback.
*/
#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3)
+/*
+ * The PCM streams have custom channel names specified.
+ */
+#define SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME BIT(4)
/**
* struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index 5bd134651f5e..4f42affe777c 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -1688,7 +1688,8 @@ struct snd_emu1010 {
unsigned int internal_clock; /* 44100 or 48000 */
unsigned int optical_in; /* 0:SPDIF, 1:ADAT */
unsigned int optical_out; /* 0:SPDIF, 1:ADAT */
- struct task_struct *firmware_thread;
+ struct delayed_work firmware_work;
+ u32 last_reg;
};
struct snd_emu10k1 {
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index 796cabf6be5e..5ab972e116ec 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -10,8 +10,9 @@
int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
-int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid, int rate);
-int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
+int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
+ int dev_id, int rate);
+int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
bool *audio_enabled, char *buffer, int max_bytes);
int snd_hdac_i915_init(struct hdac_bus *bus);
int snd_hdac_i915_exit(struct hdac_bus *bus);
@@ -29,13 +30,13 @@ static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
{
}
static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec,
- hda_nid_t nid, int rate)
+ hda_nid_t nid, int dev_id, int rate)
{
return 0;
}
static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
- bool *audio_enabled, char *buffer,
- int max_bytes)
+ int dev_id, bool *audio_enabled,
+ char *buffer, int max_bytes)
{
return -ENODEV;
}
diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h
index 530c57bdefa0..915c4357945c 100644
--- a/include/sound/hdmi-codec.h
+++ b/include/sound/hdmi-codec.h
@@ -36,10 +36,10 @@ struct hdmi_codec_daifmt {
HDMI_AC97,
HDMI_SPDIF,
} fmt;
- int bit_clk_inv:1;
- int frame_clk_inv:1;
- int bit_clk_master:1;
- int frame_clk_master:1;
+ unsigned int bit_clk_inv:1;
+ unsigned int frame_clk_inv:1;
+ unsigned int bit_clk_master:1;
+ unsigned int frame_clk_master:1;
};
/*
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 200e1f04c166..58acd00cae19 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -256,6 +256,9 @@ struct snd_soc_dai_driver {
int (*resume)(struct snd_soc_dai *dai);
/* compress dai */
int (*compress_new)(struct snd_soc_pcm_runtime *rtd, int num);
+ /* Optional Callback used at pcm creation*/
+ int (*pcm_new)(struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_dai *dai);
/* DAI is also used for the control bus */
bool bus_control;
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 2b502f6cc6d0..c6cabf3daebe 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -497,6 +497,8 @@ void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd, int stream);
int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
unsigned int dai_fmt);
+int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour);
+
/* Utility functions to get clock rates from various things */
int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots);
int snd_soc_params_to_frame_size(struct snd_pcm_hw_params *params);
@@ -507,9 +509,6 @@ int snd_soc_params_to_bclk(struct snd_pcm_hw_params *parms);
int snd_soc_set_runtime_hwparams(struct snd_pcm_substream *substream,
const struct snd_pcm_hardware *hw);
-int snd_soc_platform_trigger(struct snd_pcm_substream *substream,
- int cmd, struct snd_soc_platform *platform);
-
int soc_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai);
@@ -785,6 +784,10 @@ struct snd_soc_component_driver {
int (*suspend)(struct snd_soc_component *);
int (*resume)(struct snd_soc_component *);
+ /* pcm creation and destruction */
+ int (*pcm_new)(struct snd_soc_pcm_runtime *);
+ void (*pcm_free)(struct snd_pcm *);
+
/* DT */
int (*of_xlate_dai_name)(struct snd_soc_component *component,
struct of_phandle_args *args,
@@ -813,6 +816,7 @@ struct snd_soc_component {
unsigned int suspended:1; /* is in suspend PM state */
struct list_head list;
+ struct list_head card_aux_list; /* for auxiliary bound components */
struct list_head card_list;
struct snd_soc_dai_driver *dai_drv;
@@ -858,6 +862,8 @@ struct snd_soc_component {
void (*remove)(struct snd_soc_component *);
int (*suspend)(struct snd_soc_component *);
int (*resume)(struct snd_soc_component *);
+ int (*pcm_new)(struct snd_soc_pcm_runtime *);
+ void (*pcm_free)(struct snd_pcm *);
/* machine specific init */
int (*init)(struct snd_soc_component *component);
@@ -940,20 +946,11 @@ struct snd_soc_platform_driver {
int (*pcm_new)(struct snd_soc_pcm_runtime *);
void (*pcm_free)(struct snd_pcm *);
- /*
- * For platform caused delay reporting.
- * Optional.
- */
- snd_pcm_sframes_t (*delay)(struct snd_pcm_substream *,
- struct snd_soc_dai *);
-
/* platform stream pcm ops */
const struct snd_pcm_ops *ops;
/* platform stream compress ops */
const struct snd_compr_ops *compr_ops;
-
- int (*bespoke_trigger)(struct snd_pcm_substream *, int);
};
struct snd_soc_dai_link_component {
@@ -1098,6 +1095,8 @@ struct snd_soc_card {
const char *name;
const char *long_name;
const char *driver_name;
+ char dmi_longname[80];
+
struct device *dev;
struct snd_card *snd_card;
struct module *owner;
@@ -1152,6 +1151,7 @@ struct snd_soc_card {
*/
struct snd_soc_aux_dev *aux_dev;
int num_aux_devs;
+ struct list_head aux_comp_list;
const struct snd_kcontrol_new *controls;
int num_controls;
@@ -1547,6 +1547,7 @@ static inline void snd_soc_initialize_card_lists(struct snd_soc_card *card)
INIT_LIST_HEAD(&card->widgets);
INIT_LIST_HEAD(&card->paths);
INIT_LIST_HEAD(&card->dapm_list);
+ INIT_LIST_HEAD(&card->aux_comp_list);
INIT_LIST_HEAD(&card->component_dev_list);
}
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 4ac24f5a3308..275581d483dd 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -1,12 +1,14 @@
#ifndef ISCSI_TARGET_CORE_H
#define ISCSI_TARGET_CORE_H
-#include <linux/in.h>
-#include <linux/configfs.h>
-#include <net/sock.h>
-#include <net/tcp.h>
-#include <scsi/iscsi_proto.h>
-#include <target/target_core_base.h>
+#include <linux/dma-direction.h> /* enum dma_data_direction */
+#include <linux/list.h> /* struct list_head */
+#include <linux/socket.h> /* struct sockaddr_storage */
+#include <linux/types.h> /* u8 */
+#include <scsi/iscsi_proto.h> /* itt_t */
+#include <target/target_core_base.h> /* struct se_cmd */
+
+struct sock;
#define ISCSIT_VERSION "v4.1.0"
#define ISCSI_MAX_DATASN_MISSING_COUNT 16
diff --git a/include/target/iscsi/iscsi_target_stat.h b/include/target/iscsi/iscsi_target_stat.h
index e615bb485d0b..c27dd471656d 100644
--- a/include/target/iscsi/iscsi_target_stat.h
+++ b/include/target/iscsi/iscsi_target_stat.h
@@ -1,6 +1,10 @@
#ifndef ISCSI_TARGET_STAT_H
#define ISCSI_TARGET_STAT_H
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/socket.h>
+
/*
* For struct iscsi_tiqn->tiqn_wwn default groups
*/
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index 40ac7cd80150..1277e9ba0318 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -1,6 +1,6 @@
-#include <linux/module.h>
-#include <linux/list.h>
-#include "iscsi_target_core.h"
+#include "iscsi_target_core.h" /* struct iscsi_cmd */
+
+struct sockaddr_storage;
struct iscsit_transport {
#define ISCSIT_TRANSPORT_NAME 16
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index f6f3bc52c1ac..b54b98dc2d4a 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -1,8 +1,14 @@
#ifndef TARGET_CORE_BACKEND_H
#define TARGET_CORE_BACKEND_H
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
#define TRANSPORT_FLAG_PASSTHROUGH 1
+struct request_queue;
+struct scatterlist;
+
struct target_backend_ops {
char name[16];
char inquiry_prod[16];
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index c2119008990a..29e6858bb164 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -1,14 +1,10 @@
#ifndef TARGET_CORE_BASE_H
#define TARGET_CORE_BASE_H
-#include <linux/in.h>
-#include <linux/configfs.h>
-#include <linux/dma-mapping.h>
-#include <linux/blkdev.h>
-#include <linux/percpu_ida.h>
-#include <linux/t10-pi.h>
-#include <net/sock.h>
-#include <net/tcp.h>
+#include <linux/configfs.h> /* struct config_group */
+#include <linux/dma-direction.h> /* enum dma_data_direction */
+#include <linux/percpu_ida.h> /* struct percpu_ida */
+#include <linux/semaphore.h> /* struct semaphore */
#define TARGET_CORE_VERSION "v5.0"
@@ -149,7 +145,7 @@ enum se_cmd_flags_table {
* Used by transport_send_check_condition_and_sense()
* to signal which ASC/ASCQ sense payload should be built.
*/
-typedef unsigned __bitwise__ sense_reason_t;
+typedef unsigned __bitwise sense_reason_t;
enum tcm_sense_reason_table {
#define R(x) (__force sense_reason_t )(x)
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 5cd6faa6e0d1..358041bad1da 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -1,6 +1,10 @@
#ifndef TARGET_CORE_FABRIC_H
#define TARGET_CORE_FABRIC_H
+#include <linux/configfs.h>
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
struct target_core_fabric_ops {
struct module *module;
const char *name;
diff --git a/include/trace/events/alarmtimer.h b/include/trace/events/alarmtimer.h
new file mode 100644
index 000000000000..ae4f358dd8e9
--- /dev/null
+++ b/include/trace/events/alarmtimer.h
@@ -0,0 +1,96 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM alarmtimer
+
+#if !defined(_TRACE_ALARMTIMER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ALARMTIMER_H
+
+#include <linux/alarmtimer.h>
+#include <linux/rtc.h>
+#include <linux/tracepoint.h>
+
+TRACE_DEFINE_ENUM(ALARM_REALTIME);
+TRACE_DEFINE_ENUM(ALARM_BOOTTIME);
+TRACE_DEFINE_ENUM(ALARM_REALTIME_FREEZER);
+TRACE_DEFINE_ENUM(ALARM_BOOTTIME_FREEZER);
+
+#define show_alarm_type(type) __print_flags(type, " | ", \
+ { 1 << ALARM_REALTIME, "REALTIME" }, \
+ { 1 << ALARM_BOOTTIME, "BOOTTIME" }, \
+ { 1 << ALARM_REALTIME_FREEZER, "REALTIME Freezer" }, \
+ { 1 << ALARM_BOOTTIME_FREEZER, "BOOTTIME Freezer" })
+
+TRACE_EVENT(alarmtimer_suspend,
+
+ TP_PROTO(ktime_t expires, int flag),
+
+ TP_ARGS(expires, flag),
+
+ TP_STRUCT__entry(
+ __field(s64, expires)
+ __field(unsigned char, alarm_type)
+ ),
+
+ TP_fast_assign(
+ __entry->expires = expires;
+ __entry->alarm_type = flag;
+ ),
+
+ TP_printk("alarmtimer type:%s expires:%llu",
+ show_alarm_type((1 << __entry->alarm_type)),
+ __entry->expires
+ )
+);
+
+DECLARE_EVENT_CLASS(alarm_class,
+
+ TP_PROTO(struct alarm *alarm, ktime_t now),
+
+ TP_ARGS(alarm, now),
+
+ TP_STRUCT__entry(
+ __field(void *, alarm)
+ __field(unsigned char, alarm_type)
+ __field(s64, expires)
+ __field(s64, now)
+ ),
+
+ TP_fast_assign(
+ __entry->alarm = alarm;
+ __entry->alarm_type = alarm->type;
+ __entry->expires = alarm->node.expires;
+ __entry->now = now;
+ ),
+
+ TP_printk("alarmtimer:%p type:%s expires:%llu now:%llu",
+ __entry->alarm,
+ show_alarm_type((1 << __entry->alarm_type)),
+ __entry->expires,
+ __entry->now
+ )
+);
+
+DEFINE_EVENT(alarm_class, alarmtimer_fired,
+
+ TP_PROTO(struct alarm *alarm, ktime_t now),
+
+ TP_ARGS(alarm, now)
+);
+
+DEFINE_EVENT(alarm_class, alarmtimer_start,
+
+ TP_PROTO(struct alarm *alarm, ktime_t now),
+
+ TP_ARGS(alarm, now)
+);
+
+DEFINE_EVENT(alarm_class, alarmtimer_cancel,
+
+ TP_PROTO(struct alarm *alarm, ktime_t now),
+
+ TP_ARGS(alarm, now)
+);
+
+#endif /* _TRACE_ALARMTIMER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index d336b890e31f..df3e9ae5ad8d 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -27,8 +27,7 @@ DECLARE_EVENT_CLASS(bcache_request,
__entry->sector = bio->bi_iter.bi_sector;
__entry->orig_sector = bio->bi_iter.bi_sector - 16;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
),
TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
@@ -102,8 +101,7 @@ DECLARE_EVENT_CLASS(bcache_bio,
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
),
TP_printk("%d,%d %s %llu + %u",
@@ -138,8 +136,7 @@ TRACE_EVENT(bcache_read,
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
__entry->cache_hit = hit;
__entry->bypass = bypass;
),
@@ -170,8 +167,7 @@ TRACE_EVENT(bcache_write,
__entry->inode = inode;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
__entry->writeback = writeback;
__entry->bypass = bypass;
),
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 8f3a163b8166..3e02e3a25413 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -84,8 +84,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
0 : blk_rq_sectors(rq);
__entry->errors = rq->errors;
- blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags,
- blk_rq_bytes(rq));
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
blk_dump_cmd(__get_str(cmd), rq);
),
@@ -163,7 +162,7 @@ TRACE_EVENT(block_rq_complete,
__entry->nr_sector = nr_bytes >> 9;
__entry->errors = rq->errors;
- blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, nr_bytes);
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
blk_dump_cmd(__get_str(cmd), rq);
),
@@ -199,8 +198,7 @@ DECLARE_EVENT_CLASS(block_rq,
__entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
blk_rq_bytes(rq) : 0;
- blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags,
- blk_rq_bytes(rq));
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
blk_dump_cmd(__get_str(cmd), rq);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -274,8 +272,7 @@ TRACE_EVENT(block_bio_bounce,
bio->bi_bdev->bd_dev : 0;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -313,8 +310,7 @@ TRACE_EVENT(block_bio_complete,
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
__entry->error = error;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
),
TP_printk("%d,%d %s %llu + %u [%d]",
@@ -341,8 +337,7 @@ DECLARE_EVENT_CLASS(block_bio_merge,
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -409,8 +404,7 @@ TRACE_EVENT(block_bio_queue,
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -438,7 +432,7 @@ DECLARE_EVENT_CLASS(block_get_rq,
__entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
__entry->sector = bio ? bio->bi_iter.bi_sector : 0;
__entry->nr_sector = bio ? bio_sectors(bio) : 0;
- blk_fill_rwbs(__entry->rwbs, bio ? bio_op(bio) : 0,
+ blk_fill_rwbs(__entry->rwbs,
bio ? bio->bi_opf : 0, __entry->nr_sector);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -573,8 +567,7 @@ TRACE_EVENT(block_split,
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->new_sector = new_sector;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -617,8 +610,7 @@ TRACE_EVENT(block_bio_remap,
__entry->nr_sector = bio_sectors(bio);
__entry->old_dev = dev;
__entry->old_sector = from;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
- bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
),
TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
@@ -664,8 +656,7 @@ TRACE_EVENT(block_rq_remap,
__entry->old_dev = dev;
__entry->old_sector = from;
__entry->nr_bios = blk_rq_count_bios(rq);
- blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags,
- blk_rq_bytes(rq));
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
),
TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index e030d6f6c19a..c14bed4ab097 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -698,10 +698,10 @@ DEFINE_EVENT(btrfs_delayed_ref_head, run_delayed_ref_head,
DECLARE_EVENT_CLASS(btrfs__chunk,
- TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
+ TP_PROTO(struct btrfs_fs_info *fs_info, struct map_lookup *map,
u64 offset, u64 size),
- TP_ARGS(root, map, offset, size),
+ TP_ARGS(fs_info, map, offset, size),
TP_STRUCT__entry_btrfs(
__field( int, num_stripes )
@@ -712,13 +712,13 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
__field( u64, root_objectid )
),
- TP_fast_assign_btrfs(root->fs_info,
+ TP_fast_assign_btrfs(fs_info,
__entry->num_stripes = map->num_stripes;
__entry->type = map->type;
__entry->sub_stripes = map->sub_stripes;
__entry->offset = offset;
__entry->size = size;
- __entry->root_objectid = root->root_key.objectid;
+ __entry->root_objectid = fs_info->chunk_root->root_key.objectid;
),
TP_printk_btrfs("root = %llu(%s), offset = %llu, size = %llu, "
@@ -732,18 +732,18 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
- TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
+ TP_PROTO(struct btrfs_fs_info *fs_info, struct map_lookup *map,
u64 offset, u64 size),
- TP_ARGS(root, map, offset, size)
+ TP_ARGS(fs_info, map, offset, size)
);
DEFINE_EVENT(btrfs__chunk, btrfs_chunk_free,
- TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
+ TP_PROTO(struct btrfs_fs_info *fs_info, struct map_lookup *map,
u64 offset, u64 size),
- TP_ARGS(root, map, offset, size)
+ TP_ARGS(fs_info, map, offset, size)
);
TRACE_EVENT(btrfs_cow_block,
@@ -891,65 +891,61 @@ TRACE_EVENT(btrfs_flush_space,
DECLARE_EVENT_CLASS(btrfs__reserved_extent,
- TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
+ TP_PROTO(struct btrfs_fs_info *fs_info, u64 start, u64 len),
- TP_ARGS(root, start, len),
+ TP_ARGS(fs_info, start, len),
TP_STRUCT__entry_btrfs(
- __field( u64, root_objectid )
__field( u64, start )
__field( u64, len )
),
- TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ TP_fast_assign_btrfs(fs_info,
__entry->start = start;
__entry->len = len;
),
TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu",
- show_root_type(__entry->root_objectid),
+ show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
(unsigned long long)__entry->start,
(unsigned long long)__entry->len)
);
DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_alloc,
- TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
+ TP_PROTO(struct btrfs_fs_info *fs_info, u64 start, u64 len),
- TP_ARGS(root, start, len)
+ TP_ARGS(fs_info, start, len)
);
DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_free,
- TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
+ TP_PROTO(struct btrfs_fs_info *fs_info, u64 start, u64 len),
- TP_ARGS(root, start, len)
+ TP_ARGS(fs_info, start, len)
);
TRACE_EVENT(find_free_extent,
- TP_PROTO(struct btrfs_root *root, u64 num_bytes, u64 empty_size,
+ TP_PROTO(struct btrfs_fs_info *fs_info, u64 num_bytes, u64 empty_size,
u64 data),
- TP_ARGS(root, num_bytes, empty_size, data),
+ TP_ARGS(fs_info, num_bytes, empty_size, data),
TP_STRUCT__entry_btrfs(
- __field( u64, root_objectid )
__field( u64, num_bytes )
__field( u64, empty_size )
__field( u64, data )
),
- TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ TP_fast_assign_btrfs(fs_info,
__entry->num_bytes = num_bytes;
__entry->empty_size = empty_size;
__entry->data = data;
),
- TP_printk_btrfs("root = %Lu(%s), len = %Lu, empty_size = %Lu, "
- "flags = %Lu(%s)", show_root_type(__entry->root_objectid),
+ TP_printk_btrfs("root = %Lu(%s), len = %Lu, empty_size = %Lu, flags = %Lu(%s)",
+ show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
__entry->num_bytes, __entry->empty_size, __entry->data,
__print_flags((unsigned long)__entry->data, "|",
BTRFS_GROUP_FLAGS))
@@ -957,22 +953,20 @@ TRACE_EVENT(find_free_extent,
DECLARE_EVENT_CLASS(btrfs__reserve_extent,
- TP_PROTO(struct btrfs_root *root,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group, u64 start,
u64 len),
- TP_ARGS(root, block_group, start, len),
+ TP_ARGS(fs_info, block_group, start, len),
TP_STRUCT__entry_btrfs(
- __field( u64, root_objectid )
__field( u64, bg_objectid )
__field( u64, flags )
__field( u64, start )
__field( u64, len )
),
- TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ TP_fast_assign_btrfs(fs_info,
__entry->bg_objectid = block_group->key.objectid;
__entry->flags = block_group->flags;
__entry->start = start;
@@ -981,7 +975,8 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
TP_printk_btrfs("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
"start = %Lu, len = %Lu",
- show_root_type(__entry->root_objectid), __entry->bg_objectid,
+ show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
+ __entry->bg_objectid,
__entry->flags, __print_flags((unsigned long)__entry->flags,
"|", BTRFS_GROUP_FLAGS),
__entry->start, __entry->len)
@@ -989,20 +984,20 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
- TP_PROTO(struct btrfs_root *root,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group, u64 start,
u64 len),
- TP_ARGS(root, block_group, start, len)
+ TP_ARGS(fs_info, block_group, start, len)
);
DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
- TP_PROTO(struct btrfs_root *root,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group, u64 start,
u64 len),
- TP_ARGS(root, block_group, start, len)
+ TP_ARGS(fs_info, block_group, start, len)
);
TRACE_EVENT(btrfs_find_cluster,
@@ -1406,7 +1401,7 @@ DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
TP_ARGS(fs_info, rec)
);
-DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_insert_dirty_extent,
+DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_trace_extent,
TP_PROTO(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup_extent_record *rec),
diff --git a/include/trace/events/fence.h b/include/trace/events/dma_fence.h
index d6dfa05ba322..1157cb4c3c6f 100644
--- a/include/trace/events/fence.h
+++ b/include/trace/events/dma_fence.h
@@ -1,17 +1,17 @@
#undef TRACE_SYSTEM
-#define TRACE_SYSTEM fence
+#define TRACE_SYSTEM dma_fence
#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_FENCE_H
+#define _TRACE_DMA_FENCE_H
#include <linux/tracepoint.h>
-struct fence;
+struct dma_fence;
-TRACE_EVENT(fence_annotate_wait_on,
+TRACE_EVENT(dma_fence_annotate_wait_on,
/* fence: the fence waiting on f1, f1: the fence to be waited on. */
- TP_PROTO(struct fence *fence, struct fence *f1),
+ TP_PROTO(struct dma_fence *fence, struct dma_fence *f1),
TP_ARGS(fence, f1),
@@ -48,9 +48,9 @@ TRACE_EVENT(fence_annotate_wait_on,
__entry->waiting_context, __entry->waiting_seqno)
);
-DECLARE_EVENT_CLASS(fence,
+DECLARE_EVENT_CLASS(dma_fence,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence),
@@ -73,56 +73,56 @@ DECLARE_EVENT_CLASS(fence,
__entry->seqno)
);
-DEFINE_EVENT(fence, fence_emit,
+DEFINE_EVENT(dma_fence, dma_fence_emit,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(fence, fence_init,
+DEFINE_EVENT(dma_fence, dma_fence_init,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(fence, fence_destroy,
+DEFINE_EVENT(dma_fence, dma_fence_destroy,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(fence, fence_enable_signal,
+DEFINE_EVENT(dma_fence, dma_fence_enable_signal,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(fence, fence_signaled,
+DEFINE_EVENT(dma_fence, dma_fence_signaled,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(fence, fence_wait_start,
+DEFINE_EVENT(dma_fence, dma_fence_wait_start,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(fence, fence_wait_end,
+DEFINE_EVENT(dma_fence, dma_fence_wait_end,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-#endif /* _TRACE_FENCE_H */
+#endif /* _TRACE_DMA_FENCE_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 903a09165bb1..01b3c9869a0d 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -32,7 +32,7 @@ TRACE_DEFINE_ENUM(LFS);
TRACE_DEFINE_ENUM(SSR);
TRACE_DEFINE_ENUM(__REQ_RAHEAD);
TRACE_DEFINE_ENUM(__REQ_SYNC);
-TRACE_DEFINE_ENUM(__REQ_NOIDLE);
+TRACE_DEFINE_ENUM(__REQ_IDLE);
TRACE_DEFINE_ENUM(__REQ_PREFLUSH);
TRACE_DEFINE_ENUM(__REQ_FUA);
TRACE_DEFINE_ENUM(__REQ_PRIO);
@@ -55,7 +55,7 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
{ IPU, "IN-PLACE" }, \
{ OPU, "OUT-OF-PLACE" })
-#define F2FS_BIO_FLAG_MASK(t) (t & (REQ_RAHEAD | WRITE_FLUSH_FUA))
+#define F2FS_BIO_FLAG_MASK(t) (t & (REQ_RAHEAD | REQ_PREFLUSH | REQ_FUA))
#define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO))
#define show_bio_type(op_flags) show_bio_op_flags(op_flags), \
@@ -65,11 +65,9 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
__print_symbolic(F2FS_BIO_FLAG_MASK(flags), \
{ 0, "WRITE" }, \
{ REQ_RAHEAD, "READAHEAD" }, \
- { READ_SYNC, "READ_SYNC" }, \
- { WRITE_SYNC, "WRITE_SYNC" }, \
- { WRITE_FLUSH, "WRITE_FLUSH" }, \
- { WRITE_FUA, "WRITE_FUA" }, \
- { WRITE_FLUSH_FUA, "WRITE_FLUSH_FUA" })
+ { REQ_SYNC, "REQ_SYNC" }, \
+ { REQ_PREFLUSH, "REQ_PREFLUSH" }, \
+ { REQ_FUA, "REQ_FUA" })
#define show_bio_extra(type) \
__print_symbolic(F2FS_BIO_EXTRA_MASK(type), \
@@ -1113,6 +1111,27 @@ TRACE_EVENT(f2fs_issue_discard,
(unsigned long long)__entry->blklen)
);
+TRACE_EVENT(f2fs_issue_reset_zone,
+
+ TP_PROTO(struct super_block *sb, block_t blkstart),
+
+ TP_ARGS(sb, blkstart),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(block_t, blkstart)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->blkstart = blkstart;
+ ),
+
+ TP_printk("dev = (%d,%d), reset zone at block = 0x%llx",
+ show_dev(__entry),
+ (unsigned long long)__entry->blkstart)
+);
+
TRACE_EVENT(f2fs_issue_flush,
TP_PROTO(struct super_block *sb, unsigned int nobarrier,
diff --git a/include/trace/events/i2c.h b/include/trace/events/i2c.h
index fe17187df65d..4abb8eab34d3 100644
--- a/include/trace/events/i2c.h
+++ b/include/trace/events/i2c.h
@@ -20,7 +20,7 @@
/*
* drivers/i2c/i2c-core.c
*/
-extern void i2c_transfer_trace_reg(void);
+extern int i2c_transfer_trace_reg(void);
extern void i2c_transfer_trace_unreg(void);
/*
diff --git a/include/trace/events/mdio.h b/include/trace/events/mdio.h
new file mode 100644
index 000000000000..00d85f5f54e4
--- /dev/null
+++ b/include/trace/events/mdio.h
@@ -0,0 +1,42 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mdio
+
+#if !defined(_TRACE_MDIO_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MDIO_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT_CONDITION(mdio_access,
+
+ TP_PROTO(struct mii_bus *bus, char read,
+ u8 addr, unsigned regnum, u16 val, int err),
+
+ TP_ARGS(bus, read, addr, regnum, val, err),
+
+ TP_CONDITION(err >= 0),
+
+ TP_STRUCT__entry(
+ __array(char, busid, MII_BUS_ID_SIZE)
+ __field(char, read)
+ __field(u8, addr)
+ __field(u16, val)
+ __field(unsigned, regnum)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->busid, bus->id, MII_BUS_ID_SIZE);
+ __entry->read = read;
+ __entry->addr = addr;
+ __entry->regnum = regnum;
+ __entry->val = val;
+ ),
+
+ TP_printk("%s %-5s phy:0x%02hhx reg:0x%02x val:0x%04hx",
+ __entry->busid, __entry->read ? "read" : "write",
+ __entry->addr, __entry->regnum, __entry->val)
+);
+
+#endif /* if !defined(_TRACE_MDIO_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
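
For orientation, TRACE_EVENT_CONDITION generates a trace_mdio_access() call whose events are dropped whenever the err argument is negative, per the TP_CONDITION above. A hypothetical call-site sketch (the wrapper name and surrounding code are illustrative, not part of this patch):

    /* Hypothetical wrapper; the real hook belongs in the MDIO core. */
    static int mdiobus_read_traced(struct mii_bus *bus, u8 addr, unsigned int regnum)
    {
        int ret = bus->read(bus, addr, regnum);

        /* read = 1 for reads; failed accesses (ret < 0) are filtered by TP_CONDITION */
        trace_mdio_access(bus, 1, addr, regnum, ret < 0 ? 0 : ret, ret);
        return ret;
    }
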
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 5a81ab48a2fb..9e687ca9a307 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -81,6 +81,7 @@
#define __def_pageflag_names \
{1UL << PG_locked, "locked" }, \
+ {1UL << PG_waiters, "waiters" }, \
{1UL << PG_error, "error" }, \
{1UL << PG_referenced, "referenced" }, \
{1UL << PG_uptodate, "uptodate" }, \
@@ -95,7 +96,6 @@
{1UL << PG_private_2, "private_2" }, \
{1UL << PG_writeback, "writeback" }, \
{1UL << PG_head, "head" }, \
- {1UL << PG_swapcache, "swapcache" }, \
{1UL << PG_mappedtodisk, "mappedtodisk" }, \
{1UL << PG_reclaim, "reclaim" }, \
{1UL << PG_swapbacked, "swapbacked" }, \
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index d3e756539d44..9d4f9b3a2b7b 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -698,7 +698,10 @@ TRACE_EVENT(rcu_batch_end,
/*
* Tracepoint for rcutorture readers. The first argument is the name
* of the RCU flavor from rcutorture's viewpoint and the second argument
- * is the callback address.
+ * is the callback address. The third argument is the start time in
+ * seconds, and the last two arguments are the grace period numbers
+ * at the beginning and end of the read, respectively. Note that the
+ * callback address can be NULL.
*/
TRACE_EVENT(rcu_torture_read,
diff --git a/include/trace/events/swiotlb.h b/include/trace/events/swiotlb.h
index 7ea4c5e7c448..288c0c54a2b4 100644
--- a/include/trace/events/swiotlb.h
+++ b/include/trace/events/swiotlb.h
@@ -11,16 +11,16 @@ TRACE_EVENT(swiotlb_bounced,
TP_PROTO(struct device *dev,
dma_addr_t dev_addr,
size_t size,
- int swiotlb_force),
+ enum swiotlb_force swiotlb_force),
TP_ARGS(dev, dev_addr, size, swiotlb_force),
TP_STRUCT__entry(
- __string( dev_name, dev_name(dev) )
- __field( u64, dma_mask )
- __field( dma_addr_t, dev_addr )
- __field( size_t, size )
- __field( int, swiotlb_force )
+ __string( dev_name, dev_name(dev) )
+ __field( u64, dma_mask )
+ __field( dma_addr_t, dev_addr )
+ __field( size_t, size )
+ __field( enum swiotlb_force, swiotlb_force )
),
TP_fast_assign(
@@ -37,7 +37,10 @@ TRACE_EVENT(swiotlb_bounced,
__entry->dma_mask,
(unsigned long long)__entry->dev_addr,
__entry->size,
- __entry->swiotlb_force ? "swiotlb_force" : "" )
+ __print_symbolic(__entry->swiotlb_force,
+ { SWIOTLB_NORMAL, "NORMAL" },
+ { SWIOTLB_FORCE, "FORCE" },
+ { SWIOTLB_NO_FORCE, "NO_FORCE" }))
);
#endif /* _TRACE_SWIOTLB_H */
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 28c5da6fdfac..1448637616d6 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -177,16 +177,14 @@ TRACE_EVENT(hrtimer_start,
TP_fast_assign(
__entry->hrtimer = hrtimer;
__entry->function = hrtimer->function;
- __entry->expires = hrtimer_get_expires(hrtimer).tv64;
- __entry->softexpires = hrtimer_get_softexpires(hrtimer).tv64;
+ __entry->expires = hrtimer_get_expires(hrtimer);
+ __entry->softexpires = hrtimer_get_softexpires(hrtimer);
),
TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
__entry->hrtimer, __entry->function,
- (unsigned long long)ktime_to_ns((ktime_t) {
- .tv64 = __entry->expires }),
- (unsigned long long)ktime_to_ns((ktime_t) {
- .tv64 = __entry->softexpires }))
+ (unsigned long long) __entry->expires,
+ (unsigned long long) __entry->softexpires)
);
/**
@@ -211,13 +209,13 @@ TRACE_EVENT(hrtimer_expire_entry,
TP_fast_assign(
__entry->hrtimer = hrtimer;
- __entry->now = now->tv64;
+ __entry->now = *now;
__entry->function = hrtimer->function;
),
TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function,
- (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
- );
+ (unsigned long long) __entry->now)
+);
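
The .tv64 removals above follow from ktime_t becoming a plain signed 64-bit nanosecond count rather than a union, so the scalar value can be stored and printed directly. A hedged illustration of the equivalent access (the helper name is hypothetical):

    /* ktime_t is now a plain s64 nanosecond count; no .tv64 member access is needed. */
    static u64 expires_ns(struct hrtimer *timer)
    {
        ktime_t expires = hrtimer_get_expires(timer);

        return (u64)ktime_to_ns(expires);    /* same value the tracepoint now records */
    }
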
DECLARE_EVENT_CLASS(hrtimer_class,
diff --git a/include/trace/events/wbt.h b/include/trace/events/wbt.h
new file mode 100644
index 000000000000..3c518e455680
--- /dev/null
+++ b/include/trace/events/wbt.h
@@ -0,0 +1,153 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM wbt
+
+#if !defined(_TRACE_WBT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WBT_H
+
+#include <linux/tracepoint.h>
+#include "../../../block/blk-wbt.h"
+
+/**
+ * wbt_stat - trace stats for blk_wb
+ * @stat: array of read/write stats
+ */
+TRACE_EVENT(wbt_stat,
+
+ TP_PROTO(struct backing_dev_info *bdi, struct blk_rq_stat *stat),
+
+ TP_ARGS(bdi, stat),
+
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ __field(s64, rmean)
+ __field(u64, rmin)
+ __field(u64, rmax)
+ __field(s64, rnr_samples)
+ __field(s64, rtime)
+ __field(s64, wmean)
+ __field(u64, wmin)
+ __field(u64, wmax)
+ __field(s64, wnr_samples)
+ __field(s64, wtime)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, dev_name(bdi->dev), 32);
+ __entry->rmean = stat[0].mean;
+ __entry->rmin = stat[0].min;
+ __entry->rmax = stat[0].max;
+ __entry->rnr_samples = stat[0].nr_samples;
+ __entry->wmean = stat[1].mean;
+ __entry->wmin = stat[1].min;
+ __entry->wmax = stat[1].max;
+ __entry->wnr_samples = stat[1].nr_samples;
+ ),
+
+ TP_printk("%s: rmean=%llu, rmin=%llu, rmax=%llu, rsamples=%llu, "
+ "wmean=%llu, wmin=%llu, wmax=%llu, wsamples=%llu\n",
+ __entry->name, __entry->rmean, __entry->rmin, __entry->rmax,
+ __entry->rnr_samples, __entry->wmean, __entry->wmin,
+ __entry->wmax, __entry->wnr_samples)
+);
+
+/**
+ * wbt_lat - trace latency event
+ * @lat: latency trigger
+ */
+TRACE_EVENT(wbt_lat,
+
+ TP_PROTO(struct backing_dev_info *bdi, unsigned long lat),
+
+ TP_ARGS(bdi, lat),
+
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ __field(unsigned long, lat)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, dev_name(bdi->dev), 32);
+ __entry->lat = div_u64(lat, 1000);
+ ),
+
+ TP_printk("%s: latency %lluus\n", __entry->name,
+ (unsigned long long) __entry->lat)
+);
+
+/**
+ * wbt_step - trace wb event step
+ * @msg: context message
+ * @step: the current scale step count
+ * @window: the current monitoring window
+ * @bg: the current background queue limit
+ * @normal: the current normal writeback limit
+ * @max: the current max throughput writeback limit
+ */
+TRACE_EVENT(wbt_step,
+
+ TP_PROTO(struct backing_dev_info *bdi, const char *msg,
+ int step, unsigned long window, unsigned int bg,
+ unsigned int normal, unsigned int max),
+
+ TP_ARGS(bdi, msg, step, window, bg, normal, max),
+
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ __field(const char *, msg)
+ __field(int, step)
+ __field(unsigned long, window)
+ __field(unsigned int, bg)
+ __field(unsigned int, normal)
+ __field(unsigned int, max)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, dev_name(bdi->dev), 32);
+ __entry->msg = msg;
+ __entry->step = step;
+ __entry->window = div_u64(window, 1000);
+ __entry->bg = bg;
+ __entry->normal = normal;
+ __entry->max = max;
+ ),
+
+ TP_printk("%s: %s: step=%d, window=%luus, background=%u, normal=%u, max=%u\n",
+ __entry->name, __entry->msg, __entry->step, __entry->window,
+ __entry->bg, __entry->normal, __entry->max)
+);
+
+/**
+ * wbt_timer - trace wb timer event
+ * @status: timer state status
+ * @step: the current scale step count
+ * @inflight: tracked writes inflight
+ */
+TRACE_EVENT(wbt_timer,
+
+ TP_PROTO(struct backing_dev_info *bdi, unsigned int status,
+ int step, unsigned int inflight),
+
+ TP_ARGS(bdi, status, step, inflight),
+
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ __field(unsigned int, status)
+ __field(int, step)
+ __field(unsigned int, inflight)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, dev_name(bdi->dev), 32);
+ __entry->status = status;
+ __entry->step = step;
+ __entry->inflight = inflight;
+ ),
+
+ TP_printk("%s: status=%u, step=%d, inflight=%u\n", __entry->name,
+ __entry->status, __entry->step, __entry->inflight)
+);
+
+#endif /* _TRACE_WBT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
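
As a rough sketch of how these tracepoints are meant to be used (the helper below is hypothetical; the real call sites live in block/blk-wbt.c), a latency sample would be reported as:

    /* Hypothetical helper; the threshold handling is illustrative only. */
    static void wbt_note_latency(struct backing_dev_info *bdi,
                                 unsigned long lat_nsec, unsigned long thresh_nsec)
    {
        /* wbt_lat's TP_fast_assign converts the nanosecond value to microseconds */
        if (lat_nsec > thresh_nsec)
            trace_wbt_lat(bdi, lat_nsec);
    }
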
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 67d632f1743d..2c748ddad5f8 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -92,4 +92,6 @@
#define SO_CNX_ADVICE 53
+#define SCM_TIMESTAMPING_OPT_STATS 54
+
#endif /* __ASM_GENERIC_SOCKET_H */
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index d6b5a21f3d3c..396183628f3c 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -50,6 +50,7 @@ extern "C" {
#define DRM_AMDGPU_WAIT_CS 0x09
#define DRM_AMDGPU_GEM_OP 0x10
#define DRM_AMDGPU_GEM_USERPTR 0x11
+#define DRM_AMDGPU_WAIT_FENCES 0x12
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -63,6 +64,7 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
#define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
+#define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -81,6 +83,8 @@ extern "C" {
#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3)
/* Flag that create shadow bo(GTT) while allocating vram bo */
#define AMDGPU_GEM_CREATE_SHADOW (1 << 4)
+/* Flag that the BO should be allocated in contiguous (linear) VRAM */
+#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@@ -305,6 +309,32 @@ union drm_amdgpu_wait_cs {
struct drm_amdgpu_wait_cs_out out;
};
+struct drm_amdgpu_fence {
+ __u32 ctx_id;
+ __u32 ip_type;
+ __u32 ip_instance;
+ __u32 ring;
+ __u64 seq_no;
+};
+
+struct drm_amdgpu_wait_fences_in {
+ /** Userspace pointer (passed as a __u64) to the array of fences */
+ __u64 fences;
+ __u32 fence_count;
+ __u32 wait_all;
+ __u64 timeout_ns;
+};
+
+struct drm_amdgpu_wait_fences_out {
+ __u32 status;
+ __u32 first_signaled;
+};
+
+union drm_amdgpu_wait_fences {
+ struct drm_amdgpu_wait_fences_in in;
+ struct drm_amdgpu_wait_fences_out out;
+};
+
#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
#define AMDGPU_GEM_OP_SET_PLACEMENT 1
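
A hedged userspace sketch of driving the new wait-fences ioctl (the header path, the ip_type value, and the interpretation of out.status are assumptions; the fences field is treated here as a user pointer to an array of struct drm_amdgpu_fence):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include "amdgpu_drm.h"    /* path depends on libdrm vs. installed kernel headers */

    /* Hypothetical helper: wait up to one second for a single submission to signal. */
    static int wait_one_fence(int drm_fd, uint32_t ctx_id, uint64_t seq)
    {
        struct drm_amdgpu_fence fence = {
            .ctx_id = ctx_id,
            .ip_type = 0,    /* e.g. the GFX ring; the exact value is an assumption */
            .seq_no = seq,
        };
        union drm_amdgpu_wait_fences wf = {
            .in = {
                .fences = (uintptr_t)&fence,    /* user pointer passed as a __u64 */
                .fence_count = 1,
                .wait_all = 1,
                .timeout_ns = 1000000000ull,    /* 1 second */
            },
        };

        if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &wf))
            return -1;
        return (int)wf.out.status;    /* wait result; exact semantics are driver-defined */
    }
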
@@ -436,6 +466,7 @@ struct drm_amdgpu_cs_chunk_data {
*
*/
#define AMDGPU_IDS_FLAGS_FUSION 0x1
+#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
/* indicate if acceleration can be working */
#define AMDGPU_INFO_ACCEL_WORKING 0x00
@@ -487,6 +518,16 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_VIS_VRAM_USAGE 0x17
/* number of TTM buffer evictions */
#define AMDGPU_INFO_NUM_EVICTIONS 0x18
+/* Query memory about VRAM and GTT domains */
+#define AMDGPU_INFO_MEMORY 0x19
+/* Query vce clock table */
+#define AMDGPU_INFO_VCE_CLOCK_TABLE 0x1A
+/* Query vbios related information */
+#define AMDGPU_INFO_VBIOS 0x1B
+ /* Subquery id: Query vbios size */
+ #define AMDGPU_INFO_VBIOS_SIZE 0x1
+ /* Subquery id: Query vbios image */
+ #define AMDGPU_INFO_VBIOS_IMAGE 0x2
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@@ -545,6 +586,11 @@ struct drm_amdgpu_info {
} read_mmr_reg;
struct drm_amdgpu_query_fw query_fw;
+
+ struct {
+ __u32 type;
+ __u32 offset;
+ } vbios_info;
};
};
@@ -572,6 +618,34 @@ struct drm_amdgpu_info_vram_gtt {
__u64 gtt_size;
};
+struct drm_amdgpu_heap_info {
+ /** max. physical memory */
+ __u64 total_heap_size;
+
+ /** Theoretical max. available memory in the given heap */
+ __u64 usable_heap_size;
+
+ /**
+ * Number of bytes allocated in the heap. This includes all processes
+ * and private allocations in the kernel. It changes when new buffers
+ * are allocated, freed, and moved. It cannot be larger than
+ * heap_size.
+ */
+ __u64 heap_usage;
+
+ /**
+ * Theoretical possible max. size of buffer which
+ * could be allocated in the given heap
+ */
+ __u64 max_allocation;
+};
+
+struct drm_amdgpu_memory_info {
+ struct drm_amdgpu_heap_info vram;
+ struct drm_amdgpu_heap_info cpu_accessible_vram;
+ struct drm_amdgpu_heap_info gtt;
+};
+
struct drm_amdgpu_info_firmware {
__u32 ver;
__u32 feature;
@@ -645,6 +719,24 @@ struct drm_amdgpu_info_hw_ip {
__u32 _pad;
};
+#define AMDGPU_VCE_CLOCK_TABLE_ENTRIES 6
+
+struct drm_amdgpu_info_vce_clock_table_entry {
+ /** System clock */
+ __u32 sclk;
+ /** Memory clock */
+ __u32 mclk;
+ /** VCE clock */
+ __u32 eclk;
+ __u32 pad;
+};
+
+struct drm_amdgpu_info_vce_clock_table {
+ struct drm_amdgpu_info_vce_clock_table_entry entries[AMDGPU_VCE_CLOCK_TABLE_ENTRIES];
+ __u32 num_valid_entries;
+ __u32 pad;
+};
+
/*
* Supported GPU families
*/
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index df0e3504c349..ce7efe2e8a5e 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -47,7 +47,15 @@ extern "C" {
#define DRM_MODE_TYPE_DRIVER (1<<6)
/* Video mode flags */
-/* bit compatible with the xorg definitions. */
+/* bit compatible with the xrandr RR_ definitions (bits 0-13)
+ *
+ * ABI warning: Existing userspace really expects
+ * the mode flags to match the xrandr definitions. Any
+ * changes that don't match the xrandr definitions will
+ * likely need a new client cap or some other mechanism
+ * to avoid breaking existing userspace. This includes
+ * allocating new flags in the previously unused bits!
+ */
#define DRM_MODE_FLAG_PHSYNC (1<<0)
#define DRM_MODE_FLAG_NHSYNC (1<<1)
#define DRM_MODE_FLAG_PVSYNC (1<<2)
@@ -77,6 +85,19 @@ extern "C" {
#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
+/* Picture aspect ratio options */
+#define DRM_MODE_PICTURE_ASPECT_NONE 0
+#define DRM_MODE_PICTURE_ASPECT_4_3 1
+#define DRM_MODE_PICTURE_ASPECT_16_9 2
+
+/* Aspect ratio flag bitmask (4 bits 22:19) */
+#define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19)
+#define DRM_MODE_FLAG_PIC_AR_NONE \
+ (DRM_MODE_PICTURE_ASPECT_NONE<<19)
+#define DRM_MODE_FLAG_PIC_AR_4_3 \
+ (DRM_MODE_PICTURE_ASPECT_4_3<<19)
+#define DRM_MODE_FLAG_PIC_AR_16_9 \
+ (DRM_MODE_PICTURE_ASPECT_16_9<<19)
/* DPMS flags */
/* bit compatible with the xorg definitions. */
@@ -92,11 +113,6 @@ extern "C" {
#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
-/* Picture aspect ratio options */
-#define DRM_MODE_PICTURE_ASPECT_NONE 0
-#define DRM_MODE_PICTURE_ASPECT_4_3 1
-#define DRM_MODE_PICTURE_ASPECT_16_9 2
-
/* Dithering mode options */
#define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1
@@ -220,14 +236,16 @@ struct drm_mode_get_encoder {
/* This is for connectors with multiple signal types. */
/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
-#define DRM_MODE_SUBCONNECTOR_Automatic 0
-#define DRM_MODE_SUBCONNECTOR_Unknown 0
-#define DRM_MODE_SUBCONNECTOR_DVID 3
-#define DRM_MODE_SUBCONNECTOR_DVIA 4
-#define DRM_MODE_SUBCONNECTOR_Composite 5
-#define DRM_MODE_SUBCONNECTOR_SVIDEO 6
-#define DRM_MODE_SUBCONNECTOR_Component 8
-#define DRM_MODE_SUBCONNECTOR_SCART 9
+enum drm_mode_subconnector {
+ DRM_MODE_SUBCONNECTOR_Automatic = 0,
+ DRM_MODE_SUBCONNECTOR_Unknown = 0,
+ DRM_MODE_SUBCONNECTOR_DVID = 3,
+ DRM_MODE_SUBCONNECTOR_DVIA = 4,
+ DRM_MODE_SUBCONNECTOR_Composite = 5,
+ DRM_MODE_SUBCONNECTOR_SVIDEO = 6,
+ DRM_MODE_SUBCONNECTOR_Component = 8,
+ DRM_MODE_SUBCONNECTOR_SCART = 9,
+};
#define DRM_MODE_CONNECTOR_Unknown 0
#define DRM_MODE_CONNECTOR_VGA 1
@@ -392,17 +410,20 @@ struct drm_mode_fb_cmd2 {
* offsets[1]. Note that offsets[0] will generally
* be 0 (but this is not required).
*
- * To accommodate tiled, compressed, etc formats, a per-plane
+ * To accommodate tiled, compressed, etc formats, a
* modifier can be specified. The default value of zero
* indicates "native" format as specified by the fourcc.
- * Vendor specific modifier token. This allows, for example,
- * different tiling/swizzling pattern on different planes.
- * See discussion above of DRM_FORMAT_MOD_xxx.
+ * Vendor specific modifier token. Note that even though
+ * it looks like we have a modifier per-plane, we in fact
+ * do not. The modifier for each plane must be identical.
+ * Thus all combinations of different data layouts for
+ * multi plane formats must be enumerated as separate
+ * modifiers.
*/
__u32 handles[4];
__u32 pitches[4]; /* pitch for each plane */
__u32 offsets[4]; /* offset of each plane */
- __u64 modifier[4]; /* ie, tiling, compressed (per plane) */
+ __u64 modifier[4]; /* ie, tiling, compress */
};
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 03725fe89859..1c12a350eca3 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -389,6 +389,11 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_MIN_EU_IN_POOL 39
#define I915_PARAM_MMAP_GTT_VERSION 40
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
+ * priorities and the driver will attempt to execute batches in priority order.
+ */
+#define I915_PARAM_HAS_SCHEDULER 41
+
typedef struct drm_i915_getparam {
__s32 param;
/*
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 8c51e8a0df89..4d5d6a2bc59e 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -2,17 +2,24 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
*
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef __MSM_DRM_H__
diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
index ad7edc3edf7c..f07a09016726 100644
--- a/include/uapi/drm/vc4_drm.h
+++ b/include/uapi/drm/vc4_drm.h
@@ -286,6 +286,8 @@ struct drm_vc4_get_hang_state {
#define DRM_VC4_PARAM_V3D_IDENT1 1
#define DRM_VC4_PARAM_V3D_IDENT2 2
#define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3
+#define DRM_VC4_PARAM_SUPPORTS_ETC1 4
+#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
struct drm_vc4_get_param {
__u32 param;
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index cd2be1c8e9fb..a8b93e685239 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -70,6 +70,7 @@ header-y += bfs_fs.h
header-y += binfmts.h
header-y += blkpg.h
header-y += blktrace_api.h
+header-y += blkzoned.h
header-y += bpf_common.h
header-y += bpf_perf_event.h
header-y += bpf.h
@@ -83,6 +84,8 @@ header-y += capi.h
header-y += cciss_defs.h
header-y += cciss_ioctl.h
header-y += cdrom.h
+header-y += cec.h
+header-y += cec-funcs.h
header-y += cgroupstats.h
header-y += chio.h
header-y += cm4000_cs.h
@@ -426,6 +429,7 @@ header-y += udp.h
header-y += uhid.h
header-y += uinput.h
header-y += uio.h
+header-y += uleds.h
header-y += ultrasound.h
header-y += un.h
header-y += unistd.h
@@ -460,6 +464,7 @@ header-y += virtio_rng.h
header-y += virtio_scsi.h
header-y += virtio_types.h
header-y += virtio_vsock.h
+header-y += virtio_crypto.h
header-y += vm_sockets.h
header-y += vt.h
header-y += vtpm_proxy.h
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 208df7b44e90..1c107cb1c83f 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -254,6 +254,7 @@
#define AUDIT_OBJ_LEV_LOW 22
#define AUDIT_OBJ_LEV_HIGH 23
#define AUDIT_LOGINUID_SET 24
+#define AUDIT_SESSIONID 25 /* Session ID */
/* These are ONLY useful when checking
* at syscall exit time (AUDIT_AT_EXIT). */
@@ -330,10 +331,12 @@ enum {
#define AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME 0x00000002
#define AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH 0x00000004
#define AUDIT_FEATURE_BITMAP_EXCLUDE_EXTEND 0x00000008
+#define AUDIT_FEATURE_BITMAP_SESSIONID_FILTER 0x00000010
#define AUDIT_FEATURE_BITMAP_ALL (AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT | \
AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME | \
AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH | \
- AUDIT_FEATURE_BITMAP_EXCLUDE_EXTEND)
+ AUDIT_FEATURE_BITMAP_EXCLUDE_EXTEND | \
+ AUDIT_FEATURE_BITMAP_SESSIONID_FILTER)
/* deprecated: AUDIT_VERSION_* */
#define AUDIT_VERSION_LATEST AUDIT_FEATURE_BITMAP_ALL
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
new file mode 100644
index 000000000000..40d1d7bff537
--- /dev/null
+++ b/include/uapi/linux/blkzoned.h
@@ -0,0 +1,143 @@
+/*
+ * Zoned block devices handling.
+ *
+ * Copyright (C) 2015 Seagate Technology PLC
+ *
+ * Written by: Shaun Tancheff <shaun.tancheff@seagate.com>
+ *
+ * Modified by: Damien Le Moal <damien.lemoal@hgst.com>
+ * Copyright (C) 2016 Western Digital
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#ifndef _UAPI_BLKZONED_H
+#define _UAPI_BLKZONED_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/**
+ * enum blk_zone_type - Types of zones allowed in a zoned device.
+ *
+ * @BLK_ZONE_TYPE_CONVENTIONAL: The zone has no write pointer and can be written
+ * randomly. Zone reset has no effect on the zone.
+ * @BLK_ZONE_TYPE_SEQWRITE_REQ: The zone must be written sequentially
+ * @BLK_ZONE_TYPE_SEQWRITE_PREF: The zone can be written non-sequentially
+ *
+ * Any other value not defined is reserved and must be considered as invalid.
+ */
+enum blk_zone_type {
+ BLK_ZONE_TYPE_CONVENTIONAL = 0x1,
+ BLK_ZONE_TYPE_SEQWRITE_REQ = 0x2,
+ BLK_ZONE_TYPE_SEQWRITE_PREF = 0x3,
+};
+
+/**
+ * enum blk_zone_cond - Condition [state] of a zone in a zoned device.
+ *
+ * @BLK_ZONE_COND_NOT_WP: The zone has no write pointer, it is conventional.
+ * @BLK_ZONE_COND_EMPTY: The zone is empty.
+ * @BLK_ZONE_COND_IMP_OPEN: The zone is open, but not explicitly opened.
+ * @BLK_ZONE_COND_EXP_OPEN: The zones was explicitly opened by an
+ * OPEN ZONE command.
+ * @BLK_ZONE_COND_CLOSED: The zone was [explicitly] closed after writing.
+ * @BLK_ZONE_COND_FULL: The zone is marked as full, possibly by a zone
+ * FINISH ZONE command.
+ * @BLK_ZONE_COND_READONLY: The zone is read-only.
+ * @BLK_ZONE_COND_OFFLINE: The zone is offline (sectors cannot be read/written).
+ *
+ * The Zone Condition state machine in the ZBC/ZAC standards maps the above
+ * definitions as:
+ * - ZC1: Empty | BLK_ZONE_COND_EMPTY
+ * - ZC2: Implicit Open | BLK_ZONE_COND_IMP_OPEN
+ * - ZC3: Explicit Open | BLK_ZONE_COND_EXP_OPEN
+ * - ZC4: Closed | BLK_ZONE_COND_CLOSED
+ * - ZC5: Full | BLK_ZONE_COND_FULL
+ * - ZC6: Read Only | BLK_ZONE_COND_READONLY
+ * - ZC7: Offline | BLK_ZONE_COND_OFFLINE
+ *
+ * Conditions 0x5 to 0xC are reserved by the current ZBC/ZAC spec and should
+ * be considered invalid.
+ */
+enum blk_zone_cond {
+ BLK_ZONE_COND_NOT_WP = 0x0,
+ BLK_ZONE_COND_EMPTY = 0x1,
+ BLK_ZONE_COND_IMP_OPEN = 0x2,
+ BLK_ZONE_COND_EXP_OPEN = 0x3,
+ BLK_ZONE_COND_CLOSED = 0x4,
+ BLK_ZONE_COND_READONLY = 0xD,
+ BLK_ZONE_COND_FULL = 0xE,
+ BLK_ZONE_COND_OFFLINE = 0xF,
+};
+
+/**
+ * struct blk_zone - Zone descriptor for BLKREPORTZONE ioctl.
+ *
+ * @start: Zone start in 512 B sector units
+ * @len: Zone length in 512 B sector units
+ * @wp: Zone write pointer location in 512 B sector units
+ * @type: see enum blk_zone_type for possible values
+ * @cond: see enum blk_zone_cond for possible values
+ * @non_seq: Flag indicating that the zone is using non-sequential resources
+ * (for host-aware zoned block devices only).
+ * @reset: Flag indicating that a zone reset is recommended.
+ * @reserved: Padding to 64 B to match the ZBC/ZAC defined zone descriptor size.
+ *
+ * start, len and wp use the regular 512 B sector unit, regardless of the
+ * device logical block size. The overall structure size is 64 B to match the
+ * ZBC/ZAC defined zone descriptor and allow support for future additional
+ * zone information.
+ */
+struct blk_zone {
+ __u64 start; /* Zone start sector */
+ __u64 len; /* Zone length in number of sectors */
+ __u64 wp; /* Zone write pointer position */
+ __u8 type; /* Zone type */
+ __u8 cond; /* Zone condition */
+ __u8 non_seq; /* Non-sequential write resources active */
+ __u8 reset; /* Reset write pointer recommended */
+ __u8 reserved[36];
+};
+
+/**
+ * struct blk_zone_report - BLKREPORTZONE ioctl request/reply
+ *
+ * @sector: starting sector of report
+ * @nr_zones: IN maximum / OUT actual
+ * @reserved: padding to 16 byte alignment
+ * @zones: Space to hold @nr_zones zone descriptors on reply.
+ *
+ * An array of at most @nr_zones struct blk_zone entries must follow this
+ * structure in memory.
+ */
+struct blk_zone_report {
+ __u64 sector;
+ __u32 nr_zones;
+ __u8 reserved[4];
+ struct blk_zone zones[0];
+} __packed;
+
+/**
+ * struct blk_zone_range - BLKRESETZONE ioctl request
+ * @sector: starting sector of the first zone to issue reset write pointer
+ * @nr_sectors: Total number of sectors of 1 or more zones to reset
+ */
+struct blk_zone_range {
+ __u64 sector;
+ __u64 nr_sectors;
+};
+
+/**
+ * Zoned block device ioctls:
+ *
+ * @BLKREPORTZONE: Get zone information. Takes a zone report as argument.
+ * The zone report will start from the zone containing the
+ * sector specified in the report request structure.
+ * @BLKRESETZONE: Reset the write pointer of the zones in the specified
+ * sector range. The sector range must be zone aligned.
+ */
+#define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report)
+#define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range)
+
+#endif /* _UAPI_BLKZONED_H */
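
To make the calling convention concrete, a hedged userspace sketch (error handling trimmed; tools such as blkzone in util-linux are the real consumers of these ioctls):

    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/blkzoned.h>

    /* Report up to 16 zones starting at sector 0 of an already-open block device. */
    static int report_first_zones(int fd)
    {
        unsigned int nr = 16;
        struct blk_zone_report *rep;
        int ret;

        rep = calloc(1, sizeof(*rep) + nr * sizeof(struct blk_zone));
        if (!rep)
            return -1;

        rep->sector = 0;      /* report starts at the zone containing this sector */
        rep->nr_zones = nr;   /* IN: room for 16 descriptors, OUT: number filled */

        ret = ioctl(fd, BLKREPORTZONE, rep);
        /* on success, rep->zones[0..rep->nr_zones-1] hold the zone descriptors */
        free(rep);
        return ret;
    }

    /* Reset the write pointer of one zone, given its start and length in sectors. */
    static int reset_zone(int fd, __u64 start, __u64 len)
    {
        struct blk_zone_range range = { .sector = start, .nr_sectors = len };

        return ioctl(fd, BLKRESETZONE, &range);
    }
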
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index f09c70b97eca..0eb0e87dbe9f 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -73,6 +73,8 @@ enum bpf_cmd {
BPF_PROG_LOAD,
BPF_OBJ_PIN,
BPF_OBJ_GET,
+ BPF_PROG_ATTACH,
+ BPF_PROG_DETACH,
};
enum bpf_map_type {
@@ -85,6 +87,8 @@ enum bpf_map_type {
BPF_MAP_TYPE_PERCPU_ARRAY,
BPF_MAP_TYPE_STACK_TRACE,
BPF_MAP_TYPE_CGROUP_ARRAY,
+ BPF_MAP_TYPE_LRU_HASH,
+ BPF_MAP_TYPE_LRU_PERCPU_HASH,
};
enum bpf_prog_type {
@@ -96,8 +100,22 @@ enum bpf_prog_type {
BPF_PROG_TYPE_TRACEPOINT,
BPF_PROG_TYPE_XDP,
BPF_PROG_TYPE_PERF_EVENT,
+ BPF_PROG_TYPE_CGROUP_SKB,
+ BPF_PROG_TYPE_CGROUP_SOCK,
+ BPF_PROG_TYPE_LWT_IN,
+ BPF_PROG_TYPE_LWT_OUT,
+ BPF_PROG_TYPE_LWT_XMIT,
};
+enum bpf_attach_type {
+ BPF_CGROUP_INET_INGRESS,
+ BPF_CGROUP_INET_EGRESS,
+ BPF_CGROUP_INET_SOCK_CREATE,
+ __MAX_BPF_ATTACH_TYPE
+};
+
+#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
+
#define BPF_PSEUDO_MAP_FD 1
/* flags for BPF_MAP_UPDATE_ELEM command */
@@ -106,6 +124,13 @@ enum bpf_prog_type {
#define BPF_EXIST 2 /* update existing element */
#define BPF_F_NO_PREALLOC (1U << 0)
+/* Instead of having one common LRU list in the
+ * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
+ * which can scale and perform better.
+ * Note, the LRU nodes (including free nodes) cannot be moved
+ * across different LRU lists.
+ */
+#define BPF_F_NO_COMMON_LRU (1U << 1)
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
@@ -141,293 +166,327 @@ union bpf_attr {
__aligned_u64 pathname;
__u32 bpf_fd;
};
+
+ struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
+ __u32 target_fd; /* container object to attach to */
+ __u32 attach_bpf_fd; /* eBPF program to attach */
+ __u32 attach_type;
+ };
} __attribute__((aligned(8)));
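
For reference, a hedged sketch of how a loader could use the new BPF_PROG_ATTACH command with the anonymous struct above (prog_fd and cgroup_fd are assumed to come from an earlier BPF_PROG_LOAD and an open() of a cgroup directory):

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    /* Attach an already-loaded program to a cgroup, e.g. for BPF_CGROUP_INET_INGRESS. */
    static int attach_prog_to_cgroup(int prog_fd, int cgroup_fd,
                                     enum bpf_attach_type type)
    {
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.target_fd = cgroup_fd;    /* container object to attach to */
        attr.attach_bpf_fd = prog_fd;  /* eBPF program to attach */
        attr.attach_type = type;

        return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
    }
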
+/* BPF helper function descriptions:
+ *
+ * void *bpf_map_lookup_elem(&map, &key)
+ * Return: Map value or NULL
+ *
+ * int bpf_map_update_elem(&map, &key, &value, flags)
+ * Return: 0 on success or negative error
+ *
+ * int bpf_map_delete_elem(&map, &key)
+ * Return: 0 on success or negative error
+ *
+ * int bpf_probe_read(void *dst, int size, void *src)
+ * Return: 0 on success or negative error
+ *
+ * u64 bpf_ktime_get_ns(void)
+ * Return: current ktime
+ *
+ * int bpf_trace_printk(const char *fmt, int fmt_size, ...)
+ * Return: length of buffer written or negative error
+ *
+ * u32 bpf_prandom_u32(void)
+ * Return: random value
+ *
+ * u32 bpf_raw_smp_processor_id(void)
+ * Return: SMP processor ID
+ *
+ * int bpf_skb_store_bytes(skb, offset, from, len, flags)
+ * store bytes into packet
+ * @skb: pointer to skb
+ * @offset: offset within packet from skb->mac_header
+ * @from: pointer where to copy bytes from
+ * @len: number of bytes to store into packet
+ * @flags: bit 0 - if true, recompute skb->csum
+ * other bits - reserved
+ * Return: 0 on success or negative error
+ *
+ * int bpf_l3_csum_replace(skb, offset, from, to, flags)
+ * recompute IP checksum
+ * @skb: pointer to skb
+ * @offset: offset within packet where IP checksum is located
+ * @from: old value of header field
+ * @to: new value of header field
+ * @flags: bits 0-3 - size of header field
+ * other bits - reserved
+ * Return: 0 on success or negative error
+ *
+ * int bpf_l4_csum_replace(skb, offset, from, to, flags)
+ * recompute TCP/UDP checksum
+ * @skb: pointer to skb
+ * @offset: offset within packet where TCP/UDP checksum is located
+ * @from: old value of header field
+ * @to: new value of header field
+ * @flags: bits 0-3 - size of header field
+ * bit 4 - is pseudo header
+ * other bits - reserved
+ * Return: 0 on success or negative error
+ *
+ * int bpf_tail_call(ctx, prog_array_map, index)
+ * jump into another BPF program
+ * @ctx: context pointer passed to next program
+ * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
+ * @index: index inside array that selects specific program to run
+ * Return: 0 on success or negative error
+ *
+ * int bpf_clone_redirect(skb, ifindex, flags)
+ * redirect to another netdev
+ * @skb: pointer to skb
+ * @ifindex: ifindex of the net device
+ * @flags: bit 0 - if set, redirect to ingress instead of egress
+ * other bits - reserved
+ * Return: 0 on success or negative error
+ *
+ * u64 bpf_get_current_pid_tgid(void)
+ * Return: current->tgid << 32 | current->pid
+ *
+ * u64 bpf_get_current_uid_gid(void)
+ * Return: current_gid << 32 | current_uid
+ *
+ * int bpf_get_current_comm(char *buf, int size_of_buf)
+ * stores current->comm into buf
+ * Return: 0 on success or negative error
+ *
+ * u32 bpf_get_cgroup_classid(skb)
+ * retrieve a proc's classid
+ * @skb: pointer to skb
+ * Return: classid if != 0
+ *
+ * int bpf_skb_vlan_push(skb, vlan_proto, vlan_tci)
+ * Return: 0 on success or negative error
+ *
+ * int bpf_skb_vlan_pop(skb)
+ * Return: 0 on success or negative error
+ *
+ * int bpf_skb_get_tunnel_key(skb, key, size, flags)
+ * int bpf_skb_set_tunnel_key(skb, key, size, flags)
+ * retrieve or populate tunnel metadata
+ * @skb: pointer to skb
+ * @key: pointer to 'struct bpf_tunnel_key'
+ * @size: size of 'struct bpf_tunnel_key'
+ * @flags: room for future extensions
+ * Return: 0 on success or negative error
+ *
+ * u64 bpf_perf_event_read(&map, index)
+ * Return: Number events read or error code
+ *
+ * int bpf_redirect(ifindex, flags)
+ * redirect to another netdev
+ * @ifindex: ifindex of the net device
+ * @flags: bit 0 - if set, redirect to ingress instead of egress
+ * other bits - reserved
+ * Return: TC_ACT_REDIRECT
+ *
+ * u32 bpf_get_route_realm(skb)
+ * retrieve a dst's tclassid
+ * @skb: pointer to skb
+ * Return: realm if != 0
+ *
+ * int bpf_perf_event_output(ctx, map, index, data, size)
+ * output perf raw sample
+ * @ctx: struct pt_regs*
+ * @map: pointer to perf_event_array map
+ * @index: index of event in the map
+ * @data: data on stack to be output as raw data
+ * @size: size of data
+ * Return: 0 on success or negative error
+ *
+ * int bpf_get_stackid(ctx, map, flags)
+ * walk user or kernel stack and return id
+ * @ctx: struct pt_regs*
+ * @map: pointer to stack_trace map
+ * @flags: bits 0-7 - number of stack frames to skip
+ * bit 8 - collect user stack instead of kernel
+ * bit 9 - compare stacks by hash only
+ * bit 10 - if two different stacks hash into the same stackid
+ * discard old
+ * other bits - reserved
+ * Return: >= 0 stackid on success or negative error
+ *
+ * s64 bpf_csum_diff(from, from_size, to, to_size, seed)
+ * calculate csum diff
+ * @from: raw from buffer
+ * @from_size: length of from buffer
+ * @to: raw to buffer
+ * @to_size: length of to buffer
+ * @seed: optional seed
+ * Return: csum result or negative error code
+ *
+ * int bpf_skb_get_tunnel_opt(skb, opt, size)
+ * retrieve tunnel options metadata
+ * @skb: pointer to skb
+ * @opt: pointer to raw tunnel option data
+ * @size: size of @opt
+ * Return: option size
+ *
+ * int bpf_skb_set_tunnel_opt(skb, opt, size)
+ * populate tunnel options metadata
+ * @skb: pointer to skb
+ * @opt: pointer to raw tunnel option data
+ * @size: size of @opt
+ * Return: 0 on success or negative error
+ *
+ * int bpf_skb_change_proto(skb, proto, flags)
+ * Change protocol of the skb. Currently supported is v4 -> v6,
+ * v6 -> v4 transitions. The helper will also resize the skb. eBPF
+ * program is expected to fill the new headers via skb_store_bytes
+ * and lX_csum_replace.
+ * @skb: pointer to skb
+ * @proto: new skb->protocol type
+ * @flags: reserved
+ * Return: 0 on success or negative error
+ *
+ * int bpf_skb_change_type(skb, type)
+ * Change packet type of skb.
+ * @skb: pointer to skb
+ * @type: new skb->pkt_type type
+ * Return: 0 on success or negative error
+ *
+ * int bpf_skb_under_cgroup(skb, map, index)
+ * Check cgroup2 membership of skb
+ * @skb: pointer to skb
+ * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
+ * @index: index of the cgroup in the bpf_map
+ * Return:
+ * == 0 skb failed the cgroup2 descendant test
+ * == 1 skb succeeded the cgroup2 descendant test
+ * < 0 error
+ *
+ * u32 bpf_get_hash_recalc(skb)
+ * Retrieve and possibly recalculate skb->hash.
+ * @skb: pointer to skb
+ * Return: hash
+ *
+ * u64 bpf_get_current_task(void)
+ * Returns current task_struct
+ * Return: current
+ *
+ * int bpf_probe_write_user(void *dst, void *src, int len)
+ * safely attempt to write to a location
+ * @dst: destination address in userspace
+ * @src: source address on stack
+ * @len: number of bytes to copy
+ * Return: 0 on success or negative error
+ *
+ * int bpf_current_task_under_cgroup(map, index)
+ * Check cgroup2 membership of current task
+ * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
+ * @index: index of the cgroup in the bpf_map
+ * Return:
+ * == 0 current failed the cgroup2 descendant test
+ * == 1 current succeeded the cgroup2 descendant test
+ * < 0 error
+ *
+ * int bpf_skb_change_tail(skb, len, flags)
+ * The helper will resize the skb to the given new size, to be used f.e.
+ * with control messages.
+ * @skb: pointer to skb
+ * @len: new skb length
+ * @flags: reserved
+ * Return: 0 on success or negative error
+ *
+ * int bpf_skb_pull_data(skb, len)
+ * The helper will pull in non-linear data in case the skb is non-linear
+ * and not all of len are part of the linear section. Only needed for
+ * read/write with direct packet access.
+ * @skb: pointer to skb
+ * @len: len to make read/writeable
+ * Return: 0 on success or negative error
+ *
+ * s64 bpf_csum_update(skb, csum)
+ * Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
+ * @skb: pointer to skb
+ * @csum: csum to add
+ * Return: csum on success or negative error
+ *
+ * void bpf_set_hash_invalid(skb)
+ * Invalidate current skb->hash.
+ * @skb: pointer to skb
+ *
+ * int bpf_get_numa_node_id()
+ * Return: Id of current NUMA node.
+ *
+ * int bpf_skb_change_head()
+ * Grows headroom of skb and adjusts MAC header offset accordingly.
+ * Will extend/reallocate as required automatically.
+ * May change skb data pointer and will thus invalidate any check
+ * performed for direct packet access.
+ * @skb: pointer to skb
+ * @len: length of header to be pushed in front
+ * @flags: Flags (unused for now)
+ * Return: 0 on success or negative error
+ *
+ * int bpf_xdp_adjust_head(xdp_md, delta)
+ * Adjust the xdp_md.data by delta
+ * @xdp_md: pointer to xdp_md
+ * @delta: A positive/negative integer to be added to xdp_md.data
+ * Return: 0 on success or negative on error
+ */
+#define __BPF_FUNC_MAPPER(FN) \
+ FN(unspec), \
+ FN(map_lookup_elem), \
+ FN(map_update_elem), \
+ FN(map_delete_elem), \
+ FN(probe_read), \
+ FN(ktime_get_ns), \
+ FN(trace_printk), \
+ FN(get_prandom_u32), \
+ FN(get_smp_processor_id), \
+ FN(skb_store_bytes), \
+ FN(l3_csum_replace), \
+ FN(l4_csum_replace), \
+ FN(tail_call), \
+ FN(clone_redirect), \
+ FN(get_current_pid_tgid), \
+ FN(get_current_uid_gid), \
+ FN(get_current_comm), \
+ FN(get_cgroup_classid), \
+ FN(skb_vlan_push), \
+ FN(skb_vlan_pop), \
+ FN(skb_get_tunnel_key), \
+ FN(skb_set_tunnel_key), \
+ FN(perf_event_read), \
+ FN(redirect), \
+ FN(get_route_realm), \
+ FN(perf_event_output), \
+ FN(skb_load_bytes), \
+ FN(get_stackid), \
+ FN(csum_diff), \
+ FN(skb_get_tunnel_opt), \
+ FN(skb_set_tunnel_opt), \
+ FN(skb_change_proto), \
+ FN(skb_change_type), \
+ FN(skb_under_cgroup), \
+ FN(get_hash_recalc), \
+ FN(get_current_task), \
+ FN(probe_write_user), \
+ FN(current_task_under_cgroup), \
+ FN(skb_change_tail), \
+ FN(skb_pull_data), \
+ FN(csum_update), \
+ FN(set_hash_invalid), \
+ FN(get_numa_node_id), \
+ FN(skb_change_head), \
+ FN(xdp_adjust_head),
+
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
*/
+#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
enum bpf_func_id {
- BPF_FUNC_unspec,
- BPF_FUNC_map_lookup_elem, /* void *map_lookup_elem(&map, &key) */
- BPF_FUNC_map_update_elem, /* int map_update_elem(&map, &key, &value, flags) */
- BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */
- BPF_FUNC_probe_read, /* int bpf_probe_read(void *dst, int size, void *src) */
- BPF_FUNC_ktime_get_ns, /* u64 bpf_ktime_get_ns(void) */
- BPF_FUNC_trace_printk, /* int bpf_trace_printk(const char *fmt, int fmt_size, ...) */
- BPF_FUNC_get_prandom_u32, /* u32 prandom_u32(void) */
- BPF_FUNC_get_smp_processor_id, /* u32 raw_smp_processor_id(void) */
-
- /**
- * skb_store_bytes(skb, offset, from, len, flags) - store bytes into packet
- * @skb: pointer to skb
- * @offset: offset within packet from skb->mac_header
- * @from: pointer where to copy bytes from
- * @len: number of bytes to store into packet
- * @flags: bit 0 - if true, recompute skb->csum
- * other bits - reserved
- * Return: 0 on success
- */
- BPF_FUNC_skb_store_bytes,
-
- /**
- * l3_csum_replace(skb, offset, from, to, flags) - recompute IP checksum
- * @skb: pointer to skb
- * @offset: offset within packet where IP checksum is located
- * @from: old value of header field
- * @to: new value of header field
- * @flags: bits 0-3 - size of header field
- * other bits - reserved
- * Return: 0 on success
- */
- BPF_FUNC_l3_csum_replace,
-
- /**
- * l4_csum_replace(skb, offset, from, to, flags) - recompute TCP/UDP checksum
- * @skb: pointer to skb
- * @offset: offset within packet where TCP/UDP checksum is located
- * @from: old value of header field
- * @to: new value of header field
- * @flags: bits 0-3 - size of header field
- * bit 4 - is pseudo header
- * other bits - reserved
- * Return: 0 on success
- */
- BPF_FUNC_l4_csum_replace,
-
- /**
- * bpf_tail_call(ctx, prog_array_map, index) - jump into another BPF program
- * @ctx: context pointer passed to next program
- * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
- * @index: index inside array that selects specific program to run
- * Return: 0 on success
- */
- BPF_FUNC_tail_call,
-
- /**
- * bpf_clone_redirect(skb, ifindex, flags) - redirect to another netdev
- * @skb: pointer to skb
- * @ifindex: ifindex of the net device
- * @flags: bit 0 - if set, redirect to ingress instead of egress
- * other bits - reserved
- * Return: 0 on success
- */
- BPF_FUNC_clone_redirect,
-
- /**
- * u64 bpf_get_current_pid_tgid(void)
- * Return: current->tgid << 32 | current->pid
- */
- BPF_FUNC_get_current_pid_tgid,
-
- /**
- * u64 bpf_get_current_uid_gid(void)
- * Return: current_gid << 32 | current_uid
- */
- BPF_FUNC_get_current_uid_gid,
-
- /**
- * bpf_get_current_comm(char *buf, int size_of_buf)
- * stores current->comm into buf
- * Return: 0 on success
- */
- BPF_FUNC_get_current_comm,
-
- /**
- * bpf_get_cgroup_classid(skb) - retrieve a proc's classid
- * @skb: pointer to skb
- * Return: classid if != 0
- */
- BPF_FUNC_get_cgroup_classid,
- BPF_FUNC_skb_vlan_push, /* bpf_skb_vlan_push(skb, vlan_proto, vlan_tci) */
- BPF_FUNC_skb_vlan_pop, /* bpf_skb_vlan_pop(skb) */
-
- /**
- * bpf_skb_[gs]et_tunnel_key(skb, key, size, flags)
- * retrieve or populate tunnel metadata
- * @skb: pointer to skb
- * @key: pointer to 'struct bpf_tunnel_key'
- * @size: size of 'struct bpf_tunnel_key'
- * @flags: room for future extensions
- * Retrun: 0 on success
- */
- BPF_FUNC_skb_get_tunnel_key,
- BPF_FUNC_skb_set_tunnel_key,
- BPF_FUNC_perf_event_read, /* u64 bpf_perf_event_read(&map, index) */
- /**
- * bpf_redirect(ifindex, flags) - redirect to another netdev
- * @ifindex: ifindex of the net device
- * @flags: bit 0 - if set, redirect to ingress instead of egress
- * other bits - reserved
- * Return: TC_ACT_REDIRECT
- */
- BPF_FUNC_redirect,
-
- /**
- * bpf_get_route_realm(skb) - retrieve a dst's tclassid
- * @skb: pointer to skb
- * Return: realm if != 0
- */
- BPF_FUNC_get_route_realm,
-
- /**
- * bpf_perf_event_output(ctx, map, index, data, size) - output perf raw sample
- * @ctx: struct pt_regs*
- * @map: pointer to perf_event_array map
- * @index: index of event in the map
- * @data: data on stack to be output as raw data
- * @size: size of data
- * Return: 0 on success
- */
- BPF_FUNC_perf_event_output,
- BPF_FUNC_skb_load_bytes,
-
- /**
- * bpf_get_stackid(ctx, map, flags) - walk user or kernel stack and return id
- * @ctx: struct pt_regs*
- * @map: pointer to stack_trace map
- * @flags: bits 0-7 - numer of stack frames to skip
- * bit 8 - collect user stack instead of kernel
- * bit 9 - compare stacks by hash only
- * bit 10 - if two different stacks hash into the same stackid
- * discard old
- * other bits - reserved
- * Return: >= 0 stackid on success or negative error
- */
- BPF_FUNC_get_stackid,
-
- /**
- * bpf_csum_diff(from, from_size, to, to_size, seed) - calculate csum diff
- * @from: raw from buffer
- * @from_size: length of from buffer
- * @to: raw to buffer
- * @to_size: length of to buffer
- * @seed: optional seed
- * Return: csum result
- */
- BPF_FUNC_csum_diff,
-
- /**
- * bpf_skb_[gs]et_tunnel_opt(skb, opt, size)
- * retrieve or populate tunnel options metadata
- * @skb: pointer to skb
- * @opt: pointer to raw tunnel option data
- * @size: size of @opt
- * Return: 0 on success for set, option size for get
- */
- BPF_FUNC_skb_get_tunnel_opt,
- BPF_FUNC_skb_set_tunnel_opt,
-
- /**
- * bpf_skb_change_proto(skb, proto, flags)
- * Change protocol of the skb. Currently supported is
- * v4 -> v6, v6 -> v4 transitions. The helper will also
- * resize the skb. eBPF program is expected to fill the
- * new headers via skb_store_bytes and lX_csum_replace.
- * @skb: pointer to skb
- * @proto: new skb->protocol type
- * @flags: reserved
- * Return: 0 on success or negative error
- */
- BPF_FUNC_skb_change_proto,
-
- /**
- * bpf_skb_change_type(skb, type)
- * Change packet type of skb.
- * @skb: pointer to skb
- * @type: new skb->pkt_type type
- * Return: 0 on success or negative error
- */
- BPF_FUNC_skb_change_type,
-
- /**
- * bpf_skb_under_cgroup(skb, map, index) - Check cgroup2 membership of skb
- * @skb: pointer to skb
- * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
- * @index: index of the cgroup in the bpf_map
- * Return:
- * == 0 skb failed the cgroup2 descendant test
- * == 1 skb succeeded the cgroup2 descendant test
- * < 0 error
- */
- BPF_FUNC_skb_under_cgroup,
-
- /**
- * bpf_get_hash_recalc(skb)
- * Retrieve and possibly recalculate skb->hash.
- * @skb: pointer to skb
- * Return: hash
- */
- BPF_FUNC_get_hash_recalc,
-
- /**
- * u64 bpf_get_current_task(void)
- * Returns current task_struct
- * Return: current
- */
- BPF_FUNC_get_current_task,
-
- /**
- * bpf_probe_write_user(void *dst, void *src, int len)
- * safely attempt to write to a location
- * @dst: destination address in userspace
- * @src: source address on stack
- * @len: number of bytes to copy
- * Return: 0 on success or negative error
- */
- BPF_FUNC_probe_write_user,
-
- /**
- * bpf_current_task_under_cgroup(map, index) - Check cgroup2 membership of current task
- * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
- * @index: index of the cgroup in the bpf_map
- * Return:
- * == 0 current failed the cgroup2 descendant test
- * == 1 current succeeded the cgroup2 descendant test
- * < 0 error
- */
- BPF_FUNC_current_task_under_cgroup,
-
- /**
- * bpf_skb_change_tail(skb, len, flags)
- * The helper will resize the skb to the given new size,
- * to be used f.e. with control messages.
- * @skb: pointer to skb
- * @len: new skb length
- * @flags: reserved
- * Return: 0 on success or negative error
- */
- BPF_FUNC_skb_change_tail,
-
- /**
- * bpf_skb_pull_data(skb, len)
- * The helper will pull in non-linear data in case the
- * skb is non-linear and not all of len are part of the
- * linear section. Only needed for read/write with direct
- * packet access.
- * @skb: pointer to skb
- * @len: len to make read/writeable
- * Return: 0 on success or negative error
- */
- BPF_FUNC_skb_pull_data,
-
- /**
- * bpf_csum_update(skb, csum)
- * Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
- * @skb: pointer to skb
- * @csum: csum to add
- * Return: csum on success or negative error
- */
- BPF_FUNC_csum_update,
-
- /**
- * bpf_set_hash_invalid(skb)
- * Invalidate current skb>hash.
- * @skb: pointer to skb
- */
- BPF_FUNC_set_hash_invalid,
-
+ __BPF_FUNC_MAPPER(__BPF_ENUM_FN)
__BPF_FUNC_MAX_ID,
};
+#undef __BPF_ENUM_FN
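
The enum conversion above is a classic X-macro: the helper list is written once in __BPF_FUNC_MAPPER and each consumer supplies its own per-entry macro. A small illustrative sketch of a second consumer (not something this patch adds) reusing the same list for a printable name table:

    /* __BPF_ENUM_FN expands each FN(x) to BPF_FUNC_x, producing the enum above. */
    /* Another consumer can expand the same list into a name table:              */
    #define __BPF_NAME_FN(x) [BPF_FUNC_ ## x] = "bpf_" #x,
    static const char * const bpf_func_names[] = {
        __BPF_FUNC_MAPPER(__BPF_NAME_FN)
    };
    #undef __BPF_NAME_FN
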
/* All flags used by eBPF helper functions, placed here. */
@@ -501,6 +560,31 @@ struct bpf_tunnel_key {
__u32 tunnel_label;
};
+/* Generic BPF return codes which all BPF program types may support.
+ * The values are binary compatible with their TC_ACT_* counter-part to
+ * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
+ * programs.
+ *
+ * XDP is handled separately, see XDP_*.
+ */
+enum bpf_ret_code {
+ BPF_OK = 0,
+ /* 1 reserved */
+ BPF_DROP = 2,
+ /* 3-6 reserved */
+ BPF_REDIRECT = 7,
+ /* >127 are reserved for prog type specific return codes */
+};
+
+struct bpf_sock {
+ __u32 bound_dev_if;
+ __u32 family;
+ __u32 type;
+ __u32 protocol;
+};
+
+#define XDP_PACKET_HEADROOM 256
+
/* User return codes for XDP prog type.
* A valid XDP program must return one of these defined values. All other
* return codes are reserved for future use. Unknown return codes will result
diff --git a/include/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
index 138bbf721e70..3cbc327801d6 100644
--- a/include/linux/cec-funcs.h
+++ b/include/uapi/linux/cec-funcs.h
@@ -33,12 +33,6 @@
* SOFTWARE.
*/
-/*
- * Note: this framework is still in staging and it is likely the API
- * will change before it goes out of staging.
- *
- * Once it is moved out of staging this header will move to uapi.
- */
#ifndef _CEC_UAPI_FUNCS_H
#define _CEC_UAPI_FUNCS_H
@@ -90,7 +84,7 @@ static inline void cec_ops_inactive_source(const struct cec_msg *msg,
}
static inline void cec_msg_request_active_source(struct cec_msg *msg,
- bool reply)
+ int reply)
{
msg->len = 2;
msg->msg[0] |= 0xf; /* broadcast */
@@ -115,7 +109,7 @@ static inline void cec_ops_routing_information(const struct cec_msg *msg,
}
static inline void cec_msg_routing_change(struct cec_msg *msg,
- bool reply,
+ int reply,
__u16 orig_phys_addr,
__u16 new_phys_addr)
{
@@ -162,7 +156,7 @@ static inline void cec_msg_standby(struct cec_msg *msg)
/* One Touch Record Feature */
-static inline void cec_msg_record_off(struct cec_msg *msg, bool reply)
+static inline void cec_msg_record_off(struct cec_msg *msg, int reply)
{
msg->len = 2;
msg->msg[1] = CEC_MSG_RECORD_OFF;
@@ -324,7 +318,7 @@ static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg,
}
static inline void cec_msg_record_on(struct cec_msg *msg,
- bool reply,
+ int reply,
const struct cec_op_record_src *rec_src)
{
switch (rec_src->type) {
@@ -391,7 +385,7 @@ static inline void cec_ops_record_status(const struct cec_msg *msg,
}
static inline void cec_msg_record_tv_screen(struct cec_msg *msg,
- bool reply)
+ int reply)
{
msg->len = 2;
msg->msg[1] = CEC_MSG_RECORD_TV_SCREEN;
@@ -465,7 +459,7 @@ static inline void cec_ops_timer_cleared_status(const struct cec_msg *msg,
}
static inline void cec_msg_clear_analogue_timer(struct cec_msg *msg,
- bool reply,
+ int reply,
__u8 day,
__u8 month,
__u8 start_hr,
@@ -520,7 +514,7 @@ static inline void cec_ops_clear_analogue_timer(const struct cec_msg *msg,
}
static inline void cec_msg_clear_digital_timer(struct cec_msg *msg,
- bool reply,
+ int reply,
__u8 day,
__u8 month,
__u8 start_hr,
@@ -566,7 +560,7 @@ static inline void cec_ops_clear_digital_timer(const struct cec_msg *msg,
}
static inline void cec_msg_clear_ext_timer(struct cec_msg *msg,
- bool reply,
+ int reply,
__u8 day,
__u8 month,
__u8 start_hr,
@@ -621,7 +615,7 @@ static inline void cec_ops_clear_ext_timer(const struct cec_msg *msg,
}
static inline void cec_msg_set_analogue_timer(struct cec_msg *msg,
- bool reply,
+ int reply,
__u8 day,
__u8 month,
__u8 start_hr,
@@ -676,7 +670,7 @@ static inline void cec_ops_set_analogue_timer(const struct cec_msg *msg,
}
static inline void cec_msg_set_digital_timer(struct cec_msg *msg,
- bool reply,
+ int reply,
__u8 day,
__u8 month,
__u8 start_hr,
@@ -722,7 +716,7 @@ static inline void cec_ops_set_digital_timer(const struct cec_msg *msg,
}
static inline void cec_msg_set_ext_timer(struct cec_msg *msg,
- bool reply,
+ int reply,
__u8 day,
__u8 month,
__u8 start_hr,
@@ -814,7 +808,7 @@ static inline void cec_ops_cec_version(const struct cec_msg *msg,
}
static inline void cec_msg_get_cec_version(struct cec_msg *msg,
- bool reply)
+ int reply)
{
msg->len = 2;
msg->msg[1] = CEC_MSG_GET_CEC_VERSION;
@@ -840,7 +834,7 @@ static inline void cec_ops_report_physical_addr(const struct cec_msg *msg,
}
static inline void cec_msg_give_physical_addr(struct cec_msg *msg,
- bool reply)
+ int reply)
{
msg->len = 2;
msg->msg[1] = CEC_MSG_GIVE_PHYSICAL_ADDR;
@@ -864,7 +858,7 @@ static inline void cec_ops_set_menu_language(const struct cec_msg *msg,
}
static inline void cec_msg_get_menu_language(struct cec_msg *msg,
- bool reply)
+ int reply)
{
msg->len = 2;
msg->msg[1] = CEC_MSG_GET_MENU_LANGUAGE;
@@ -913,7 +907,7 @@ static inline void cec_ops_report_features(const struct cec_msg *msg,
}
static inline void cec_msg_give_features(struct cec_msg *msg,
- bool reply)
+ int reply)
{
msg->len = 2;
msg->msg[1] = CEC_MSG_GIVE_FEATURES;
@@ -950,7 +944,7 @@ static inline void cec_ops_deck_status(const struct cec_msg *msg,
}
static inline void cec_msg_give_deck_status(struct cec_msg *msg,
- bool reply,
+ int reply,
__u8 status_req)
{
msg->len = 3;
@@ -984,7 +978,7 @@ static inline void cec_ops_play(const struct cec_msg *msg,
struct cec_op_tuner_device_info {
__u8 rec_flag;
__u8 tuner_display_info;
- bool is_analog;
+ __u8 is_analog;
union {
struct cec_op_digital_service_id digital;
struct {
@@ -1054,7 +1048,7 @@ static inline void cec_ops_tuner_device_status(const struct cec_msg *msg,
}
static inline void cec_msg_give_tuner_device_status(struct cec_msg *msg,
- bool reply,
+ int reply,
__u8 status_req)
{
msg->len = 3;
@@ -1137,7 +1131,7 @@ static inline void cec_ops_device_vendor_id(const struct cec_msg *msg,
}
static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg,
- bool reply)
+ int reply)
{
msg->len = 2;
msg->msg[1] = CEC_MSG_GIVE_DEVICE_VENDOR_ID;
@@ -1273,7 +1267,7 @@ static inline void cec_ops_set_osd_name(const struct cec_msg *msg,
}
static inline void cec_msg_give_osd_name(struct cec_msg *msg,
- bool reply)
+ int reply)
{
msg->len = 2;
msg->msg[1] = CEC_MSG_GIVE_OSD_NAME;
@@ -1297,7 +1291,7 @@ static inline void cec_ops_menu_status(const struct cec_msg *msg,
}
static inline void cec_msg_menu_request(struct cec_msg *msg,
- bool reply,
+ int reply,
__u8 menu_req)
{
msg->len = 3;
@@ -1314,7 +1308,7 @@ static inline void cec_ops_menu_request(const struct cec_msg *msg,
struct cec_op_ui_command {
__u8 ui_cmd;
- bool has_opt_arg;
+ __u8 has_opt_arg;
union {
struct cec_op_channel_data channel_identifier;
__u8 ui_broadcast_type;
@@ -1360,7 +1354,7 @@ static inline void cec_ops_user_control_pressed(const struct cec_msg *msg,
struct cec_op_ui_command *ui_cmd)
{
ui_cmd->ui_cmd = msg->msg[2];
- ui_cmd->has_opt_arg = false;
+ ui_cmd->has_opt_arg = 0;
if (msg->len == 3)
return;
switch (ui_cmd->ui_cmd) {
@@ -1372,12 +1366,12 @@ static inline void cec_ops_user_control_pressed(const struct cec_msg *msg,
case 0x6a:
/* The optional operand is one byte for all these ui commands */
ui_cmd->play_mode = msg->msg[3];
- ui_cmd->has_opt_arg = true;
+ ui_cmd->has_opt_arg = 1;
break;
case 0x67:
if (msg->len < 7)
break;
- ui_cmd->has_opt_arg = true;
+ ui_cmd->has_opt_arg = 1;
ui_cmd->channel_identifier.channel_number_fmt = msg->msg[3] >> 2;
ui_cmd->channel_identifier.major = ((msg->msg[3] & 3) << 6) | msg->msg[4];
ui_cmd->channel_identifier.minor = (msg->msg[5] << 8) | msg->msg[6];
@@ -1409,7 +1403,7 @@ static inline void cec_ops_report_power_status(const struct cec_msg *msg,
}
static inline void cec_msg_give_device_power_status(struct cec_msg *msg,
- bool reply)
+ int reply)
{
msg->len = 2;
msg->msg[1] = CEC_MSG_GIVE_DEVICE_POWER_STATUS;
@@ -1469,7 +1463,7 @@ static inline void cec_ops_report_audio_status(const struct cec_msg *msg,
}
static inline void cec_msg_give_audio_status(struct cec_msg *msg,
- bool reply)
+ int reply)
{
msg->len = 2;
msg->msg[1] = CEC_MSG_GIVE_AUDIO_STATUS;
@@ -1491,7 +1485,7 @@ static inline void cec_ops_set_system_audio_mode(const struct cec_msg *msg,
}
static inline void cec_msg_system_audio_mode_request(struct cec_msg *msg,
- bool reply,
+ int reply,
__u16 phys_addr)
{
msg->len = phys_addr == 0xffff ? 2 : 4;
@@ -1526,7 +1520,7 @@ static inline void cec_ops_system_audio_mode_status(const struct cec_msg *msg,
}
static inline void cec_msg_give_system_audio_mode_status(struct cec_msg *msg,
- bool reply)
+ int reply)
{
msg->len = 2;
msg->msg[1] = CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS;
@@ -1566,7 +1560,7 @@ static inline void cec_ops_report_short_audio_descriptor(const struct cec_msg *m
}
static inline void cec_msg_request_short_audio_descriptor(struct cec_msg *msg,
- bool reply,
+ int reply,
__u8 num_descriptors,
const __u8 *audio_format_id,
const __u8 *audio_format_code)
@@ -1624,7 +1618,7 @@ static inline void cec_msg_report_arc_initiated(struct cec_msg *msg)
}
static inline void cec_msg_initiate_arc(struct cec_msg *msg,
- bool reply)
+ int reply)
{
msg->len = 2;
msg->msg[1] = CEC_MSG_INITIATE_ARC;
@@ -1632,7 +1626,7 @@ static inline void cec_msg_initiate_arc(struct cec_msg *msg,
}
static inline void cec_msg_request_arc_initiation(struct cec_msg *msg,
- bool reply)
+ int reply)
{
msg->len = 2;
msg->msg[1] = CEC_MSG_REQUEST_ARC_INITIATION;
@@ -1646,7 +1640,7 @@ static inline void cec_msg_report_arc_terminated(struct cec_msg *msg)
}
static inline void cec_msg_terminate_arc(struct cec_msg *msg,
- bool reply)
+ int reply)
{
msg->len = 2;
msg->msg[1] = CEC_MSG_TERMINATE_ARC;
@@ -1654,7 +1648,7 @@ static inline void cec_msg_terminate_arc(struct cec_msg *msg,
}
static inline void cec_msg_request_arc_termination(struct cec_msg *msg,
- bool reply)
+ int reply)
{
msg->len = 2;
msg->msg[1] = CEC_MSG_REQUEST_ARC_TERMINATION;
@@ -1696,7 +1690,7 @@ static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
}
static inline void cec_msg_request_current_latency(struct cec_msg *msg,
- bool reply,
+ int reply,
__u16 phys_addr)
{
msg->len = 4;
diff --git a/include/linux/cec.h b/include/uapi/linux/cec.h
index 851968e803fa..14b6f24b189e 100644
--- a/include/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -33,16 +33,11 @@
* SOFTWARE.
*/
-/*
- * Note: this framework is still in staging and it is likely the API
- * will change before it goes out of staging.
- *
- * Once it is moved out of staging this header will move to uapi.
- */
#ifndef _CEC_UAPI_H
#define _CEC_UAPI_H
#include <linux/types.h>
+#include <linux/string.h>
#define CEC_MAX_MSG_SIZE 16
@@ -135,7 +130,7 @@ static inline int cec_msg_opcode(const struct cec_msg *msg)
* cec_msg_is_broadcast - return true if this is a broadcast message.
* @msg: the message structure
*/
-static inline bool cec_msg_is_broadcast(const struct cec_msg *msg)
+static inline int cec_msg_is_broadcast(const struct cec_msg *msg)
{
return (msg->msg[0] & 0xf) == 0xf;
}
@@ -175,7 +170,10 @@ static inline void cec_msg_set_reply_to(struct cec_msg *msg,
msg->reply = msg->timeout = 0;
}
-/* cec status field */
+/* cec_msg flags field */
+#define CEC_MSG_FL_REPLY_TO_FOLLOWERS (1 << 0)
+
+/* cec_msg tx/rx_status field */
#define CEC_TX_STATUS_OK (1 << 0)
#define CEC_TX_STATUS_ARB_LOST (1 << 1)
#define CEC_TX_STATUS_NACK (1 << 2)
@@ -187,14 +185,14 @@ static inline void cec_msg_set_reply_to(struct cec_msg *msg,
#define CEC_RX_STATUS_TIMEOUT (1 << 1)
#define CEC_RX_STATUS_FEATURE_ABORT (1 << 2)
-static inline bool cec_msg_status_is_ok(const struct cec_msg *msg)
+static inline int cec_msg_status_is_ok(const struct cec_msg *msg)
{
if (msg->tx_status && !(msg->tx_status & CEC_TX_STATUS_OK))
- return false;
+ return 0;
if (msg->rx_status && !(msg->rx_status & CEC_RX_STATUS_OK))
- return false;
+ return 0;
if (!msg->tx_status && !msg->rx_status)
- return false;
+ return 0;
return !(msg->rx_status & CEC_RX_STATUS_FEATURE_ABORT);
}
@@ -257,47 +255,47 @@ static inline bool cec_msg_status_is_ok(const struct cec_msg *msg)
#define CEC_LOG_ADDR_MASK_SPECIFIC (1 << CEC_LOG_ADDR_SPECIFIC)
#define CEC_LOG_ADDR_MASK_UNREGISTERED (1 << CEC_LOG_ADDR_UNREGISTERED)
-static inline bool cec_has_tv(__u16 log_addr_mask)
+static inline int cec_has_tv(__u16 log_addr_mask)
{
return log_addr_mask & CEC_LOG_ADDR_MASK_TV;
}
-static inline bool cec_has_record(__u16 log_addr_mask)
+static inline int cec_has_record(__u16 log_addr_mask)
{
return log_addr_mask & CEC_LOG_ADDR_MASK_RECORD;
}
-static inline bool cec_has_tuner(__u16 log_addr_mask)
+static inline int cec_has_tuner(__u16 log_addr_mask)
{
return log_addr_mask & CEC_LOG_ADDR_MASK_TUNER;
}
-static inline bool cec_has_playback(__u16 log_addr_mask)
+static inline int cec_has_playback(__u16 log_addr_mask)
{
return log_addr_mask & CEC_LOG_ADDR_MASK_PLAYBACK;
}
-static inline bool cec_has_audiosystem(__u16 log_addr_mask)
+static inline int cec_has_audiosystem(__u16 log_addr_mask)
{
return log_addr_mask & CEC_LOG_ADDR_MASK_AUDIOSYSTEM;
}
-static inline bool cec_has_backup(__u16 log_addr_mask)
+static inline int cec_has_backup(__u16 log_addr_mask)
{
return log_addr_mask & CEC_LOG_ADDR_MASK_BACKUP;
}
-static inline bool cec_has_specific(__u16 log_addr_mask)
+static inline int cec_has_specific(__u16 log_addr_mask)
{
return log_addr_mask & CEC_LOG_ADDR_MASK_SPECIFIC;
}
-static inline bool cec_is_unregistered(__u16 log_addr_mask)
+static inline int cec_is_unregistered(__u16 log_addr_mask)
{
return log_addr_mask & CEC_LOG_ADDR_MASK_UNREGISTERED;
}
-static inline bool cec_is_unconfigured(__u16 log_addr_mask)
+static inline int cec_is_unconfigured(__u16 log_addr_mask)
{
return log_addr_mask == 0;
}
@@ -391,6 +389,10 @@ struct cec_log_addrs {
/* Allow a fallback to unregistered */
#define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK (1 << 0)
+/* Passthrough RC messages to the input subsystem */
+#define CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU (1 << 1)
+/* CDC-Only device: supports only CDC messages */
+#define CEC_LOG_ADDRS_FL_CDC_ONLY (1 << 2)
/* Events */
@@ -1011,4 +1013,54 @@ struct cec_event {
#define CEC_OP_HPD_ERROR_OTHER 3
#define CEC_OP_HPD_ERROR_NONE_NO_VIDEO 4
+/* End of Messages */
+
+/* Helper functions to identify the 'special' CEC devices */
+
+static inline int cec_is_2nd_tv(const struct cec_log_addrs *las)
+{
+ /*
+ * It is a second TV if the logical address is 14 or 15 and the
+ * primary device type is a TV.
+ */
+ return las->num_log_addrs &&
+ las->log_addr[0] >= CEC_LOG_ADDR_SPECIFIC &&
+ las->primary_device_type[0] == CEC_OP_PRIM_DEVTYPE_TV;
+}
+
+static inline int cec_is_processor(const struct cec_log_addrs *las)
+{
+ /*
+ * It is a processor if the logical address is 12-15 and the
+ * primary device type is a Processor.
+ */
+ return las->num_log_addrs &&
+ las->log_addr[0] >= CEC_LOG_ADDR_BACKUP_1 &&
+ las->primary_device_type[0] == CEC_OP_PRIM_DEVTYPE_PROCESSOR;
+}
+
+static inline int cec_is_switch(const struct cec_log_addrs *las)
+{
+ /*
+ * It is a switch if the logical address is 15 and the
+ * primary device type is a Switch and the CDC-Only flag is not set.
+ */
+ return las->num_log_addrs == 1 &&
+ las->log_addr[0] == CEC_LOG_ADDR_UNREGISTERED &&
+ las->primary_device_type[0] == CEC_OP_PRIM_DEVTYPE_SWITCH &&
+ !(las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY);
+}
+
+static inline int cec_is_cdc_only(const struct cec_log_addrs *las)
+{
+ /*
+ * It is a CDC-only device if the logical address is 15 and the
+ * primary device type is a Switch and the CDC-Only flag is set.
+ */
+ return las->num_log_addrs == 1 &&
+ las->log_addr[0] == CEC_LOG_ADDR_UNREGISTERED &&
+ las->primary_device_type[0] == CEC_OP_PRIM_DEVTYPE_SWITCH &&
+ (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY);
+}
+
#endif
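A small usage sketch for the new identification helpers: the cec_log_addrs values below simply describe the CDC-only switch case documented above and are illustrative, not taken from a real adapter.

#include <stdio.h>
#include <string.h>
#include <linux/cec.h>

int main(void)
{
	struct cec_log_addrs las;

	memset(&las, 0, sizeof(las));
	las.num_log_addrs = 1;
	las.log_addr[0] = CEC_LOG_ADDR_UNREGISTERED;
	las.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_SWITCH;
	las.flags = CEC_LOG_ADDRS_FL_CDC_ONLY;

	printf("switch: %d, cdc-only: %d\n",
	       cec_is_switch(&las), cec_is_cdc_only(&las));
	return 0;
}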
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 79b5ded2001a..11d21fce14d6 100644
--- a/include/uapi/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -46,6 +46,7 @@ enum crypto_attr_type_t {
CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */
CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */
CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */
+ CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */
__CRYPTOCFGA_MAX
#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
@@ -112,5 +113,9 @@ struct crypto_report_kpp {
char type[CRYPTO_MAX_NAME];
};
+struct crypto_report_acomp {
+ char type[CRYPTO_MAX_NAME];
+};
+
#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
sizeof(struct crypto_report_blkcipher))
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 915bfa74458c..9014c33d4e77 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -102,6 +102,13 @@ enum devlink_eswitch_mode {
DEVLINK_ESWITCH_MODE_SWITCHDEV,
};
+enum devlink_eswitch_inline_mode {
+ DEVLINK_ESWITCH_INLINE_MODE_NONE,
+ DEVLINK_ESWITCH_INLINE_MODE_LINK,
+ DEVLINK_ESWITCH_INLINE_MODE_NETWORK,
+ DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT,
+};
+
enum devlink_attr {
/* don't change the order or add anything between, this is ABI! */
DEVLINK_ATTR_UNSPEC,
@@ -133,6 +140,7 @@ enum devlink_attr {
DEVLINK_ATTR_SB_OCC_CUR, /* u32 */
DEVLINK_ATTR_SB_OCC_MAX, /* u32 */
DEVLINK_ATTR_ESWITCH_MODE, /* u16 */
+ DEVLINK_ATTR_ESWITCH_INLINE_MODE, /* u8 */
/* add new attributes above here, update the policy in devlink.c */
diff --git a/include/uapi/linux/dm-log-userspace.h b/include/uapi/linux/dm-log-userspace.h
index 0fa0d9ef06a5..05e91e14c501 100644
--- a/include/uapi/linux/dm-log-userspace.h
+++ b/include/uapi/linux/dm-log-userspace.h
@@ -7,6 +7,7 @@
#ifndef __DM_LOG_USERSPACE_H__
#define __DM_LOG_USERSPACE_H__
+#include <linux/types.h>
#include <linux/dm-ioctl.h> /* For DM_UUID_LEN */
/*
@@ -147,12 +148,12 @@
/*
* DM_ULOG_GET_REGION_SIZE corresponds to (found in dm-dirty-log.h):
- * uint32_t (*get_region_size)(struct dm_dirty_log *log);
+ * __u32 (*get_region_size)(struct dm_dirty_log *log);
*
* Payload-to-userspace:
* None.
* Payload-to-kernel:
- * uint64_t - contains the region size
+ * __u64 - contains the region size
*
* The region size is something that was determined at constructor time.
* It is returned in the payload area and 'data_size' is set to
@@ -168,11 +169,11 @@
* int (*is_clean)(struct dm_dirty_log *log, region_t region);
*
* Payload-to-userspace:
- * uint64_t - the region to get clean status on
+ * __u64 - the region to get clean status on
* Payload-to-kernel:
- * int64_t - 1 if clean, 0 otherwise
+ * __s64 - 1 if clean, 0 otherwise
*
- * Payload is sizeof(uint64_t) and contains the region for which the clean
+ * Payload is sizeof(__u64) and contains the region for which the clean
* status is being made.
*
* When the request has been processed, user-space must return the
@@ -187,9 +188,9 @@
* int can_block);
*
* Payload-to-userspace:
- * uint64_t - the region to get sync status on
+ * __u64 - the region to get sync status on
* Payload-to-kernel:
- * int64_t - 1 if in-sync, 0 otherwise
+ * __s64 - 1 if in-sync, 0 otherwise
*
* Exactly the same as 'is_clean' above, except this time asking "has the
* region been recovered?" vs. "is the region not being modified?"
@@ -203,7 +204,7 @@
* Payload-to-userspace:
* If the 'integrated_flush' directive is present in the constructor
* table, the payload is the same as DM_ULOG_MARK_REGION:
- * uint64_t [] - region(s) to mark
+ * __u64 [] - region(s) to mark
* else
* None
* Payload-to-kernel:
@@ -225,13 +226,13 @@
* void (*mark_region)(struct dm_dirty_log *log, region_t region);
*
* Payload-to-userspace:
- * uint64_t [] - region(s) to mark
+ * __u64 [] - region(s) to mark
* Payload-to-kernel:
* None.
*
* Incoming payload contains the one or more regions to mark dirty.
* The number of regions contained in the payload can be determined from
- * 'data_size/sizeof(uint64_t)'.
+ * 'data_size/sizeof(__u64)'.
*
* When the request has been processed, user-space must return the
* dm_ulog_request to the kernel - setting the 'error' field and clearing
@@ -244,13 +245,13 @@
* void (*clear_region)(struct dm_dirty_log *log, region_t region);
*
* Payload-to-userspace:
- * uint64_t [] - region(s) to clear
+ * __u64 [] - region(s) to clear
* Payload-to-kernel:
* None.
*
* Incoming payload contains the one or more regions to mark clean.
* The number of regions contained in the payload can be determined from
- * 'data_size/sizeof(uint64_t)'.
+ * 'data_size/sizeof(__u64)'.
*
* When the request has been processed, user-space must return the
* dm_ulog_request to the kernel - setting the 'error' field and clearing
@@ -266,8 +267,8 @@
* None.
* Payload-to-kernel:
* {
- * int64_t i; -- 1 if recovery necessary, 0 otherwise
- * uint64_t r; -- The region to recover if i=1
+ * __s64 i; -- 1 if recovery necessary, 0 otherwise
+ * __u64 r; -- The region to recover if i=1
* }
* 'data_size' should be set appropriately.
*
@@ -283,8 +284,8 @@
*
* Payload-to-userspace:
* {
- * uint64_t - region to set sync state on
- * int64_t - 0 if not-in-sync, 1 if in-sync
+ * __u64 - region to set sync state on
+ * __s64 - 0 if not-in-sync, 1 if in-sync
* }
* Payload-to-kernel:
* None.
@@ -302,7 +303,7 @@
* Payload-to-userspace:
* None.
* Payload-to-kernel:
- * uint64_t - the number of in-sync regions
+ * __u64 - the number of in-sync regions
*
* No incoming payload. Kernel-bound payload contains the number of
* regions that are in-sync (in a size_t).
@@ -350,11 +351,11 @@
* int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region);
*
* Payload-to-userspace:
- * uint64_t - region to determine recovery status on
+ * __u64 - region to determine recovery status on
* Payload-to-kernel:
* {
- * int64_t is_recovering; -- 0 if no, 1 if yes
- * uint64_t in_sync_hint; -- lowest region still needing resync
+ * __s64 is_recovering; -- 0 if no, 1 if yes
+ * __u64 in_sync_hint; -- lowest region still needing resync
* }
*
* When the request has been processed, user-space must return the
@@ -413,16 +414,16 @@ struct dm_ulog_request {
* differentiate between logs that are being swapped and have the
* same 'uuid'. (Think "live" and "inactive" device-mapper tables.)
*/
- uint64_t luid;
+ __u64 luid;
char uuid[DM_UUID_LEN];
char padding[3]; /* Padding because DM_UUID_LEN = 129 */
- uint32_t version; /* See DM_ULOG_REQUEST_VERSION */
- int32_t error; /* Used to report back processing errors */
+ __u32 version; /* See DM_ULOG_REQUEST_VERSION */
+ __s32 error; /* Used to report back processing errors */
- uint32_t seq; /* Sequence number for request */
- uint32_t request_type; /* DM_ULOG_* defined above */
- uint32_t data_size; /* How much data (not including this struct) */
+ __u32 seq; /* Sequence number for request */
+ __u32 request_type; /* DM_ULOG_* defined above */
+ __u32 data_size; /* How much data (not including this struct) */
char data[0];
};
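As a sketch of the fixed-width payload convention described above (a __u64 region in, a __s64 status out), a userspace log server's handler for an is_clean request might look roughly like this; is_region_clean() is an assumed application-specific lookup, and request dispatch is omitted.

#include <string.h>
#include <linux/types.h>
#include <linux/dm-log-userspace.h>

extern int is_region_clean(__u64 region);	/* assumed application helper */

static void handle_is_clean(struct dm_ulog_request *rq)
{
	__u64 region;
	__s64 clean;

	memcpy(&region, rq->data, sizeof(region));	/* payload-to-userspace */
	clean = is_region_clean(region) ? 1 : 0;
	memcpy(rq->data, &clean, sizeof(clean));	/* payload-to-kernel */
	rq->data_size = sizeof(clean);
	rq->error = 0;
}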
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 8e547231c1b7..f0db7788f887 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -248,6 +248,19 @@ struct ethtool_tunable {
void *data[0];
};
+#define DOWNSHIFT_DEV_DEFAULT_COUNT 0xff
+#define DOWNSHIFT_DEV_DISABLE 0
+
+enum phy_tunable_id {
+ ETHTOOL_PHY_ID_UNSPEC,
+ ETHTOOL_PHY_DOWNSHIFT,
+ /*
+ * Add your fresh new phy tunable attribute above and remember to update
+ * phy_tunable_strings[] in net/core/ethtool.c
+ */
+ __ETHTOOL_PHY_TUNABLE_COUNT,
+};
+
/**
* struct ethtool_regs - hardware register dump
* @cmd: Command number = %ETHTOOL_GREGS
@@ -548,6 +561,7 @@ struct ethtool_pauseparam {
* @ETH_SS_FEATURES: Device feature names
* @ETH_SS_RSS_HASH_FUNCS: RSS hash function names
* @ETH_SS_PHY_STATS: Statistic names, for use with %ETHTOOL_GPHYSTATS
+ * @ETH_SS_PHY_TUNABLES: PHY tunable names
*/
enum ethtool_stringset {
ETH_SS_TEST = 0,
@@ -558,6 +572,7 @@ enum ethtool_stringset {
ETH_SS_RSS_HASH_FUNCS,
ETH_SS_TUNABLES,
ETH_SS_PHY_STATS,
+ ETH_SS_PHY_TUNABLES,
};
/**
@@ -1313,7 +1328,8 @@ struct ethtool_per_queue_op {
#define ETHTOOL_GLINKSETTINGS 0x0000004c /* Get ethtool_link_settings */
#define ETHTOOL_SLINKSETTINGS 0x0000004d /* Set ethtool_link_settings */
-
+#define ETHTOOL_PHY_GTUNABLE 0x0000004e /* Get PHY tunable configuration */
+#define ETHTOOL_PHY_STUNABLE 0x0000004f /* Set PHY tunable configuration */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
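A rough userspace sketch of the new ETHTOOL_PHY_STUNABLE command setting the PHY downshift count; the interface name and the count of 2 are illustrative, and ETHTOOL_TUNABLE_U8 is assumed to be the matching tunable type id.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct {
		struct ethtool_tunable hdr;
		__u8 count;
	} req;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&req, 0, sizeof(req));
	req.hdr.cmd = ETHTOOL_PHY_STUNABLE;
	req.hdr.id = ETHTOOL_PHY_DOWNSHIFT;
	req.hdr.type_id = ETHTOOL_TUNABLE_U8;
	req.hdr.len = sizeof(req.count);
	req.count = 2;				/* retry twice before downshifting */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* illustrative interface */
	ifr.ifr_data = (char *)&req;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_PHY_STUNABLE");
	return 0;
}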
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 14404b3ebb89..bbf02a63a011 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -29,6 +29,11 @@ struct fib_rule_hdr {
__u32 flags;
};
+struct fib_rule_uid_range {
+ __u32 start;
+ __u32 end;
+};
+
enum {
FRA_UNSPEC,
FRA_DST, /* destination address */
@@ -51,6 +56,7 @@ enum {
FRA_OIFNAME,
FRA_PAD,
FRA_L3MDEV, /* iif or oif is l3mdev goto its table */
+ FRA_UID_RANGE, /* UID range */
__FRA_MAX
};
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index acb2b6152ba0..36da93fbf188 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -225,6 +225,10 @@ struct fsxattr {
#define BLKSECDISCARD _IO(0x12,125)
#define BLKROTATIONAL _IO(0x12,126)
#define BLKZEROOUT _IO(0x12,127)
+/*
+ * A jump here: 130-131 are reserved for zoned block devices
+ * (see uapi/linux/blkzoned.h)
+ */
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
@@ -254,6 +258,20 @@ struct fsxattr {
/* Policy provided via an ioctl on the topmost directory */
#define FS_KEY_DESCRIPTOR_SIZE 8
+#define FS_POLICY_FLAGS_PAD_4 0x00
+#define FS_POLICY_FLAGS_PAD_8 0x01
+#define FS_POLICY_FLAGS_PAD_16 0x02
+#define FS_POLICY_FLAGS_PAD_32 0x03
+#define FS_POLICY_FLAGS_PAD_MASK 0x03
+#define FS_POLICY_FLAGS_VALID 0x03
+
+/* Encryption algorithms */
+#define FS_ENCRYPTION_MODE_INVALID 0
+#define FS_ENCRYPTION_MODE_AES_256_XTS 1
+#define FS_ENCRYPTION_MODE_AES_256_GCM 2
+#define FS_ENCRYPTION_MODE_AES_256_CBC 3
+#define FS_ENCRYPTION_MODE_AES_256_CTS 4
+
struct fscrypt_policy {
__u8 version;
__u8 contents_encryption_mode;
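A hypothetical sketch of applying the modes and padding flags above to a directory: it assumes FS_IOC_SET_ENCRYPTION_POLICY and the remaining struct fscrypt_policy members (filenames mode, flags, key descriptor) defined later in this header; the path and key descriptor bytes are made up.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	struct fscrypt_policy pol;
	int fd = open("/encrypted/dir", O_RDONLY);	/* illustrative path */

	memset(&pol, 0, sizeof(pol));
	pol.version = 0;
	pol.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
	pol.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
	pol.flags = FS_POLICY_FLAGS_PAD_32;
	memcpy(pol.master_key_descriptor, "\x01\x23\x45\x67\x89\xab\xcd\xef",
	       FS_KEY_DESCRIPTOR_SIZE);

	if (fd < 0 || ioctl(fd, FS_IOC_SET_ENCRYPTION_POLICY, &pol) < 0)
		perror("FS_IOC_SET_ENCRYPTION_POLICY");
	return 0;
}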
diff --git a/include/uapi/linux/genetlink.h b/include/uapi/linux/genetlink.h
index 5512c90af7e3..adc899381e0d 100644
--- a/include/uapi/linux/genetlink.h
+++ b/include/uapi/linux/genetlink.h
@@ -26,10 +26,11 @@ struct genlmsghdr {
/*
* List of reserved static generic netlink identifiers:
*/
-#define GENL_ID_GENERATE 0
#define GENL_ID_CTRL NLMSG_MIN_TYPE
#define GENL_ID_VFS_DQUOT (NLMSG_MIN_TYPE + 1)
#define GENL_ID_PMCRAID (NLMSG_MIN_TYPE + 2)
+/* must be last reserved + 1 */
+#define GENL_START_ALLOC (NLMSG_MIN_TYPE + 3)
/**************************************************************************
* Controller
diff --git a/include/uapi/linux/hw_breakpoint.h b/include/uapi/linux/hw_breakpoint.h
index b04000a2296a..2b65efd19a46 100644
--- a/include/uapi/linux/hw_breakpoint.h
+++ b/include/uapi/linux/hw_breakpoint.h
@@ -4,7 +4,11 @@
enum {
HW_BREAKPOINT_LEN_1 = 1,
HW_BREAKPOINT_LEN_2 = 2,
+ HW_BREAKPOINT_LEN_3 = 3,
HW_BREAKPOINT_LEN_4 = 4,
+ HW_BREAKPOINT_LEN_5 = 5,
+ HW_BREAKPOINT_LEN_6 = 6,
+ HW_BREAKPOINT_LEN_7 = 7,
HW_BREAKPOINT_LEN_8 = 8,
};
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 117d02e0fc31..3e5185e9ef03 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -35,6 +35,9 @@
#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */
#define ETH_FCS_LEN 4 /* Octets in the FCS */
+#define ETH_MIN_MTU 68 /* Min IPv4 MTU per RFC791 */
+#define ETH_MAX_MTU 0xFFFFU /* 65535, same as IP_MAX_MTU */
+
/*
* These are the defined Ethernet Protocol ID's.
*/
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index b4fba662cd32..6b13e591abc9 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -275,6 +275,8 @@ enum {
IFLA_BR_PAD,
IFLA_BR_VLAN_STATS_ENABLED,
IFLA_BR_MCAST_STATS_ENABLED,
+ IFLA_BR_MCAST_IGMP_VERSION,
+ IFLA_BR_MCAST_MLD_VERSION,
__IFLA_BR_MAX,
};
@@ -874,10 +876,14 @@ enum {
/* XDP section */
+#define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0)
+#define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST)
+
enum {
IFLA_XDP_UNSPEC,
IFLA_XDP_FD,
IFLA_XDP_ATTACHED,
+ IFLA_XDP_FLAGS,
__IFLA_XDP_MAX,
};
diff --git a/include/uapi/linux/if_pppol2tp.h b/include/uapi/linux/if_pppol2tp.h
index 4bd1f55d6377..6418c4d10241 100644
--- a/include/uapi/linux/if_pppol2tp.h
+++ b/include/uapi/linux/if_pppol2tp.h
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <linux/in.h>
#include <linux/in6.h>
+#include <linux/l2tp.h>
/* Structure used to connect() the socket to a particular tunnel UDP
* socket over IPv4.
@@ -90,14 +91,12 @@ enum {
PPPOL2TP_SO_REORDERTO = 5,
};
-/* Debug message categories for the DEBUG socket option */
+/* Debug message categories for the DEBUG socket option (deprecated) */
enum {
- PPPOL2TP_MSG_DEBUG = (1 << 0), /* verbose debug (if
- * compiled in) */
- PPPOL2TP_MSG_CONTROL = (1 << 1), /* userspace - kernel
- * interface */
- PPPOL2TP_MSG_SEQ = (1 << 2), /* sequence numbers */
- PPPOL2TP_MSG_DATA = (1 << 3), /* data packets */
+ PPPOL2TP_MSG_DEBUG = L2TP_MSG_DEBUG,
+ PPPOL2TP_MSG_CONTROL = L2TP_MSG_CONTROL,
+ PPPOL2TP_MSG_SEQ = L2TP_MSG_SEQ,
+ PPPOL2TP_MSG_DATA = L2TP_MSG_DATA,
};
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index 22e5e589a274..e54d14a7f876 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -40,6 +40,8 @@ enum iio_chan_type {
IIO_PH,
IIO_UVINDEX,
IIO_ELECTRICALCONDUCTIVITY,
+ IIO_COUNT,
+ IIO_INDEX,
};
enum iio_modifier {
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index eaf94919291a..4e557f4e9553 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -117,6 +117,7 @@ struct in_addr {
#define IP_NODEFRAG 22
#define IP_CHECKSUM 23
#define IP_BIND_ADDRESS_NO_PORT 24
+#define IP_RECVFRAGSIZE 25
/* IP_MTU_DISCOVER values */
#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
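A hedged sketch of consuming the new IP_RECVFRAGSIZE ancillary data on a UDP socket; the fallback define is only for libc headers that have not yet picked up this value, and error handling is trimmed.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>

#ifndef IP_RECVFRAGSIZE
#define IP_RECVFRAGSIZE 25	/* as defined above; libc headers may lag */
#endif

static int recv_with_fragsize(int fd, void *buf, size_t len)
{
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm;
	int on = 1, ret;

	setsockopt(fd, IPPROTO_IP, IP_RECVFRAGSIZE, &on, sizeof(on));
	ret = recvmsg(fd, &msg, 0);

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
		if (cm->cmsg_level == IPPROTO_IP && cm->cmsg_type == IP_RECVFRAGSIZE)
			printf("fragment size: %d\n", *(int *)CMSG_DATA(cm));
	return ret;
}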
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index b39ea4f2e701..46444f8fbee4 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -283,6 +283,7 @@ struct in6_flowlabel_req {
#define IPV6_RECVORIGDSTADDR IPV6_ORIGDSTADDR
#define IPV6_TRANSPARENT 75
#define IPV6_UNICAST_IF 76
+#define IPV6_RECVFRAGSIZE 77
/*
* Multicast Routing:
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 509cd961068d..bbe201047df6 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -43,6 +43,23 @@ struct inet_diag_req_v2 {
struct inet_diag_sockid id;
};
+/*
+ * SOCK_RAW sockets require the underlying protocol to be
+ * additionally specified, so we can use the @pad member for
+ * this, but we can't rename it because userspace programs
+ * may still depend on this name. Instead, let's use another
+ * structure definition as an alias for struct
+ * @inet_diag_req_v2.
+ */
+struct inet_diag_req_raw {
+ __u8 sdiag_family;
+ __u8 sdiag_protocol;
+ __u8 idiag_ext;
+ __u8 sdiag_raw_protocol;
+ __u32 idiag_states;
+ struct inet_diag_sockid id;
+};
+
enum {
INET_DIAG_REQ_NONE,
INET_DIAG_REQ_BYTECODE,
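A sketch of how the alias might be filled for a raw-socket sock_diag dump; the netlink send/receive plumbing is omitted and the ICMP protocol choice is illustrative.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/inet_diag.h>

static void fill_raw_req(struct inet_diag_req_raw *req)
{
	memset(req, 0, sizeof(*req));
	req->sdiag_family = AF_INET;
	req->sdiag_protocol = IPPROTO_RAW;	/* raw socket handler */
	req->sdiag_raw_protocol = IPPROTO_ICMP;	/* underlying wire protocol */
	req->idiag_states = ~0U;		/* all socket states */
}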
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 8c2772340c3f..eaf65dc82e22 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -39,6 +39,7 @@ struct in6_ifreq {
#define IPV6_SRCRT_STRICT 0x01 /* Deprecated; will be removed */
#define IPV6_SRCRT_TYPE_0 0 /* Deprecated; will be removed */
#define IPV6_SRCRT_TYPE_2 2 /* IPv6 type 2 Routing Header */
+#define IPV6_SRCRT_TYPE_4 4 /* Segment Routing with IPv6 */
/*
* routing header
@@ -178,6 +179,9 @@ enum {
DEVCONF_DROP_UNSOLICITED_NA,
DEVCONF_KEEP_ADDR_ON_DOWN,
DEVCONF_RTR_SOLICIT_MAX_INTERVAL,
+ DEVCONF_SEG6_ENABLED,
+ DEVCONF_SEG6_REQUIRE_HMAC,
+ DEVCONF_ENHANCED_DAD,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 4ee67cb99143..cac48eda1075 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -651,6 +651,9 @@ struct kvm_enable_cap {
};
/* for KVM_PPC_GET_PVINFO */
+
+#define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0)
+
struct kvm_ppc_pvinfo {
/* out */
__u32 flags;
@@ -682,8 +685,6 @@ struct kvm_ppc_smmu_info {
struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
};
-#define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0)
-
#define KVMIO 0xAE
/* machine type bits, to be used as argument to KVM_CREATE_VM */
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index 4bd27d0270a2..85ddb74fcd1c 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -108,7 +108,7 @@ enum {
L2TP_ATTR_VLAN_ID, /* u16 */
L2TP_ATTR_COOKIE, /* 0, 4 or 8 bytes */
L2TP_ATTR_PEER_COOKIE, /* 0, 4 or 8 bytes */
- L2TP_ATTR_DEBUG, /* u32 */
+ L2TP_ATTR_DEBUG, /* u32, enum l2tp_debug_flags */
L2TP_ATTR_RECV_SEQ, /* u8 */
L2TP_ATTR_SEND_SEQ, /* u8 */
L2TP_ATTR_LNS_MODE, /* u8 */
@@ -124,8 +124,8 @@ enum {
L2TP_ATTR_STATS, /* nested */
L2TP_ATTR_IP6_SADDR, /* struct in6_addr */
L2TP_ATTR_IP6_DADDR, /* struct in6_addr */
- L2TP_ATTR_UDP_ZERO_CSUM6_TX, /* u8 */
- L2TP_ATTR_UDP_ZERO_CSUM6_RX, /* u8 */
+ L2TP_ATTR_UDP_ZERO_CSUM6_TX, /* flag */
+ L2TP_ATTR_UDP_ZERO_CSUM6_RX, /* flag */
L2TP_ATTR_PAD,
__L2TP_ATTR_MAX,
};
@@ -175,6 +175,21 @@ enum l2tp_seqmode {
L2TP_SEQ_ALL = 2,
};
+/**
+ * enum l2tp_debug_flags - debug message categories for L2TP tunnels/sessions
+ *
+ * @L2TP_MSG_DEBUG: verbose debug (if compiled in)
+ * @L2TP_MSG_CONTROL: userspace - kernel interface
+ * @L2TP_MSG_SEQ: sequence numbers
+ * @L2TP_MSG_DATA: data packets
+ */
+enum l2tp_debug_flags {
+ L2TP_MSG_DEBUG = (1 << 0),
+ L2TP_MSG_CONTROL = (1 << 1),
+ L2TP_MSG_SEQ = (1 << 2),
+ L2TP_MSG_DATA = (1 << 3),
+};
+
/*
* NETLINK_GENERIC related info
*/
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
index a478fe80e203..92724cba1eba 100644
--- a/include/uapi/linux/lwtunnel.h
+++ b/include/uapi/linux/lwtunnel.h
@@ -9,6 +9,8 @@ enum lwtunnel_encap_types {
LWTUNNEL_ENCAP_IP,
LWTUNNEL_ENCAP_ILA,
LWTUNNEL_ENCAP_IP6,
+ LWTUNNEL_ENCAP_SEG6,
+ LWTUNNEL_ENCAP_BPF,
__LWTUNNEL_ENCAP_MAX,
};
@@ -42,4 +44,26 @@ enum lwtunnel_ip6_t {
#define LWTUNNEL_IP6_MAX (__LWTUNNEL_IP6_MAX - 1)
+enum {
+ LWT_BPF_PROG_UNSPEC,
+ LWT_BPF_PROG_FD,
+ LWT_BPF_PROG_NAME,
+ __LWT_BPF_PROG_MAX,
+};
+
+#define LWT_BPF_PROG_MAX (__LWT_BPF_PROG_MAX - 1)
+
+enum {
+ LWT_BPF_UNSPEC,
+ LWT_BPF_IN,
+ LWT_BPF_OUT,
+ LWT_BPF_XMIT,
+ LWT_BPF_XMIT_HEADROOM,
+ __LWT_BPF_MAX,
+};
+
+#define LWT_BPF_MAX (__LWT_BPF_MAX - 1)
+
+#define LWT_BPF_MAX_HEADROOM 256
+
#endif /* _UAPI_LWTUNNEL_H_ */
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 9bd559472c92..e230af2e6855 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -57,6 +57,7 @@
#define CGROUP_SUPER_MAGIC 0x27e0eb
#define CGROUP2_SUPER_MAGIC 0x63677270
+#define RDTGROUP_SUPER_MAGIC 0x7655821
#define STACK_END_MAGIC 0x57AC6E9D
diff --git a/include/uapi/linux/major.h b/include/uapi/linux/major.h
index 620252e69b44..19e195bee990 100644
--- a/include/uapi/linux/major.h
+++ b/include/uapi/linux/major.h
@@ -3,7 +3,7 @@
/*
* This file has definitions for major device numbers.
- * For the device number assignments, see Documentation/devices.txt.
+ * For the device number assignments, see Documentation/admin-guide/devices.rst.
*/
#define UNNAMED_MAJOR 0
diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h
index 7e385b83b9d8..700a55156eee 100644
--- a/include/uapi/linux/mmc/ioctl.h
+++ b/include/uapi/linux/mmc/ioctl.h
@@ -69,6 +69,6 @@ struct mmc_ioc_multi_cmd {
* is enforced per ioctl call. For larger data transfers, use the normal
* block device operations.
*/
-#define MMC_IOC_MAX_BYTES (512L * 256)
+#define MMC_IOC_MAX_BYTES (512L * 1024)
#define MMC_IOC_MAX_CMDS 255
#endif /* LINUX_MMC_IOCTL_H */
diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h
index e08e413d5f71..c91c642ea900 100644
--- a/include/uapi/linux/nbd.h
+++ b/include/uapi/linux/nbd.h
@@ -38,11 +38,12 @@ enum {
};
/* values for flags field */
-#define NBD_FLAG_HAS_FLAGS (1 << 0) /* nbd-server supports flags */
-#define NBD_FLAG_READ_ONLY (1 << 1) /* device is read-only */
-#define NBD_FLAG_SEND_FLUSH (1 << 2) /* can flush writeback cache */
+#define NBD_FLAG_HAS_FLAGS (1 << 0) /* nbd-server supports flags */
+#define NBD_FLAG_READ_ONLY (1 << 1) /* device is read-only */
+#define NBD_FLAG_SEND_FLUSH (1 << 2) /* can flush writeback cache */
/* there is a gap here to match userspace */
-#define NBD_FLAG_SEND_TRIM (1 << 5) /* send trim/discard */
+#define NBD_FLAG_SEND_TRIM (1 << 5) /* send trim/discard */
+#define NBD_FLAG_CAN_MULTI_CONN (1 << 8) /* Server supports multiple connections per export. */
/* userspace doesn't need the nbd_device structure */
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 264e515de16f..464dcca5ed68 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -25,8 +25,9 @@ enum {
SOF_TIMESTAMPING_TX_ACK = (1<<9),
SOF_TIMESTAMPING_OPT_CMSG = (1<<10),
SOF_TIMESTAMPING_OPT_TSONLY = (1<<11),
+ SOF_TIMESTAMPING_OPT_STATS = (1<<12),
- SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_TSONLY,
+ SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_STATS,
SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) |
SOF_TIMESTAMPING_LAST
};
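A minimal sketch of enabling the new OPT_STATS flag (together with OPT_TSONLY, which it builds on) for software TX timestamps on a TCP socket; it assumes a libc that exposes SO_TIMESTAMPING.

#include <sys/socket.h>
#include <linux/net_tstamp.h>

int enable_tx_tstamp_stats(int fd)
{
	unsigned int val = SOF_TIMESTAMPING_TX_SOFTWARE |
			   SOF_TIMESTAMPING_SOFTWARE |
			   SOF_TIMESTAMPING_OPT_TSONLY |
			   SOF_TIMESTAMPING_OPT_STATS;

	/* Timestamps and stats are then read back from the socket error queue. */
	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
}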
diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
index d93f949d1d9a..7550e9176a54 100644
--- a/include/uapi/linux/netfilter.h
+++ b/include/uapi/linux/netfilter.h
@@ -13,7 +13,7 @@
#define NF_STOLEN 2
#define NF_QUEUE 3
#define NF_REPEAT 4
-#define NF_STOP 5
+#define NF_STOP 5 /* Deprecated, for userspace nf_queue compatibility. */
#define NF_MAX_VERDICT NF_STOP
/* we overload the higher bits for encoding auxiliary data such as the queue
diff --git a/include/uapi/linux/netfilter/nf_conntrack_tuple_common.h b/include/uapi/linux/netfilter/nf_conntrack_tuple_common.h
index a9c3834abdd4..526b42496b78 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_tuple_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_tuple_common.h
@@ -2,7 +2,10 @@
#define _NF_CONNTRACK_TUPLE_COMMON_H
#include <linux/types.h>
+#ifndef __KERNEL__
#include <linux/netfilter.h>
+#endif
+#include <linux/netfilter/nf_conntrack_common.h> /* IP_CT_IS_REPLY */
enum ip_conntrack_dir {
IP_CT_DIR_ORIGINAL,
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index c6c4477c136b..881d49e94569 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -4,6 +4,7 @@
#define NFT_TABLE_MAXNAMELEN 32
#define NFT_CHAIN_MAXNAMELEN 32
#define NFT_SET_MAXNAMELEN 32
+#define NFT_OBJ_MAXNAMELEN 32
#define NFT_USERDATA_MAXLEN 256
/**
@@ -85,6 +86,10 @@ enum nft_verdicts {
* @NFT_MSG_NEWGEN: announce a new generation, only for events (enum nft_gen_attributes)
* @NFT_MSG_GETGEN: get the rule-set generation (enum nft_gen_attributes)
* @NFT_MSG_TRACE: trace event (enum nft_trace_attributes)
+ * @NFT_MSG_NEWOBJ: create a stateful object (enum nft_obj_attributes)
+ * @NFT_MSG_GETOBJ: get a stateful object (enum nft_obj_attributes)
+ * @NFT_MSG_DELOBJ: delete a stateful object (enum nft_obj_attributes)
+ * @NFT_MSG_GETOBJ_RESET: get and reset a stateful object (enum nft_obj_attributes)
*/
enum nf_tables_msg_types {
NFT_MSG_NEWTABLE,
@@ -105,6 +110,10 @@ enum nf_tables_msg_types {
NFT_MSG_NEWGEN,
NFT_MSG_GETGEN,
NFT_MSG_TRACE,
+ NFT_MSG_NEWOBJ,
+ NFT_MSG_GETOBJ,
+ NFT_MSG_DELOBJ,
+ NFT_MSG_GETOBJ_RESET,
NFT_MSG_MAX,
};
@@ -246,6 +255,7 @@ enum nft_rule_compat_attributes {
* @NFT_SET_MAP: set is used as a dictionary
* @NFT_SET_TIMEOUT: set uses timeouts
* @NFT_SET_EVAL: set contains expressions for evaluation
+ * @NFT_SET_OBJECT: set contains stateful objects
*/
enum nft_set_flags {
NFT_SET_ANONYMOUS = 0x1,
@@ -254,6 +264,7 @@ enum nft_set_flags {
NFT_SET_MAP = 0x8,
NFT_SET_TIMEOUT = 0x10,
NFT_SET_EVAL = 0x20,
+ NFT_SET_OBJECT = 0x40,
};
/**
@@ -295,6 +306,7 @@ enum nft_set_desc_attributes {
* @NFTA_SET_TIMEOUT: default timeout value (NLA_U64)
* @NFTA_SET_GC_INTERVAL: garbage collection interval (NLA_U32)
* @NFTA_SET_USERDATA: user data (NLA_BINARY)
+ * @NFTA_SET_OBJ_TYPE: stateful object type (NLA_U32: NFT_OBJECT_*)
*/
enum nft_set_attributes {
NFTA_SET_UNSPEC,
@@ -312,6 +324,7 @@ enum nft_set_attributes {
NFTA_SET_GC_INTERVAL,
NFTA_SET_USERDATA,
NFTA_SET_PAD,
+ NFTA_SET_OBJ_TYPE,
__NFTA_SET_MAX
};
#define NFTA_SET_MAX (__NFTA_SET_MAX - 1)
@@ -335,6 +348,7 @@ enum nft_set_elem_flags {
* @NFTA_SET_ELEM_EXPIRATION: expiration time (NLA_U64)
* @NFTA_SET_ELEM_USERDATA: user data (NLA_BINARY)
* @NFTA_SET_ELEM_EXPR: expression (NLA_NESTED: nft_expr_attributes)
+ * @NFTA_SET_ELEM_OBJREF: stateful object reference (NLA_STRING)
*/
enum nft_set_elem_attributes {
NFTA_SET_ELEM_UNSPEC,
@@ -346,6 +360,7 @@ enum nft_set_elem_attributes {
NFTA_SET_ELEM_USERDATA,
NFTA_SET_ELEM_EXPR,
NFTA_SET_ELEM_PAD,
+ NFTA_SET_ELEM_OBJREF,
__NFTA_SET_ELEM_MAX
};
#define NFTA_SET_ELEM_MAX (__NFTA_SET_ELEM_MAX - 1)
@@ -659,6 +674,10 @@ enum nft_payload_csum_types {
NFT_PAYLOAD_CSUM_INET,
};
+enum nft_payload_csum_flags {
+ NFT_PAYLOAD_L4CSUM_PSEUDOHDR = (1 << 0),
+};
+
/**
* enum nft_payload_attributes - nf_tables payload expression netlink attributes
*
@@ -669,6 +688,7 @@ enum nft_payload_csum_types {
* @NFTA_PAYLOAD_SREG: source register to load data from (NLA_U32: nft_registers)
* @NFTA_PAYLOAD_CSUM_TYPE: checksum type (NLA_U32)
* @NFTA_PAYLOAD_CSUM_OFFSET: checksum offset relative to base (NLA_U32)
+ * @NFTA_PAYLOAD_CSUM_FLAGS: checksum flags (NLA_U32)
*/
enum nft_payload_attributes {
NFTA_PAYLOAD_UNSPEC,
@@ -679,6 +699,7 @@ enum nft_payload_attributes {
NFTA_PAYLOAD_SREG,
NFTA_PAYLOAD_CSUM_TYPE,
NFTA_PAYLOAD_CSUM_OFFSET,
+ NFTA_PAYLOAD_CSUM_FLAGS,
__NFTA_PAYLOAD_MAX
};
#define NFTA_PAYLOAD_MAX (__NFTA_PAYLOAD_MAX - 1)
@@ -759,6 +780,19 @@ enum nft_meta_keys {
};
/**
+ * enum nft_rt_keys - nf_tables routing expression keys
+ *
+ * @NFT_RT_CLASSID: realm value of packet's route (skb->dst->tclassid)
+ * @NFT_RT_NEXTHOP4: routing nexthop for IPv4
+ * @NFT_RT_NEXTHOP6: routing nexthop for IPv6
+ */
+enum nft_rt_keys {
+ NFT_RT_CLASSID,
+ NFT_RT_NEXTHOP4,
+ NFT_RT_NEXTHOP6,
+};
+
+/**
* enum nft_hash_attributes - nf_tables hash expression netlink attributes
*
* @NFTA_HASH_SREG: source register (NLA_U32)
@@ -797,6 +831,20 @@ enum nft_meta_attributes {
#define NFTA_META_MAX (__NFTA_META_MAX - 1)
/**
+ * enum nft_rt_attributes - nf_tables routing expression netlink attributes
+ *
+ * @NFTA_RT_DREG: destination register (NLA_U32)
+ * @NFTA_RT_KEY: routing data item to load (NLA_U32: nft_rt_keys)
+ */
+enum nft_rt_attributes {
+ NFTA_RT_UNSPEC,
+ NFTA_RT_DREG,
+ NFTA_RT_KEY,
+ __NFTA_RT_MAX
+};
+#define NFTA_RT_MAX (__NFTA_RT_MAX - 1)
+
+/**
* enum nft_ct_keys - nf_tables ct expression keys
*
* @NFT_CT_STATE: conntrack state (bitmask of enum ip_conntrack_info)
@@ -941,6 +989,7 @@ enum nft_queue_attributes {
enum nft_quota_flags {
NFT_QUOTA_F_INV = (1 << 0),
+ NFT_QUOTA_F_DEPLETED = (1 << 1),
};
/**
@@ -948,12 +997,14 @@ enum nft_quota_flags {
*
* @NFTA_QUOTA_BYTES: quota in bytes (NLA_U16)
* @NFTA_QUOTA_FLAGS: flags (NLA_U32)
+ * @NFTA_QUOTA_CONSUMED: quota already consumed in bytes (NLA_U64)
*/
enum nft_quota_attributes {
NFTA_QUOTA_UNSPEC,
NFTA_QUOTA_BYTES,
NFTA_QUOTA_FLAGS,
NFTA_QUOTA_PAD,
+ NFTA_QUOTA_CONSUMED,
__NFTA_QUOTA_MAX
};
#define NFTA_QUOTA_MAX (__NFTA_QUOTA_MAX - 1)
@@ -1098,6 +1149,26 @@ enum nft_fwd_attributes {
#define NFTA_FWD_MAX (__NFTA_FWD_MAX - 1)
/**
+ * enum nft_objref_attributes - nf_tables stateful object expression netlink attributes
+ *
+ * @NFTA_OBJREF_IMM_TYPE: object type for immediate reference (NLA_U32: nft_register)
+ * @NFTA_OBJREF_IMM_NAME: object name for immediate reference (NLA_STRING)
+ * @NFTA_OBJREF_SET_SREG: source register of the data to look for (NLA_U32: nft_registers)
+ * @NFTA_OBJREF_SET_NAME: name of the set to look in (NLA_STRING)
+ * @NFTA_OBJREF_SET_ID: id of the set to look in within this transaction (NLA_U32)
+ */
+enum nft_objref_attributes {
+ NFTA_OBJREF_UNSPEC,
+ NFTA_OBJREF_IMM_TYPE,
+ NFTA_OBJREF_IMM_NAME,
+ NFTA_OBJREF_SET_SREG,
+ NFTA_OBJREF_SET_NAME,
+ NFTA_OBJREF_SET_ID,
+ __NFTA_OBJREF_MAX
+};
+#define NFTA_OBJREF_MAX (__NFTA_OBJREF_MAX - 1)
+
+/**
* enum nft_gen_attributes - nf_tables ruleset generation attributes
*
* @NFTA_GEN_ID: Ruleset generation ID (NLA_U32)
@@ -1109,6 +1180,68 @@ enum nft_gen_attributes {
};
#define NFTA_GEN_MAX (__NFTA_GEN_MAX - 1)
+/*
+ * enum nft_fib_attributes - nf_tables fib expression netlink attributes
+ *
+ * @NFTA_FIB_DREG: destination register (NLA_U32)
+ * @NFTA_FIB_RESULT: desired result (NLA_U32)
+ * @NFTA_FIB_FLAGS: flowi fields to initialize when querying the FIB (NLA_U32)
+ *
+ * The FIB expression performs a route lookup according
+ * to the packet data.
+ */
+enum nft_fib_attributes {
+ NFTA_FIB_UNSPEC,
+ NFTA_FIB_DREG,
+ NFTA_FIB_RESULT,
+ NFTA_FIB_FLAGS,
+ __NFTA_FIB_MAX
+};
+#define NFTA_FIB_MAX (__NFTA_FIB_MAX - 1)
+
+enum nft_fib_result {
+ NFT_FIB_RESULT_UNSPEC,
+ NFT_FIB_RESULT_OIF,
+ NFT_FIB_RESULT_OIFNAME,
+ NFT_FIB_RESULT_ADDRTYPE,
+ __NFT_FIB_RESULT_MAX
+};
+#define NFT_FIB_RESULT_MAX (__NFT_FIB_RESULT_MAX - 1)
+
+enum nft_fib_flags {
+ NFTA_FIB_F_SADDR = 1 << 0, /* look up src */
+ NFTA_FIB_F_DADDR = 1 << 1, /* look up dst */
+ NFTA_FIB_F_MARK = 1 << 2, /* use skb->mark */
+ NFTA_FIB_F_IIF = 1 << 3, /* restrict to iif */
+ NFTA_FIB_F_OIF = 1 << 4, /* restrict to oif */
+};
+
+#define NFT_OBJECT_UNSPEC 0
+#define NFT_OBJECT_COUNTER 1
+#define NFT_OBJECT_QUOTA 2
+#define __NFT_OBJECT_MAX 3
+#define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1)
+
+/**
+ * enum nft_object_attributes - nf_tables stateful object netlink attributes
+ *
+ * @NFTA_OBJ_TABLE: name of the table containing the expression (NLA_STRING)
+ * @NFTA_OBJ_NAME: name of this expression type (NLA_STRING)
+ * @NFTA_OBJ_TYPE: stateful object type (NLA_U32)
+ * @NFTA_OBJ_DATA: stateful object data (NLA_NESTED)
+ * @NFTA_OBJ_USE: number of references to this expression (NLA_U32)
+ */
+enum nft_object_attributes {
+ NFTA_OBJ_UNSPEC,
+ NFTA_OBJ_TABLE,
+ NFTA_OBJ_NAME,
+ NFTA_OBJ_TYPE,
+ NFTA_OBJ_DATA,
+ NFTA_OBJ_USE,
+ __NFTA_OBJ_MAX
+};
+#define NFTA_OBJ_MAX (__NFTA_OBJ_MAX - 1)
+
/**
* enum nft_trace_attributes - nf_tables trace netlink attributes
*
diff --git a/include/uapi/linux/netfilter/xt_bpf.h b/include/uapi/linux/netfilter/xt_bpf.h
index 1fad2c27ac32..b97725af2ac0 100644
--- a/include/uapi/linux/netfilter/xt_bpf.h
+++ b/include/uapi/linux/netfilter/xt_bpf.h
@@ -2,9 +2,11 @@
#define _XT_BPF_H
#include <linux/filter.h>
+#include <linux/limits.h>
#include <linux/types.h>
#define XT_BPF_MAX_NUM_INSTR 64
+#define XT_BPF_PATH_MAX (XT_BPF_MAX_NUM_INSTR * sizeof(struct sock_filter))
struct bpf_prog;
@@ -16,4 +18,23 @@ struct xt_bpf_info {
struct bpf_prog *filter __attribute__((aligned(8)));
};
+enum xt_bpf_modes {
+ XT_BPF_MODE_BYTECODE,
+ XT_BPF_MODE_FD_PINNED,
+ XT_BPF_MODE_FD_ELF,
+};
+
+struct xt_bpf_info_v1 {
+ __u16 mode;
+ __u16 bpf_program_num_elem;
+ __s32 fd;
+ union {
+ struct sock_filter bpf_program[XT_BPF_MAX_NUM_INSTR];
+ char path[XT_BPF_PATH_MAX];
+ };
+
+ /* only used in the kernel */
+ struct bpf_prog *filter __attribute__((aligned(8)));
+};
+
#endif /*_XT_BPF_H */
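A rough sketch of how an iptables extension might fill the new v1 match structure for a program pinned in bpffs; the surrounding pin-path handling is omitted and the fd convention shown is an assumption.

#include <string.h>
#include <linux/netfilter/xt_bpf.h>

static void fill_pinned_match(struct xt_bpf_info_v1 *info, const char *pin)
{
	memset(info, 0, sizeof(*info));
	info->mode = XT_BPF_MODE_FD_PINNED;
	info->fd = -1;				/* not used in pinned mode */
	strncpy(info->path, pin, sizeof(info->path) - 1);
}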
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 56368e9b4622..6b76e3b0c18e 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -323,7 +323,7 @@
* @NL80211_CMD_GET_SCAN: get scan results
* @NL80211_CMD_TRIGGER_SCAN: trigger a new scan with the given parameters
* %NL80211_ATTR_TX_NO_CCK_RATE is used to decide whether to send the
- * probe requests at CCK rate or not. %NL80211_ATTR_MAC can be used to
+ * probe requests at CCK rate or not. %NL80211_ATTR_BSSID can be used to
* specify a BSSID to scan for; if not included, the wildcard BSSID will
* be used.
* @NL80211_CMD_NEW_SCAN_RESULTS: scan notification (as a reply to
@@ -600,6 +600,20 @@
*
* @NL80211_CMD_SET_WDS_PEER: Set the MAC address of the peer on a WDS interface.
*
+ * @NL80211_CMD_SET_MULTICAST_TO_UNICAST: Configure if this AP should perform
+ * multicast to unicast conversion. When enabled, all multicast packets
+ * with ethertype ARP, IPv4 or IPv6 (possibly within an 802.1Q header)
+ * will be sent out to each station once with the destination (multicast)
+ * MAC address replaced by the station's MAC address. Note that this may
+ * break certain expectations of the receiver, e.g. the ability to drop
+ * unicast IP packets encapsulated in multicast L2 frames, or the ability
+ * to not send destination unreachable messages in such cases.
+ * This can only be toggled per BSS. Configure this on an interface of
+ * type %NL80211_IFTYPE_AP. It applies to all its VLAN interfaces
+ * (%NL80211_IFTYPE_AP_VLAN), except for those in 4addr (WDS) mode.
+ * If %NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED is not present with this
+ * command, the feature is disabled.
+ *
* @NL80211_CMD_JOIN_MESH: Join a mesh. The mesh ID must be given, and initial
* mesh config parameters may be given.
* @NL80211_CMD_LEAVE_MESH: Leave the mesh network -- no special arguments, the
@@ -874,6 +888,12 @@
* This will contain a %NL80211_ATTR_NAN_MATCH nested attribute and
* %NL80211_ATTR_COOKIE.
*
+ * @NL80211_CMD_UPDATE_CONNECT_PARAMS: Update one or more connect parameters
+ * for subsequent roaming cases if the driver or firmware uses internal
+ * BSS selection. This command can be issued only while connected and it
+ * does not result in a change for the current association. Currently,
+ * only the %NL80211_ATTR_IE data is used and updated with this command.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1069,6 +1089,10 @@ enum nl80211_commands {
NL80211_CMD_CHANGE_NAN_CONFIG,
NL80211_CMD_NAN_MATCH,
+ NL80211_CMD_SET_MULTICAST_TO_UNICAST,
+
+ NL80211_CMD_UPDATE_CONNECT_PARAMS,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1638,8 +1662,16 @@ enum nl80211_commands {
* the connection request from a station. nl80211_connect_failed_reason
* enum has different reasons of connection failure.
*
- * @NL80211_ATTR_SAE_DATA: SAE elements in Authentication frames. This starts
- * with the Authentication transaction sequence number field.
+ * @NL80211_ATTR_AUTH_DATA: Fields and elements in Authentication frames.
+ * This contains the authentication frame body (non-IE and IE data),
+ * excluding the Authentication algorithm number, i.e., starting at the
+ * Authentication transaction sequence number field. It is used with
+ * authentication algorithms that need special fields to be added into
+ * the frames (SAE and FILS). Currently, only the SAE cases use the
+ * initial two fields (Authentication transaction sequence number and
+ * Status code). However, those fields are included in the attribute data
+ * for all authentication algorithms to keep the attribute definition
+ * consistent.
*
* @NL80211_ATTR_VHT_CAPABILITY: VHT Capability information element (from
* association request when used with NL80211_CMD_NEW_STATION)
@@ -1936,6 +1968,17 @@ enum nl80211_commands {
* attribute.
* @NL80211_ATTR_NAN_MATCH: used to report a match. This is a nested attribute.
* See &enum nl80211_nan_match_attributes.
+ * @NL80211_ATTR_FILS_KEK: KEK for FILS (Re)Association Request/Response frame
+ * protection.
+ * @NL80211_ATTR_FILS_NONCES: Nonces (part of AAD) for FILS (Re)Association
+ * Request/Response frame protection. This attribute contains the 16 octet
+ * STA Nonce followed by 16 octets of AP Nonce.
+ *
+ * @NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED: Indicates whether or not multicast
+ * packets should be sent out as unicast to all stations (flag attribute).
+ *
+ * @NL80211_ATTR_BSSID: The BSSID of the AP. Note that %NL80211_ATTR_MAC is also
+ * used in various commands/events for specifying the BSSID.
*
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -2195,7 +2238,7 @@ enum nl80211_attrs {
NL80211_ATTR_CONN_FAILED_REASON,
- NL80211_ATTR_SAE_DATA,
+ NL80211_ATTR_AUTH_DATA,
NL80211_ATTR_VHT_CAPABILITY,
@@ -2336,6 +2379,13 @@ enum nl80211_attrs {
NL80211_ATTR_NAN_FUNC,
NL80211_ATTR_NAN_MATCH,
+ NL80211_ATTR_FILS_KEK,
+ NL80211_ATTR_FILS_NONCES,
+
+ NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED,
+
+ NL80211_ATTR_BSSID,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2347,6 +2397,7 @@ enum nl80211_attrs {
#define NL80211_ATTR_SCAN_GENERATION NL80211_ATTR_GENERATION
#define NL80211_ATTR_MESH_PARAMS NL80211_ATTR_MESH_CONFIG
#define NL80211_ATTR_IFACE_SOCKET_OWNER NL80211_ATTR_SOCKET_OWNER
+#define NL80211_ATTR_SAE_DATA NL80211_ATTR_AUTH_DATA
/*
* Allow user space programs to use #ifdef on new attributes by defining them
@@ -3660,6 +3711,9 @@ enum nl80211_bss_status {
* @NL80211_AUTHTYPE_FT: Fast BSS Transition (IEEE 802.11r)
* @NL80211_AUTHTYPE_NETWORK_EAP: Network EAP (some Cisco APs and mainly LEAP)
* @NL80211_AUTHTYPE_SAE: Simultaneous authentication of equals
+ * @NL80211_AUTHTYPE_FILS_SK: Fast Initial Link Setup shared key
+ * @NL80211_AUTHTYPE_FILS_SK_PFS: Fast Initial Link Setup shared key with PFS
+ * @NL80211_AUTHTYPE_FILS_PK: Fast Initial Link Setup public key
* @__NL80211_AUTHTYPE_NUM: internal
* @NL80211_AUTHTYPE_MAX: maximum valid auth algorithm
* @NL80211_AUTHTYPE_AUTOMATIC: determine automatically (if necessary by
@@ -3672,6 +3726,9 @@ enum nl80211_auth_type {
NL80211_AUTHTYPE_FT,
NL80211_AUTHTYPE_NETWORK_EAP,
NL80211_AUTHTYPE_SAE,
+ NL80211_AUTHTYPE_FILS_SK,
+ NL80211_AUTHTYPE_FILS_SK_PFS,
+ NL80211_AUTHTYPE_FILS_PK,
/* keep last */
__NL80211_AUTHTYPE_NUM,
@@ -4280,6 +4337,9 @@ enum nl80211_iface_limit_attrs {
* of supported channel widths for radar detection.
* @NL80211_IFACE_COMB_RADAR_DETECT_REGIONS: u32 attribute containing the bitmap
* of supported regulatory regions for radar detection.
+ * @NL80211_IFACE_COMB_BI_MIN_GCD: u32 attribute specifying the minimum GCD of
+ * different beacon intervals supported by all the interface combinations
+ * in this group (if not present, all beacon intervals must be identical).
* @NUM_NL80211_IFACE_COMB: number of attributes
* @MAX_NL80211_IFACE_COMB: highest attribute number
*
@@ -4287,8 +4347,8 @@ enum nl80211_iface_limit_attrs {
* limits = [ #{STA} <= 1, #{AP} <= 1 ], matching BI, channels = 1, max = 2
* => allows an AP and a STA that must match BIs
*
- * numbers = [ #{AP, P2P-GO} <= 8 ], channels = 1, max = 8
- * => allows 8 of AP/GO
+ * numbers = [ #{AP, P2P-GO} <= 8 ], BI min gcd, channels = 1, max = 8,
+ * => allows 8 of AP/GO that can have BI gcd >= min gcd
*
* numbers = [ #{STA} <= 2 ], channels = 2, max = 2
* => allows two STAs on different channels
@@ -4314,6 +4374,7 @@ enum nl80211_if_combination_attrs {
NL80211_IFACE_COMB_NUM_CHANNELS,
NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
NL80211_IFACE_COMB_RADAR_DETECT_REGIONS,
+ NL80211_IFACE_COMB_BI_MIN_GCD,
/* keep last */
NUM_NL80211_IFACE_COMB,
@@ -4634,6 +4695,8 @@ enum nl80211_feature_flags {
* configuration (AP/mesh) with HT rates.
* @NL80211_EXT_FEATURE_BEACON_RATE_VHT: Driver supports beacon rate
* configuration (AP/mesh) with VHT rates.
+ * @NL80211_EXT_FEATURE_FILS_STA: This driver supports Fast Initial Link Setup
+ * with user space SME (NL80211_CMD_AUTHENTICATE) in station mode.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -4648,6 +4711,7 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
NL80211_EXT_FEATURE_BEACON_RATE_HT,
NL80211_EXT_FEATURE_BEACON_RATE_VHT,
+ NL80211_EXT_FEATURE_FILS_STA,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
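For the NL80211_CMD_SET_MULTICAST_TO_UNICAST command documented above, a libnl-genl based toggle might look roughly like this sketch; the interface handling, attribute ordering and error handling are illustrative and assume libnl3.

#include <net/if.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

int set_mcast_to_ucast(const char *ifname, int enable)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family, err;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	genlmsg_put(msg, 0, 0, family, 0, 0,
		    NL80211_CMD_SET_MULTICAST_TO_UNICAST, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex(ifname));
	if (enable)	/* flag attribute: absent means disabled */
		nla_put_flag(msg, NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED);

	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0 ? err : 0;
}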
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 59ed3992c760..375d812fea36 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -705,6 +705,15 @@ enum ovs_nat_attr {
#define OVS_NAT_ATTR_MAX (__OVS_NAT_ATTR_MAX - 1)
+/*
+ * struct ovs_action_push_eth - %OVS_ACTION_ATTR_PUSH_ETH action argument.
+ * @addresses: Source and destination MAC addresses.
+ */
+struct ovs_action_push_eth {
+ struct ovs_key_ethernet addresses;
+};
+
/**
* enum ovs_action_attr - Action types.
*
@@ -738,6 +747,10 @@ enum ovs_nat_attr {
* is no MPLS label stack, as determined by ethertype, no action is taken.
* @OVS_ACTION_ATTR_CT: Track the connection. Populate the conntrack-related
* entries in the flow key.
+ * @OVS_ACTION_ATTR_PUSH_ETH: Push a new outermost Ethernet header onto the
+ * packet.
+ * @OVS_ACTION_ATTR_POP_ETH: Pop the outermost Ethernet header off the
+ * packet.
*
* Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
* fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -765,6 +778,8 @@ enum ovs_action_attr {
* bits. */
OVS_ACTION_ATTR_CT, /* Nested OVS_CT_ATTR_* . */
OVS_ACTION_ATTR_TRUNC, /* u32 struct ovs_action_trunc. */
+ OVS_ACTION_ATTR_PUSH_ETH, /* struct ovs_action_push_eth. */
+ OVS_ACTION_ATTR_POP_ETH, /* No argument. */
__OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
* from userspace. */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index e5a2e68b2236..174d1147081b 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -23,6 +23,14 @@
#define LINUX_PCI_REGS_H
/*
+ * Conventional PCI and PCI-X Mode 1 devices have 256 bytes of
+ * configuration space. PCI-X Mode 2 and PCIe devices have 4096 bytes of
+ * configuration space.
+ */
+#define PCI_CFG_SPACE_SIZE 256
+#define PCI_CFG_SPACE_EXP_SIZE 4096
+
+/*
* Under PCI, each device has 256 bytes of configuration address space,
* of which the first 64 bytes are standardized as follows:
*/
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 8fd715f806a2..cb4bcdc58543 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -397,6 +397,7 @@ enum {
TCA_BPF_NAME,
TCA_BPF_FLAGS,
TCA_BPF_FLAGS_GEN,
+ TCA_BPF_DIGEST,
__TCA_BPF_MAX,
};
@@ -447,11 +448,38 @@ enum {
TCA_FLOWER_KEY_TCP_DST_MASK, /* be16 */
TCA_FLOWER_KEY_UDP_SRC_MASK, /* be16 */
TCA_FLOWER_KEY_UDP_DST_MASK, /* be16 */
+ TCA_FLOWER_KEY_SCTP_SRC_MASK, /* be16 */
+ TCA_FLOWER_KEY_SCTP_DST_MASK, /* be16 */
+
+ TCA_FLOWER_KEY_SCTP_SRC, /* be16 */
+ TCA_FLOWER_KEY_SCTP_DST, /* be16 */
+
+ TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, /* be16 */
+ TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, /* be16 */
+ TCA_FLOWER_KEY_ENC_UDP_DST_PORT, /* be16 */
+ TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, /* be16 */
+
+ TCA_FLOWER_KEY_FLAGS, /* be32 */
+ TCA_FLOWER_KEY_FLAGS_MASK, /* be32 */
+
+ TCA_FLOWER_KEY_ICMPV4_CODE, /* u8 */
+ TCA_FLOWER_KEY_ICMPV4_CODE_MASK,/* u8 */
+ TCA_FLOWER_KEY_ICMPV4_TYPE, /* u8 */
+ TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,/* u8 */
+ TCA_FLOWER_KEY_ICMPV6_CODE, /* u8 */
+ TCA_FLOWER_KEY_ICMPV6_CODE_MASK,/* u8 */
+ TCA_FLOWER_KEY_ICMPV6_TYPE, /* u8 */
+ TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,/* u8 */
+
__TCA_FLOWER_MAX,
};
#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1)
+enum {
+ TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0),
+};
+
/* Match-all classifier */
enum {
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index c3e654c6d518..9930f3e9040f 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -84,6 +84,10 @@
#define MD_DISK_CANDIDATE 5 /* disk is added as spare (local) until confirmed
* For clustered environments only.
*/
+#define MD_DISK_FAILFAST 10 /* Send REQ_FAILFAST if there are multiple
+ * devices available - and don't try to
+ * correct read errors.
+ */
#define MD_DISK_WRITEMOSTLY 9 /* disk is "write-mostly" in RAID1 config.
* read requests will only be sent here in
@@ -265,8 +269,9 @@ struct mdp_superblock_1 {
__le32 dev_number; /* permanent identifier of this device - not role in raid */
__le32 cnt_corrected_read; /* number of read errors that were corrected by re-writing */
__u8 device_uuid[16]; /* user-space setable, ignored by kernel */
- __u8 devflags; /* per-device flags. Only one defined...*/
+ __u8 devflags; /* per-device flags. Only two defined...*/
#define WriteMostly1 1 /* mask for writemostly flag in above */
+#define FailFast1 2 /* Should avoid retries and fixups and just fail */
/* Bad block log. If there are any bad blocks the feature flag is set.
* If offset and size are non-zero, that space is reserved and available
*/
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 5a78be518101..e14377f2ec27 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -318,6 +318,7 @@ enum rtattr_type_t {
RTA_ENCAP,
RTA_EXPIRES,
RTA_PAD,
+ RTA_UID,
__RTA_MAX
};
diff --git a/include/uapi/linux/seg6.h b/include/uapi/linux/seg6.h
new file mode 100644
index 000000000000..c396a8052f73
--- /dev/null
+++ b/include/uapi/linux/seg6.h
@@ -0,0 +1,54 @@
+/*
+ * SR-IPv6 implementation
+ *
+ * Author:
+ * David Lebrun <david.lebrun@uclouvain.be>
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_SEG6_H
+#define _UAPI_LINUX_SEG6_H
+
+/*
+ * SRH
+ */
+struct ipv6_sr_hdr {
+ __u8 nexthdr;
+ __u8 hdrlen;
+ __u8 type;
+ __u8 segments_left;
+ __u8 first_segment;
+ __u8 flag_1;
+ __u8 flag_2;
+ __u8 reserved;
+
+ struct in6_addr segments[0];
+};
+
+#define SR6_FLAG1_CLEANUP (1 << 7)
+#define SR6_FLAG1_PROTECTED (1 << 6)
+#define SR6_FLAG1_OAM (1 << 5)
+#define SR6_FLAG1_ALERT (1 << 4)
+#define SR6_FLAG1_HMAC (1 << 3)
+
+#define SR6_TLV_INGRESS 1
+#define SR6_TLV_EGRESS 2
+#define SR6_TLV_OPAQUE 3
+#define SR6_TLV_PADDING 4
+#define SR6_TLV_HMAC 5
+
+#define sr_has_cleanup(srh) ((srh)->flag_1 & SR6_FLAG1_CLEANUP)
+#define sr_has_hmac(srh) ((srh)->flag_1 & SR6_FLAG1_HMAC)
+
+struct sr6_tlv {
+ __u8 type;
+ __u8 len;
+ __u8 data[0];
+};
+
+#endif
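For reference, a minimal userspace sketch of walking an SRH laid out as above; the helper name is hypothetical and the header pointer is assumed to come from an already-parsed packet.

#include <stdio.h>
#include <arpa/inet.h>
#include <linux/seg6.h>

/* Hypothetical helper: dump the segment list of an already-parsed SRH. */
static void print_srh(const struct ipv6_sr_hdr *srh)
{
	char buf[INET6_ADDRSTRLEN];
	int i;

	/* first_segment indexes the last entry of segments[] (segment 0 of the path). */
	for (i = srh->first_segment; i >= 0; i--) {
		inet_ntop(AF_INET6, &srh->segments[i], buf, sizeof(buf));
		printf("segments[%d] = %s%s\n", i, buf,
		       i == srh->segments_left ? " (active)" : "");
	}
	if (sr_has_hmac(srh))
		printf("HMAC TLV present (flag_1 & SR6_FLAG1_HMAC)\n");
}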
diff --git a/include/uapi/linux/seg6_genl.h b/include/uapi/linux/seg6_genl.h
new file mode 100644
index 000000000000..fcf1c60d7df3
--- /dev/null
+++ b/include/uapi/linux/seg6_genl.h
@@ -0,0 +1,32 @@
+#ifndef _UAPI_LINUX_SEG6_GENL_H
+#define _UAPI_LINUX_SEG6_GENL_H
+
+#define SEG6_GENL_NAME "SEG6"
+#define SEG6_GENL_VERSION 0x1
+
+enum {
+ SEG6_ATTR_UNSPEC,
+ SEG6_ATTR_DST,
+ SEG6_ATTR_DSTLEN,
+ SEG6_ATTR_HMACKEYID,
+ SEG6_ATTR_SECRET,
+ SEG6_ATTR_SECRETLEN,
+ SEG6_ATTR_ALGID,
+ SEG6_ATTR_HMACINFO,
+ __SEG6_ATTR_MAX,
+};
+
+#define SEG6_ATTR_MAX (__SEG6_ATTR_MAX - 1)
+
+enum {
+ SEG6_CMD_UNSPEC,
+ SEG6_CMD_SETHMAC,
+ SEG6_CMD_DUMPHMAC,
+ SEG6_CMD_SET_TUNSRC,
+ SEG6_CMD_GET_TUNSRC,
+ __SEG6_CMD_MAX,
+};
+
+#define SEG6_CMD_MAX (__SEG6_CMD_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/seg6_hmac.h b/include/uapi/linux/seg6_hmac.h
new file mode 100644
index 000000000000..b652dfd51bc5
--- /dev/null
+++ b/include/uapi/linux/seg6_hmac.h
@@ -0,0 +1,21 @@
+#ifndef _UAPI_LINUX_SEG6_HMAC_H
+#define _UAPI_LINUX_SEG6_HMAC_H
+
+#include <linux/seg6.h>
+
+#define SEG6_HMAC_SECRET_LEN 64
+#define SEG6_HMAC_FIELD_LEN 32
+
+struct sr6_tlv_hmac {
+ struct sr6_tlv tlvhdr;
+ __u16 reserved;
+ __be32 hmackeyid;
+ __u8 hmac[SEG6_HMAC_FIELD_LEN];
+};
+
+enum {
+ SEG6_HMAC_ALGO_SHA1 = 1,
+ SEG6_HMAC_ALGO_SHA256 = 2,
+};
+
+#endif
diff --git a/include/uapi/linux/seg6_iptunnel.h b/include/uapi/linux/seg6_iptunnel.h
new file mode 100644
index 000000000000..0f7dbd280a9c
--- /dev/null
+++ b/include/uapi/linux/seg6_iptunnel.h
@@ -0,0 +1,44 @@
+/*
+ * SR-IPv6 implementation
+ *
+ * Author:
+ * David Lebrun <david.lebrun@uclouvain.be>
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_SEG6_IPTUNNEL_H
+#define _UAPI_LINUX_SEG6_IPTUNNEL_H
+
+enum {
+ SEG6_IPTUNNEL_UNSPEC,
+ SEG6_IPTUNNEL_SRH,
+ __SEG6_IPTUNNEL_MAX,
+};
+#define SEG6_IPTUNNEL_MAX (__SEG6_IPTUNNEL_MAX - 1)
+
+struct seg6_iptunnel_encap {
+ int mode;
+ struct ipv6_sr_hdr srh[0];
+};
+
+#define SEG6_IPTUN_ENCAP_SIZE(x) ((sizeof(*x)) + (((x)->srh->hdrlen + 1) << 3))
+
+enum {
+ SEG6_IPTUN_MODE_INLINE,
+ SEG6_IPTUN_MODE_ENCAP,
+};
+
+static inline size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo)
+{
+ int encap = (tuninfo->mode == SEG6_IPTUN_MODE_ENCAP);
+
+ return ((tuninfo->srh->hdrlen + 1) << 3) +
+ (encap * sizeof(struct ipv6hdr));
+}
+
+#endif
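A hedged sketch of building the encapsulation blob this structure describes, for a single-segment policy. Including <linux/ipv6.h> first is assumed so that seg6_lwt_headroom() compiles in userspace; the routing type value (4) follows the SRH specification and the helper name is illustrative.

#include <stdlib.h>
#include <string.h>
#include <linux/in6.h>
#include <linux/ipv6.h>		/* struct ipv6hdr, used by seg6_lwt_headroom() */
#include <linux/seg6.h>
#include <linux/seg6_iptunnel.h>

/* Hypothetical helper: build the encap blob for a single-segment policy. */
static struct seg6_iptunnel_encap *build_encap(const struct in6_addr *seg)
{
	size_t srh_len = sizeof(struct ipv6_sr_hdr) + sizeof(*seg);
	struct seg6_iptunnel_encap *enc = calloc(1, sizeof(*enc) + srh_len);

	if (!enc)
		return NULL;

	enc->mode = SEG6_IPTUN_MODE_ENCAP;
	enc->srh->type = 4;			/* routing type of the SRH */
	/* hdrlen counts 8-octet units beyond the first 8 octets */
	enc->srh->hdrlen = (srh_len >> 3) - 1;
	enc->srh->first_segment = 0;
	enc->srh->segments_left = 0;
	memcpy(enc->srh->segments, seg, sizeof(*seg));

	/* SEG6_IPTUN_ENCAP_SIZE(enc) now equals sizeof(*enc) + srh_len */
	return enc;
}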
diff --git a/include/uapi/linux/sockios.h b/include/uapi/linux/sockios.h
index 8e7890b26d9a..79d029d25310 100644
--- a/include/uapi/linux/sockios.h
+++ b/include/uapi/linux/sockios.h
@@ -24,6 +24,8 @@
#define SIOCINQ FIONREAD
#define SIOCOUTQ TIOCOUTQ /* output queue size (not sent + not acked) */
+#define SOCK_IOC_TYPE 0x89
+
/* Routing table calls. */
#define SIOCADDRT 0x890B /* add routing table entry */
#define SIOCDELRT 0x890C /* delete routing table entry */
@@ -84,6 +86,7 @@
#define SIOCWANDEV 0x894A /* get/set netdev parameters */
#define SIOCOUTQNSD 0x894B /* output queue size (not sent only) */
+#define SIOCGSKNS 0x894C /* get socket network namespace */
/* ARP cache control calls. */
/* 0x8950 - 0x8952 * obsolete calls, don't re-use */
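A minimal sketch of the new SIOCGSKNS call, which returns a file descriptor referring to the socket's network namespace; the returned fd is suitable for setns(2).

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>

int main(void)
{
	int sk = socket(AF_INET, SOCK_DGRAM, 0);
	int nsfd;

	if (sk < 0)
		return 1;

	/* Returns an fd referring to the socket's network namespace. */
	nsfd = ioctl(sk, SIOCGSKNS);
	if (nsfd < 0) {
		perror("SIOCGSKNS");
		return 1;
	}
	/* nsfd can be handed to setns(2) to enter that namespace. */
	close(nsfd);
	close(sk);
	return 0;
}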
diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h
index 063d9d465119..a6b88a6f7f71 100644
--- a/include/uapi/linux/tc_act/tc_bpf.h
+++ b/include/uapi/linux/tc_act/tc_bpf.h
@@ -27,6 +27,7 @@ enum {
TCA_ACT_BPF_FD,
TCA_ACT_BPF_NAME,
TCA_ACT_BPF_PAD,
+ TCA_ACT_BPF_DIGEST,
__TCA_ACT_BPF_MAX,
};
#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_skbedit.h b/include/uapi/linux/tc_act/tc_skbedit.h
index a4d00c608d8f..2884425738ce 100644
--- a/include/uapi/linux/tc_act/tc_skbedit.h
+++ b/include/uapi/linux/tc_act/tc_skbedit.h
@@ -28,6 +28,7 @@
#define SKBEDIT_F_QUEUE_MAPPING 0x2
#define SKBEDIT_F_MARK 0x4
#define SKBEDIT_F_PTYPE 0x8
+#define SKBEDIT_F_MASK 0x10
struct tc_skbedit {
tc_gen;
@@ -42,6 +43,7 @@ enum {
TCA_SKBEDIT_MARK,
TCA_SKBEDIT_PAD,
TCA_SKBEDIT_PTYPE,
+ TCA_SKBEDIT_MASK,
__TCA_SKBEDIT_MAX
};
#define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h
index 890106ff16e6..84ea55e1076b 100644
--- a/include/uapi/linux/tc_act/tc_tunnel_key.h
+++ b/include/uapi/linux/tc_act/tc_tunnel_key.h
@@ -33,6 +33,7 @@ enum {
TCA_TUNNEL_KEY_ENC_IPV6_DST, /* struct in6_addr */
TCA_TUNNEL_KEY_ENC_KEY_ID, /* be64 */
TCA_TUNNEL_KEY_PAD,
+ TCA_TUNNEL_KEY_ENC_DST_PORT, /* be16 */
__TCA_TUNNEL_KEY_MAX,
};
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 73ac0db487f8..c53de2691cec 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -214,6 +214,18 @@ struct tcp_info {
__u32 tcpi_data_segs_out; /* RFC4898 tcpEStatsDataSegsOut */
__u64 tcpi_delivery_rate;
+
+ __u64 tcpi_busy_time; /* Time (usec) busy sending data */
+ __u64 tcpi_rwnd_limited; /* Time (usec) limited by receive window */
+ __u64 tcpi_sndbuf_limited; /* Time (usec) limited by send buffer */
+};
+
+/* netlink attribute types for SCM_TIMESTAMPING_OPT_STATS */
+enum {
+ TCP_NLA_PAD,
+ TCP_NLA_BUSY, /* Time (usec) busy sending data */
+ TCP_NLA_RWND_LIMITED, /* Time (usec) limited by receive window */
+ TCP_NLA_SNDBUF_LIMITED, /* Time (usec) limited by send buffer */
};
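A minimal sketch of reading the extended tcp_info, including the three new stall counters; the uapi <linux/tcp.h> is used here because the libc copy of struct tcp_info may lag behind these fields, and older kernels simply return a shorter structure.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>		/* uapi struct tcp_info with the new fields */

/* Hypothetical helper: dump the new stall counters for a connected socket. */
static void dump_tcp_stalls(int sk)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	memset(&ti, 0, sizeof(ti));
	if (getsockopt(sk, IPPROTO_TCP, TCP_INFO, &ti, &len) < 0) {
		perror("TCP_INFO");
		return;
	}
	/* Older kernels return fewer bytes; the zeroed tail stays zero. */
	printf("busy %llu us, rwnd-limited %llu us, sndbuf-limited %llu us\n",
	       (unsigned long long)ti.tcpi_busy_time,
	       (unsigned long long)ti.tcpi_rwnd_limited,
	       (unsigned long long)ti.tcpi_sndbuf_limited);
}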
/* for TCP_MD5SIG socket option */
diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h
index acf0979b790a..41e5914f0a8e 100644
--- a/include/uapi/linux/types.h
+++ b/include/uapi/linux/types.h
@@ -23,11 +23,7 @@
#else
#define __bitwise__
#endif
-#ifdef __CHECK_ENDIAN__
#define __bitwise __bitwise__
-#else
-#define __bitwise
-#endif
typedef __u16 __bitwise __le16;
typedef __u16 __bitwise __be16;
diff --git a/include/uapi/linux/uleds.h b/include/uapi/linux/uleds.h
new file mode 100644
index 000000000000..95186578c46e
--- /dev/null
+++ b/include/uapi/linux/uleds.h
@@ -0,0 +1,24 @@
+/*
+ * Userspace driver support for the LED subsystem
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _UAPI__ULEDS_H_
+#define _UAPI__ULEDS_H_
+
+#define LED_MAX_NAME_SIZE 64
+
+struct uleds_user_dev {
+ char name[LED_MAX_NAME_SIZE];
+ int max_brightness;
+};
+
+#endif /* _UAPI__ULEDS_H_ */
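A sketch of the userspace LED flow this header targets, assuming the /dev/uleds control node and the read-an-int notification format described in the driver documentation.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/uleds.h>

int main(void)
{
	struct uleds_user_dev dev;
	int fd, brightness;

	memset(&dev, 0, sizeof(dev));
	strncpy(dev.name, "example::status", LED_MAX_NAME_SIZE - 1);
	dev.max_brightness = 255;

	fd = open("/dev/uleds", O_RDWR);
	if (fd < 0 || write(fd, &dev, sizeof(dev)) != sizeof(dev)) {
		perror("uleds");
		return 1;
	}
	/* Each read returns the current brightness set through sysfs. */
	while (read(fd, &brightness, sizeof(brightness)) == sizeof(brightness))
		printf("brightness: %d\n", brightness);
	close(fd);
	return 0;
}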
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index a8acc24765fe..2c5d7c4a69e3 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -423,6 +423,12 @@ struct usb_endpoint_descriptor {
#define USB_ENDPOINT_XFER_INT 3
#define USB_ENDPOINT_MAX_ADJUSTABLE 0x80
+#define USB_ENDPOINT_MAXP_MASK 0x07ff
+#define USB_EP_MAXP_MULT_SHIFT 11
+#define USB_EP_MAXP_MULT_MASK (3 << USB_EP_MAXP_MULT_SHIFT)
+#define USB_EP_MAXP_MULT(m) \
+ (((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT)
+
/* The USB 3.0 spec redefines bits 5:4 of bmAttributes as interrupt ep type. */
#define USB_ENDPOINT_INTRTYPE 0x30
#define USB_ENDPOINT_INTR_PERIODIC (0 << 4)
@@ -623,11 +629,25 @@ static inline int usb_endpoint_is_isoc_out(
* usb_endpoint_maxp - get endpoint's max packet size
* @epd: endpoint to be checked
*
- * Returns @epd's max packet
+ * Returns @epd's max packet size (bits [10:0] of wMaxPacketSize)
*/
static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd)
{
- return __le16_to_cpu(epd->wMaxPacketSize);
+ return __le16_to_cpu(epd->wMaxPacketSize) & USB_ENDPOINT_MAXP_MASK;
+}
+
+/**
+ * usb_endpoint_maxp_mult - get endpoint's transactional opportunities
+ * @epd: endpoint to be checked
+ *
+ * Return @epd's wMaxPacketSize[12:11] + 1
+ */
+static inline int
+usb_endpoint_maxp_mult(const struct usb_endpoint_descriptor *epd)
+{
+ int maxp = __le16_to_cpu(epd->wMaxPacketSize);
+
+ return USB_EP_MAXP_MULT(maxp) + 1;
}
static inline int usb_endpoint_interrupt_type(
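As an illustration of the two helpers added above, a minimal sketch of the combined per-service-interval payload for a high-bandwidth periodic endpoint; the helper name is hypothetical.

#include <linux/usb/ch9.h>

/*
 * Bytes moved per service interval: wMaxPacketSize bits [10:0] multiplied
 * by the transaction count encoded in bits [12:11] plus one.
 */
static inline unsigned int
usb_ep_bytes_per_interval(const struct usb_endpoint_descriptor *epd)
{
	return usb_endpoint_maxp(epd) * usb_endpoint_maxp_mult(epd);
}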
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index acc63697a0cc..b2a31a55a612 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -93,6 +93,7 @@ struct usb_ext_prop_desc {
* | 0 | magic | LE32 | FUNCTIONFS_DESCRIPTORS_MAGIC_V2 |
* | 4 | length | LE32 | length of the whole data chunk |
* | 8 | flags | LE32 | combination of functionfs_flags |
+ * | | eventfd | LE32 | eventfd file descriptor |
* | | fs_count | LE32 | number of full-speed descriptors |
* | | hs_count | LE32 | number of high-speed descriptors |
* | | ss_count | LE32 | number of super-speed descriptors |
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index b6a357a5f053..0d2e1e01fbd5 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -892,6 +892,7 @@ enum v4l2_jpeg_chroma_subsampling {
#define V4L2_CID_LINK_FREQ (V4L2_CID_IMAGE_PROC_CLASS_BASE + 1)
#define V4L2_CID_PIXEL_RATE (V4L2_CID_IMAGE_PROC_CLASS_BASE + 2)
#define V4L2_CID_TEST_PATTERN (V4L2_CID_IMAGE_PROC_CLASS_BASE + 3)
+#define V4L2_CID_DEINTERLACING_MODE (V4L2_CID_IMAGE_PROC_CLASS_BASE + 4)
/* DV-class control IDs defined by V4L2 */
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index f31957166337..da2955154381 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -1,7 +1,7 @@
/*
* V4L2 DV timings header.
*
- * Copyright (C) 2012 Hans Verkuil <hans.verkuil@cisco.com>
+ * Copyright (C) 2012-2016 Hans Verkuil <hans.verkuil@cisco.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -11,11 +11,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef _V4L2_DV_TIMINGS_H
@@ -33,13 +28,14 @@
.bt = { _width , ## args }
#endif
-/* CEA-861-E timings (i.e. standard HDTV timings) */
+/* CEA-861-F timings (i.e. standard HDTV timings) */
#define V4L2_DV_BT_CEA_640X480P59_94 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(640, 480, 0, 0, \
25175000, 16, 96, 48, 10, 2, 33, 0, 0, 0, \
- V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 1) \
}
/* Note: these are the nominal timings, for HDMI links this format is typically
@@ -49,14 +45,18 @@
V4L2_INIT_BT_TIMINGS(720, 480, 1, 0, \
13500000, 19, 62, 57, 4, 3, 15, 4, 3, 16, \
V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO | \
+ V4L2_DV_FL_HAS_PICTURE_ASPECT | V4L2_DV_FL_HAS_CEA861_VIC, \
+ { 4, 3 }, 6) \
}
#define V4L2_DV_BT_CEA_720X480P59_94 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(720, 480, 0, 0, \
27000000, 16, 62, 60, 9, 6, 30, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_PICTURE_ASPECT | \
+ V4L2_DV_FL_HAS_CEA861_VIC, { 4, 3 }, 2) \
}
/* Note: these are the nominal timings, for HDMI links this format is typically
@@ -66,14 +66,18 @@
V4L2_INIT_BT_TIMINGS(720, 576, 1, 0, \
13500000, 12, 63, 69, 2, 3, 19, 2, 3, 20, \
V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO | \
+ V4L2_DV_FL_HAS_PICTURE_ASPECT | V4L2_DV_FL_HAS_CEA861_VIC, \
+ { 4, 3 }, 21) \
}
#define V4L2_DV_BT_CEA_720X576P50 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(720, 576, 0, 0, \
27000000, 12, 64, 68, 5, 5, 39, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_PICTURE_ASPECT | \
+ V4L2_DV_FL_HAS_CEA861_VIC, { 4, 3 }, 17) \
}
#define V4L2_DV_BT_CEA_1280X720P24 { \
@@ -82,7 +86,7 @@
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
59400000, 1760, 40, 220, 5, 5, 20, 0, 0, 0, \
V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 60) \
}
#define V4L2_DV_BT_CEA_1280X720P25 { \
@@ -90,7 +94,8 @@
V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 2420, 40, 220, 5, 5, 20, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 61) \
}
#define V4L2_DV_BT_CEA_1280X720P30 { \
@@ -99,7 +104,8 @@
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 1760, 40, 220, 5, 5, 20, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+ V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 62) \
}
#define V4L2_DV_BT_CEA_1280X720P50 { \
@@ -107,7 +113,8 @@
V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 440, 40, 220, 5, 5, 20, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 19) \
}
#define V4L2_DV_BT_CEA_1280X720P60 { \
@@ -116,7 +123,8 @@
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 110, 40, 220, 5, 5, 20, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+ V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 4) \
}
#define V4L2_DV_BT_CEA_1920X1080P24 { \
@@ -125,7 +133,8 @@
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 638, 44, 148, 4, 5, 36, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+ V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 32) \
}
#define V4L2_DV_BT_CEA_1920X1080P25 { \
@@ -133,7 +142,8 @@
V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 528, 44, 148, 4, 5, 36, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 33) \
}
#define V4L2_DV_BT_CEA_1920X1080P30 { \
@@ -142,7 +152,8 @@
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 88, 44, 148, 4, 5, 36, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+ V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 34) \
}
#define V4L2_DV_BT_CEA_1920X1080I50 { \
@@ -151,7 +162,8 @@
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 528, 44, 148, 2, 5, 15, 2, 5, 16, \
V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO | \
+ V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 20) \
}
#define V4L2_DV_BT_CEA_1920X1080P50 { \
@@ -159,7 +171,8 @@
V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
148500000, 528, 44, 148, 4, 5, 36, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 31) \
}
#define V4L2_DV_BT_CEA_1920X1080I60 { \
@@ -169,7 +182,8 @@
74250000, 88, 44, 148, 2, 5, 15, 2, 5, 16, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | \
- V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO | \
+ V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 5) \
}
#define V4L2_DV_BT_CEA_1920X1080P60 { \
@@ -178,7 +192,8 @@
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
148500000, 88, 44, 148, 4, 5, 36, 0, 0, 0, \
V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+ V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 16) \
}
#define V4L2_DV_BT_CEA_3840X2160P24 { \
@@ -187,7 +202,9 @@
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+ V4L2_DV_FL_HAS_CEA861_VIC | V4L2_DV_FL_HAS_HDMI_VIC, \
+ { 0, 0 }, 93, 3) \
}
#define V4L2_DV_BT_CEA_3840X2160P25 { \
@@ -195,7 +212,9 @@
V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_CEA861_VIC | \
+ V4L2_DV_FL_HAS_HDMI_VIC, { 0, 0 }, 94, 2) \
}
#define V4L2_DV_BT_CEA_3840X2160P30 { \
@@ -204,7 +223,9 @@
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+ V4L2_DV_FL_HAS_CEA861_VIC | V4L2_DV_FL_HAS_HDMI_VIC, \
+ { 0, 0 }, 95, 1) \
}
#define V4L2_DV_BT_CEA_3840X2160P50 { \
@@ -212,7 +233,8 @@
V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 96) \
}
#define V4L2_DV_BT_CEA_3840X2160P60 { \
@@ -221,7 +243,8 @@
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+ V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 97) \
}
#define V4L2_DV_BT_CEA_4096X2160P24 { \
@@ -230,7 +253,9 @@
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+ V4L2_DV_FL_HAS_CEA861_VIC | V4L2_DV_FL_HAS_HDMI_VIC, \
+ { 0, 0 }, 98, 4) \
}
#define V4L2_DV_BT_CEA_4096X2160P25 { \
@@ -238,7 +263,8 @@
V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 99) \
}
#define V4L2_DV_BT_CEA_4096X2160P30 { \
@@ -247,7 +273,8 @@
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+ V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 100) \
}
#define V4L2_DV_BT_CEA_4096X2160P50 { \
@@ -255,7 +282,8 @@
V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 101) \
}
#define V4L2_DV_BT_CEA_4096X2160P60 { \
@@ -264,7 +292,8 @@
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+ V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 102) \
}
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 255a2113f53c..519eff362c1c 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -203,6 +203,16 @@ struct vfio_device_info {
};
#define VFIO_DEVICE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 7)
+/*
+ * A vendor driver using the mediated device framework should provide a
+ * device_api attribute in the supported type attribute groups. The device API
+ * string should be one of the following, corresponding to the device flags in
+ * the vfio_device_info structure.
+ */
+
+#define VFIO_DEVICE_API_PCI_STRING "vfio-pci"
+#define VFIO_DEVICE_API_PLATFORM_STRING "vfio-platform"
+#define VFIO_DEVICE_API_AMBA_STRING "vfio-amba"
+
/**
* VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
* struct vfio_region_info)
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index 56b7ab584cc0..60180c0b5dc6 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -172,8 +172,6 @@ struct vhost_memory {
#define VHOST_F_LOG_ALL 26
/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
#define VHOST_NET_F_VIRTIO_NET_HDR 27
-/* Vhost have device IOTLB */
-#define VHOST_F_DEVICE_IOTLB 63
/* VHOST_SCSI specific definitions */
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 94f123f3e04e..46e8a2e369f9 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -335,6 +335,19 @@ enum v4l2_ycbcr_encoding {
};
/*
+ * enum v4l2_hsv_encoding values should not collide with the ones from
+ * enum v4l2_ycbcr_encoding.
+ */
+enum v4l2_hsv_encoding {
+
+ /* Hue mapped to 0 - 179 */
+ V4L2_HSV_ENC_180 = 128,
+
+ /* Hue mapped to 0-255 */
+ V4L2_HSV_ENC_256 = 129,
+};
+
+/*
* Determine how YCBCR_ENC_DEFAULT should map to a proper Y'CbCr encoding.
* This depends on the colorspace.
*/
@@ -362,9 +375,10 @@ enum v4l2_quantization {
* This depends on whether the image is RGB or not, the colorspace and the
* Y'CbCr encoding.
*/
-#define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb, colsp, ycbcr_enc) \
- (((is_rgb) && (colsp) == V4L2_COLORSPACE_BT2020) ? V4L2_QUANTIZATION_LIM_RANGE : \
- (((is_rgb) || (ycbcr_enc) == V4L2_YCBCR_ENC_XV601 || \
+#define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb_or_hsv, colsp, ycbcr_enc) \
+ (((is_rgb_or_hsv) && (colsp) == V4L2_COLORSPACE_BT2020) ? \
+ V4L2_QUANTIZATION_LIM_RANGE : \
+ (((is_rgb_or_hsv) || (ycbcr_enc) == V4L2_YCBCR_ENC_XV601 || \
(ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) || \
(colsp) == V4L2_COLORSPACE_ADOBERGB || (colsp) == V4L2_COLORSPACE_SRGB ? \
V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
@@ -462,7 +476,12 @@ struct v4l2_pix_format {
__u32 colorspace; /* enum v4l2_colorspace */
__u32 priv; /* private data, depends on pixelformat */
__u32 flags; /* format flags (V4L2_PIX_FMT_FLAG_*) */
- __u32 ycbcr_enc; /* enum v4l2_ycbcr_encoding */
+ union {
+ /* enum v4l2_ycbcr_encoding */
+ __u32 ycbcr_enc;
+ /* enum v4l2_hsv_encoding */
+ __u32 hsv_enc;
+ };
__u32 quantization; /* enum v4l2_quantization */
__u32 xfer_func; /* enum v4l2_xfer_func */
};
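A hedged sketch of filling the format structure for an HSV capture queue using the new union member; bytesperline and sizeimage are left for the driver to compute, and the helper name is illustrative.

#include <string.h>
#include <linux/videodev2.h>

/* Hypothetical: fill a pixel format for an HSV capture queue. */
static void fill_hsv_fmt(struct v4l2_pix_format *pix)
{
	memset(pix, 0, sizeof(*pix));
	pix->width = 640;
	pix->height = 480;
	pix->pixelformat = V4L2_PIX_FMT_HSV24;
	pix->field = V4L2_FIELD_NONE;
	/* hsv_enc shares storage with ycbcr_enc via the new union. */
	pix->hsv_enc = V4L2_HSV_ENC_256;
	pix->quantization = V4L2_QUANTIZATION_FULL_RANGE;
}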
@@ -586,6 +605,13 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SGRBG12 v4l2_fourcc('B', 'A', '1', '2') /* 12 GRGR.. BGBG.. */
#define V4L2_PIX_FMT_SRGGB12 v4l2_fourcc('R', 'G', '1', '2') /* 12 RGRG.. GBGB.. */
#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. */
+#define V4L2_PIX_FMT_SGBRG16 v4l2_fourcc('G', 'B', '1', '6') /* 16 GBGB.. RGRG.. */
+#define V4L2_PIX_FMT_SGRBG16 v4l2_fourcc('G', 'R', '1', '6') /* 16 GRGR.. BGBG.. */
+#define V4L2_PIX_FMT_SRGGB16 v4l2_fourcc('R', 'G', '1', '6') /* 16 RGRG.. GBGB.. */
+
+/* HSV formats */
+#define V4L2_PIX_FMT_HSV24 v4l2_fourcc('H', 'S', 'V', '3')
+#define V4L2_PIX_FMT_HSV32 v4l2_fourcc('H', 'S', 'V', '4')
/* compressed formats */
#define V4L2_PIX_FMT_MJPEG v4l2_fourcc('M', 'J', 'P', 'G') /* Motion-JPEG */
@@ -603,6 +629,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_VC1_ANNEX_G v4l2_fourcc('V', 'C', '1', 'G') /* SMPTE 421M Annex G compliant stream */
#define V4L2_PIX_FMT_VC1_ANNEX_L v4l2_fourcc('V', 'C', '1', 'L') /* SMPTE 421M Annex L compliant stream */
#define V4L2_PIX_FMT_VP8 v4l2_fourcc('V', 'P', '8', '0') /* VP8 */
+#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') /* VP9 */
/* Vendor-specific formats */
#define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
@@ -634,6 +661,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Y8I v4l2_fourcc('Y', '8', 'I', ' ') /* Greyscale 8-bit L/R interleaved */
#define V4L2_PIX_FMT_Y12I v4l2_fourcc('Y', '1', '2', 'I') /* Greyscale 12-bit L/R interleaved */
#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
+#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
/* SDR formats - used only for Software Defined Radio devices */
#define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */
@@ -1229,6 +1257,9 @@ struct v4l2_standard {
* (aka field 2) of interlaced field formats
* @standards: Standards the timing belongs to
* @flags: Flags
+ * @picture_aspect: The picture aspect ratio (hor/vert).
+ * @cea861_vic: VIC code as per the CEA-861 standard.
+ * @hdmi_vic: VIC code as per the HDMI standard.
* @reserved: Reserved fields, must be zeroed.
*
* A note regarding vertical interlaced timings: height refers to the total
@@ -1258,7 +1289,10 @@ struct v4l2_bt_timings {
__u32 il_vbackporch;
__u32 standards;
__u32 flags;
- __u32 reserved[14];
+ struct v4l2_fract picture_aspect;
+ __u8 cea861_vic;
+ __u8 hdmi_vic;
+ __u8 reserved[46];
} __attribute__ ((packed));
/* Interlaced or progressive format */
@@ -1278,39 +1312,66 @@ struct v4l2_bt_timings {
/* Flags */
-/* CVT/GTF specific: timing uses reduced blanking (CVT) or the 'Secondary
- GTF' curve (GTF). In both cases the horizontal and/or vertical blanking
- intervals are reduced, allowing a higher resolution over the same
- bandwidth. This is a read-only flag. */
+/*
+ * CVT/GTF specific: timing uses reduced blanking (CVT) or the 'Secondary
+ * GTF' curve (GTF). In both cases the horizontal and/or vertical blanking
+ * intervals are reduced, allowing a higher resolution over the same
+ * bandwidth. This is a read-only flag.
+ */
#define V4L2_DV_FL_REDUCED_BLANKING (1 << 0)
-/* CEA-861 specific: set for CEA-861 formats with a framerate of a multiple
- of six. These formats can be optionally played at 1 / 1.001 speed.
- This is a read-only flag. */
+/*
+ * CEA-861 specific: set for CEA-861 formats with a framerate of a multiple
+ * of six. These formats can be optionally played at 1 / 1.001 speed.
+ * This is a read-only flag.
+ */
#define V4L2_DV_FL_CAN_REDUCE_FPS (1 << 1)
-/* CEA-861 specific: only valid for video transmitters, the flag is cleared
- by receivers.
- If the framerate of the format is a multiple of six, then the pixelclock
- used to set up the transmitter is divided by 1.001 to make it compatible
- with 60 Hz based standards such as NTSC and PAL-M that use a framerate of
- 29.97 Hz. Otherwise this flag is cleared. If the transmitter can't generate
- such frequencies, then the flag will also be cleared. */
+/*
+ * CEA-861 specific: only valid for video transmitters, the flag is cleared
+ * by receivers.
+ * If the framerate of the format is a multiple of six, then the pixelclock
+ * used to set up the transmitter is divided by 1.001 to make it compatible
+ * with 60 Hz based standards such as NTSC and PAL-M that use a framerate of
+ * 29.97 Hz. Otherwise this flag is cleared. If the transmitter can't generate
+ * such frequencies, then the flag will also be cleared.
+ */
#define V4L2_DV_FL_REDUCED_FPS (1 << 2)
-/* Specific to interlaced formats: if set, then field 1 is really one half-line
- longer and field 2 is really one half-line shorter, so each field has
- exactly the same number of half-lines. Whether half-lines can be detected
- or used depends on the hardware. */
+/*
+ * Specific to interlaced formats: if set, then field 1 is really one half-line
+ * longer and field 2 is really one half-line shorter, so each field has
+ * exactly the same number of half-lines. Whether half-lines can be detected
+ * or used depends on the hardware.
+ */
#define V4L2_DV_FL_HALF_LINE (1 << 3)
-/* If set, then this is a Consumer Electronics (CE) video format. Such formats
+/*
+ * If set, then this is a Consumer Electronics (CE) video format. Such formats
* differ from other formats (commonly called IT formats) in that if RGB
* encoding is used then by default the RGB values use limited range (i.e.
* use the range 16-235) as opposed to 0-255. All formats defined in CEA-861
- * except for the 640x480 format are CE formats. */
+ * except for the 640x480 format are CE formats.
+ */
#define V4L2_DV_FL_IS_CE_VIDEO (1 << 4)
/* Some formats like SMPTE-125M have an interlaced signal with an odd
* total height. For these formats, if this flag is set, the first
* field has the extra line. If not, it is the second field.
*/
-#define V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE (1 << 5)
+#define V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE (1 << 5)
+/*
+ * If set, then the picture_aspect field is valid. Otherwise assume that the
+ * pixels are square, so the picture aspect ratio is the same as the width to
+ * height ratio.
+ */
+#define V4L2_DV_FL_HAS_PICTURE_ASPECT (1 << 6)
+/*
+ * If set, then the cea861_vic field is valid and contains the Video
+ * Identification Code as per the CEA-861 standard.
+ */
+#define V4L2_DV_FL_HAS_CEA861_VIC (1 << 7)
+/*
+ * If set, then the hdmi_vic field is valid and contains the Video
+ * Identification Code as per the HDMI standard (HDMI Vendor Specific
+ * InfoFrame).
+ */
+#define V4L2_DV_FL_HAS_HDMI_VIC (1 << 8)
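A short sketch of consuming the three new v4l2_bt_timings fields, checking the corresponding validity flags first; the helper name is illustrative.

#include <stdio.h>
#include <linux/videodev2.h>

/* Hypothetical: report the extra CEA-861/HDMI information if present. */
static void report_vic(const struct v4l2_bt_timings *bt)
{
	if (bt->flags & V4L2_DV_FL_HAS_PICTURE_ASPECT)
		printf("picture aspect %u:%u\n",
		       bt->picture_aspect.numerator,
		       bt->picture_aspect.denominator);
	if (bt->flags & V4L2_DV_FL_HAS_CEA861_VIC)
		printf("CEA-861 VIC %u\n", bt->cea861_vic);
	if (bt->flags & V4L2_DV_FL_HAS_HDMI_VIC)
		printf("HDMI VIC %u\n", bt->hdmi_vic);
}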
/* A few useful defines to calculate the total blanking and frame sizes */
#define V4L2_DV_BT_BLANKING_WIDTH(bt) \
@@ -2006,7 +2067,10 @@ struct v4l2_pix_format_mplane {
struct v4l2_plane_pix_format plane_fmt[VIDEO_MAX_PLANES];
__u8 num_planes;
__u8 flags;
- __u8 ycbcr_enc;
+ union {
+ __u8 ycbcr_enc;
+ __u8 hsv_enc;
+ };
__u8 quantization;
__u8 xfer_func;
__u8 reserved[7];
diff --git a/include/uapi/linux/virtio_crypto.h b/include/uapi/linux/virtio_crypto.h
new file mode 100644
index 000000000000..50cdc8aebfcf
--- /dev/null
+++ b/include/uapi/linux/virtio_crypto.h
@@ -0,0 +1,450 @@
+#ifndef _VIRTIO_CRYPTO_H
+#define _VIRTIO_CRYPTO_H
+/* This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <linux/types.h>
+#include <linux/virtio_types.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+
+
+#define VIRTIO_CRYPTO_SERVICE_CIPHER 0
+#define VIRTIO_CRYPTO_SERVICE_HASH 1
+#define VIRTIO_CRYPTO_SERVICE_MAC 2
+#define VIRTIO_CRYPTO_SERVICE_AEAD 3
+
+#define VIRTIO_CRYPTO_OPCODE(service, op) (((service) << 8) | (op))
+
+struct virtio_crypto_ctrl_header {
+#define VIRTIO_CRYPTO_CIPHER_CREATE_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x02)
+#define VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x03)
+#define VIRTIO_CRYPTO_HASH_CREATE_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x02)
+#define VIRTIO_CRYPTO_HASH_DESTROY_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x03)
+#define VIRTIO_CRYPTO_MAC_CREATE_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x02)
+#define VIRTIO_CRYPTO_MAC_DESTROY_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x03)
+#define VIRTIO_CRYPTO_AEAD_CREATE_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02)
+#define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03)
+ __le32 opcode;
+ __le32 algo;
+ __le32 flag;
+ /* data virtqueue id */
+ __le32 queue_id;
+};
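A small sanity sketch of how VIRTIO_CRYPTO_OPCODE() composes the control opcodes defined above: the service id lands in the high byte and the operation in the low byte.

#include <linux/virtio_crypto.h>

/* Hash service is 1 and MAC service is 2, so the create-session opcodes are: */
_Static_assert(VIRTIO_CRYPTO_HASH_CREATE_SESSION == 0x0102,
	       "service id in the high byte, operation in the low byte");
_Static_assert(VIRTIO_CRYPTO_MAC_CREATE_SESSION == 0x0202,
	       "service id in the high byte, operation in the low byte");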
+
+struct virtio_crypto_cipher_session_para {
+#define VIRTIO_CRYPTO_NO_CIPHER 0
+#define VIRTIO_CRYPTO_CIPHER_ARC4 1
+#define VIRTIO_CRYPTO_CIPHER_AES_ECB 2
+#define VIRTIO_CRYPTO_CIPHER_AES_CBC 3
+#define VIRTIO_CRYPTO_CIPHER_AES_CTR 4
+#define VIRTIO_CRYPTO_CIPHER_DES_ECB 5
+#define VIRTIO_CRYPTO_CIPHER_DES_CBC 6
+#define VIRTIO_CRYPTO_CIPHER_3DES_ECB 7
+#define VIRTIO_CRYPTO_CIPHER_3DES_CBC 8
+#define VIRTIO_CRYPTO_CIPHER_3DES_CTR 9
+#define VIRTIO_CRYPTO_CIPHER_KASUMI_F8 10
+#define VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2 11
+#define VIRTIO_CRYPTO_CIPHER_AES_F8 12
+#define VIRTIO_CRYPTO_CIPHER_AES_XTS 13
+#define VIRTIO_CRYPTO_CIPHER_ZUC_EEA3 14
+ __le32 algo;
+ /* length of key */
+ __le32 keylen;
+
+#define VIRTIO_CRYPTO_OP_ENCRYPT 1
+#define VIRTIO_CRYPTO_OP_DECRYPT 2
+ /* encrypt or decrypt */
+ __le32 op;
+ __le32 padding;
+};
+
+struct virtio_crypto_session_input {
+ /* Device-writable part */
+ __le64 session_id;
+ __le32 status;
+ __le32 padding;
+};
+
+struct virtio_crypto_cipher_session_req {
+ struct virtio_crypto_cipher_session_para para;
+ __u8 padding[32];
+};
+
+struct virtio_crypto_hash_session_para {
+#define VIRTIO_CRYPTO_NO_HASH 0
+#define VIRTIO_CRYPTO_HASH_MD5 1
+#define VIRTIO_CRYPTO_HASH_SHA1 2
+#define VIRTIO_CRYPTO_HASH_SHA_224 3
+#define VIRTIO_CRYPTO_HASH_SHA_256 4
+#define VIRTIO_CRYPTO_HASH_SHA_384 5
+#define VIRTIO_CRYPTO_HASH_SHA_512 6
+#define VIRTIO_CRYPTO_HASH_SHA3_224 7
+#define VIRTIO_CRYPTO_HASH_SHA3_256 8
+#define VIRTIO_CRYPTO_HASH_SHA3_384 9
+#define VIRTIO_CRYPTO_HASH_SHA3_512 10
+#define VIRTIO_CRYPTO_HASH_SHA3_SHAKE128 11
+#define VIRTIO_CRYPTO_HASH_SHA3_SHAKE256 12
+ __le32 algo;
+ /* hash result length */
+ __le32 hash_result_len;
+ __u8 padding[8];
+};
+
+struct virtio_crypto_hash_create_session_req {
+ struct virtio_crypto_hash_session_para para;
+ __u8 padding[40];
+};
+
+struct virtio_crypto_mac_session_para {
+#define VIRTIO_CRYPTO_NO_MAC 0
+#define VIRTIO_CRYPTO_MAC_HMAC_MD5 1
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA1 2
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA_224 3
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA_256 4
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA_384 5
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA_512 6
+#define VIRTIO_CRYPTO_MAC_CMAC_3DES 25
+#define VIRTIO_CRYPTO_MAC_CMAC_AES 26
+#define VIRTIO_CRYPTO_MAC_KASUMI_F9 27
+#define VIRTIO_CRYPTO_MAC_SNOW3G_UIA2 28
+#define VIRTIO_CRYPTO_MAC_GMAC_AES 41
+#define VIRTIO_CRYPTO_MAC_GMAC_TWOFISH 42
+#define VIRTIO_CRYPTO_MAC_CBCMAC_AES 49
+#define VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9 50
+#define VIRTIO_CRYPTO_MAC_XCBC_AES 53
+ __le32 algo;
+ /* hash result length */
+ __le32 hash_result_len;
+ /* length of authenticated key */
+ __le32 auth_key_len;
+ __le32 padding;
+};
+
+struct virtio_crypto_mac_create_session_req {
+ struct virtio_crypto_mac_session_para para;
+ __u8 padding[40];
+};
+
+struct virtio_crypto_aead_session_para {
+#define VIRTIO_CRYPTO_NO_AEAD 0
+#define VIRTIO_CRYPTO_AEAD_GCM 1
+#define VIRTIO_CRYPTO_AEAD_CCM 2
+#define VIRTIO_CRYPTO_AEAD_CHACHA20_POLY1305 3
+ __le32 algo;
+ /* length of key */
+ __le32 key_len;
+ /* hash result length */
+ __le32 hash_result_len;
+ /* length of the additional authenticated data (AAD) in bytes */
+ __le32 aad_len;
+ /* encrypt or decrypt, see above VIRTIO_CRYPTO_OP_* */
+ __le32 op;
+ __le32 padding;
+};
+
+struct virtio_crypto_aead_create_session_req {
+ struct virtio_crypto_aead_session_para para;
+ __u8 padding[32];
+};
+
+struct virtio_crypto_alg_chain_session_para {
+#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER 1
+#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH 2
+ __le32 alg_chain_order;
+/* Plain hash */
+#define VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN 1
+/* Authenticated hash (mac) */
+#define VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH 2
+/* Nested hash */
+#define VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED 3
+ __le32 hash_mode;
+ struct virtio_crypto_cipher_session_para cipher_param;
+ union {
+ struct virtio_crypto_hash_session_para hash_param;
+ struct virtio_crypto_mac_session_para mac_param;
+ __u8 padding[16];
+ } u;
+ /* length of the additional authenticated data (AAD) in bytes */
+ __le32 aad_len;
+ __le32 padding;
+};
+
+struct virtio_crypto_alg_chain_session_req {
+ struct virtio_crypto_alg_chain_session_para para;
+};
+
+struct virtio_crypto_sym_create_session_req {
+ union {
+ struct virtio_crypto_cipher_session_req cipher;
+ struct virtio_crypto_alg_chain_session_req chain;
+ __u8 padding[48];
+ } u;
+
+ /* Device-readable part */
+
+/* No operation */
+#define VIRTIO_CRYPTO_SYM_OP_NONE 0
+/* Cipher only operation on the data */
+#define VIRTIO_CRYPTO_SYM_OP_CIPHER 1
+/*
+ * Chain any cipher with any hash or mac operation. The order
+ * depends on the value of alg_chain_order param
+ */
+#define VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING 2
+ __le32 op_type;
+ __le32 padding;
+};
+
+struct virtio_crypto_destroy_session_req {
+ /* Device-readable part */
+ __le64 session_id;
+ __u8 padding[48];
+};
+
+/* The request of the control virtqueue's packet */
+struct virtio_crypto_op_ctrl_req {
+ struct virtio_crypto_ctrl_header header;
+
+ union {
+ struct virtio_crypto_sym_create_session_req
+ sym_create_session;
+ struct virtio_crypto_hash_create_session_req
+ hash_create_session;
+ struct virtio_crypto_mac_create_session_req
+ mac_create_session;
+ struct virtio_crypto_aead_create_session_req
+ aead_create_session;
+ struct virtio_crypto_destroy_session_req
+ destroy_session;
+ __u8 padding[56];
+ } u;
+};
+
+struct virtio_crypto_op_header {
+#define VIRTIO_CRYPTO_CIPHER_ENCRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x00)
+#define VIRTIO_CRYPTO_CIPHER_DECRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x01)
+#define VIRTIO_CRYPTO_HASH \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x00)
+#define VIRTIO_CRYPTO_MAC \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x00)
+#define VIRTIO_CRYPTO_AEAD_ENCRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00)
+#define VIRTIO_CRYPTO_AEAD_DECRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01)
+ __le32 opcode;
+ /* algo should be one of the service-specific algorithms above */
+ __le32 algo;
+ /* session_id is the identifier returned when the session was created */
+ __le64 session_id;
+ /* control flag to control the request */
+ __le32 flag;
+ __le32 padding;
+};
+
+struct virtio_crypto_cipher_para {
+ /*
+ * Byte Length of valid IV/Counter
+ *
+ * For block ciphers in CBC or F8 mode, or for Kasumi in F8 mode, or for
+ * SNOW3G in UEA2 mode, this is the length of the IV (which
+ * must be the same as the block length of the cipher).
+ * For block ciphers in CTR mode, this is the length of the counter
+ * (which must be the same as the block length of the cipher).
+ * For AES-XTS, this is the 128bit tweak, i, from IEEE Std 1619-2007.
+ *
+ * The IV/Counter will be updated after every partial cryptographic
+ * operation.
+ */
+ __le32 iv_len;
+ /* length of source data */
+ __le32 src_data_len;
+ /* length of dst data */
+ __le32 dst_data_len;
+ __le32 padding;
+};
+
+struct virtio_crypto_hash_para {
+ /* length of source data */
+ __le32 src_data_len;
+ /* hash result length */
+ __le32 hash_result_len;
+};
+
+struct virtio_crypto_mac_para {
+ struct virtio_crypto_hash_para hash;
+};
+
+struct virtio_crypto_aead_para {
+ /*
+ * Byte Length of valid IV data pointed to by the below iv_addr
+ * parameter.
+ *
+ * For GCM mode, this is either 12 (for 96-bit IVs) or 16, in which
+ * case iv_addr points to J0.
+ * For CCM mode, this is the length of the nonce, which can be in the
+ * range 7 to 13 inclusive.
+ */
+ __le32 iv_len;
+ /* length of additional auth data */
+ __le32 aad_len;
+ /* length of source data */
+ __le32 src_data_len;
+ /* length of dst data */
+ __le32 dst_data_len;
+};
+
+struct virtio_crypto_cipher_data_req {
+ /* Device-readable part */
+ struct virtio_crypto_cipher_para para;
+ __u8 padding[24];
+};
+
+struct virtio_crypto_hash_data_req {
+ /* Device-readable part */
+ struct virtio_crypto_hash_para para;
+ __u8 padding[40];
+};
+
+struct virtio_crypto_mac_data_req {
+ /* Device-readable part */
+ struct virtio_crypto_mac_para para;
+ __u8 padding[40];
+};
+
+struct virtio_crypto_alg_chain_data_para {
+ __le32 iv_len;
+ /* Length of source data */
+ __le32 src_data_len;
+ /* Length of destination data */
+ __le32 dst_data_len;
+ /* Starting point for cipher processing in source data */
+ __le32 cipher_start_src_offset;
+ /* Length of the source data that the cipher will be computed on */
+ __le32 len_to_cipher;
+ /* Starting point for hash processing in source data */
+ __le32 hash_start_src_offset;
+ /* Length of the source data that the hash will be computed on */
+ __le32 len_to_hash;
+ /* Length of the additional auth data */
+ __le32 aad_len;
+ /* Length of the hash result */
+ __le32 hash_result_len;
+ __le32 reserved;
+};
+
+struct virtio_crypto_alg_chain_data_req {
+ /* Device-readable part */
+ struct virtio_crypto_alg_chain_data_para para;
+};
+
+struct virtio_crypto_sym_data_req {
+ union {
+ struct virtio_crypto_cipher_data_req cipher;
+ struct virtio_crypto_alg_chain_data_req chain;
+ __u8 padding[40];
+ } u;
+
+ /* See above VIRTIO_CRYPTO_SYM_OP_* */
+ __le32 op_type;
+ __le32 padding;
+};
+
+struct virtio_crypto_aead_data_req {
+ /* Device-readable part */
+ struct virtio_crypto_aead_para para;
+ __u8 padding[32];
+};
+
+/* The request of the data virtqueue's packet */
+struct virtio_crypto_op_data_req {
+ struct virtio_crypto_op_header header;
+
+ union {
+ struct virtio_crypto_sym_data_req sym_req;
+ struct virtio_crypto_hash_data_req hash_req;
+ struct virtio_crypto_mac_data_req mac_req;
+ struct virtio_crypto_aead_data_req aead_req;
+ __u8 padding[48];
+ } u;
+};
+
+#define VIRTIO_CRYPTO_OK 0
+#define VIRTIO_CRYPTO_ERR 1
+#define VIRTIO_CRYPTO_BADMSG 2
+#define VIRTIO_CRYPTO_NOTSUPP 3
+#define VIRTIO_CRYPTO_INVSESS 4 /* Invalid session id */
+
+/* The accelerator hardware is ready */
+#define VIRTIO_CRYPTO_S_HW_READY (1 << 0)
+
+struct virtio_crypto_config {
+ /* See VIRTIO_CRYPTO_OP_* above */
+ __u32 status;
+
+ /*
+ * Maximum number of data virtqueues
+ */
+ __u32 max_dataqueues;
+
+ /*
+ * Specifies the services mask which the device supports,
+ * see VIRTIO_CRYPTO_SERVICE_* above
+ */
+ __u32 crypto_services;
+
+ /* Detailed algorithms mask */
+ __u32 cipher_algo_l;
+ __u32 cipher_algo_h;
+ __u32 hash_algo;
+ __u32 mac_algo_l;
+ __u32 mac_algo_h;
+ __u32 aead_algo;
+ /* Maximum length of cipher key */
+ __u32 max_cipher_key_len;
+ /* Maximum length of authenticated key */
+ __u32 max_auth_key_len;
+ __u32 reserve;
+ /* Maximum size of each crypto request's content */
+ __u64 max_size;
+};
+
+struct virtio_crypto_inhdr {
+ /* See VIRTIO_CRYPTO_* above */
+ __u8 status;
+};
+#endif
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 3228d582234a..6d5c3b2d4f4d 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -42,5 +42,6 @@
#define VIRTIO_ID_GPU 16 /* virtio GPU */
#define VIRTIO_ID_INPUT 18 /* virtio input */
#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
+#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/linux/virtio_types.h b/include/uapi/linux/virtio_types.h
index e845e8c4cbee..55c3b738722c 100644
--- a/include/uapi/linux/virtio_types.h
+++ b/include/uapi/linux/virtio_types.h
@@ -39,8 +39,8 @@
* - __le{16,32,64} for standard-compliant virtio devices
*/
-typedef __u16 __bitwise__ __virtio16;
-typedef __u32 __bitwise__ __virtio32;
-typedef __u64 __bitwise__ __virtio64;
+typedef __u16 __bitwise __virtio16;
+typedef __u32 __bitwise __virtio32;
+typedef __u64 __bitwise __virtio64;
#endif /* _UAPI_LINUX_VIRTIO_TYPES_H */
diff --git a/include/uapi/linux/vtpm_proxy.h b/include/uapi/linux/vtpm_proxy.h
index 41e8e2252a30..a69e991eb080 100644
--- a/include/uapi/linux/vtpm_proxy.h
+++ b/include/uapi/linux/vtpm_proxy.h
@@ -1,6 +1,7 @@
/*
* Definitions for the VTPM proxy driver
* Copyright (c) 2015, 2016, IBM Corporation
+ * Copyright (C) 2016 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -18,8 +19,23 @@
#include <linux/types.h>
#include <linux/ioctl.h>
-/* ioctls */
+/**
+ * enum vtpm_proxy_flags - flags for the proxy TPM
+ * @VTPM_PROXY_FLAG_TPM2: the proxy TPM uses TPM 2.0 protocol
+ */
+enum vtpm_proxy_flags {
+ VTPM_PROXY_FLAG_TPM2 = 1,
+};
+/**
+ * struct vtpm_proxy_new_dev - parameter structure for the
+ * %VTPM_PROXY_IOC_NEW_DEV ioctl
+ * @flags: flags for the proxy TPM
+ * @tpm_num: index of the TPM device
+ * @fd: the file descriptor used by the proxy TPM
+ * @major: the major number of the TPM device
+ * @minor: the minor number of the TPM device
+ */
struct vtpm_proxy_new_dev {
__u32 flags; /* input */
__u32 tpm_num; /* output */
@@ -28,9 +44,6 @@ struct vtpm_proxy_new_dev {
__u32 minor; /* output */
};
-/* above flags */
-#define VTPM_PROXY_FLAG_TPM2 1 /* emulator is TPM 2 */
-
-#define VTPM_PROXY_IOC_NEW_DEV _IOWR(0xa1, 0x00, struct vtpm_proxy_new_dev)
+#define VTPM_PROXY_IOC_NEW_DEV _IOWR(0xa1, 0x00, struct vtpm_proxy_new_dev)
#endif /* _UAPI_LINUX_VTPM_PROXY_H */
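A hedged sketch of the ioctl defined above, assuming the driver's /dev/vtpmx control node; the emulator then services TPM commands on the returned descriptor.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vtpm_proxy.h>

int main(void)
{
	struct vtpm_proxy_new_dev dev;
	int fd = open("/dev/vtpmx", O_RDWR);	/* control node, assumed name */

	if (fd < 0)
		return 1;

	memset(&dev, 0, sizeof(dev));
	dev.flags = VTPM_PROXY_FLAG_TPM2;	/* emulate a TPM 2.0 device */

	if (ioctl(fd, VTPM_PROXY_IOC_NEW_DEV, &dev) < 0) {
		perror("VTPM_PROXY_IOC_NEW_DEV");
		return 1;
	}
	printf("created /dev/tpm%u, emulator serves commands on fd %d\n",
	       dev.tpm_num, dev.fd);
	close(fd);
	return 0;
}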
diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild
index f14ab7ff5fee..82bdf5626859 100644
--- a/include/uapi/rdma/Kbuild
+++ b/include/uapi/rdma/Kbuild
@@ -14,3 +14,5 @@ header-y += mlx5-abi.h
header-y += mthca-abi.h
header-y += nes-abi.h
header-y += ocrdma-abi.h
+header-y += hns-abi.h
+header-y += vmw_pvrdma-abi.h
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
index d15e7289d835..587b7360e820 100644
--- a/include/uapi/rdma/hfi/hfi1_user.h
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -75,7 +75,7 @@
* may not be implemented; the user code must deal with this if it
* cares, or it must abort after initialization reports the difference.
*/
-#define HFI1_USER_SWMINOR 2
+#define HFI1_USER_SWMINOR 3
/*
* We will encode the major/minor inside a single 32bit version number.
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
new file mode 100644
index 000000000000..5d7401963e35
--- /dev/null
+++ b/include/uapi/rdma/hns-abi.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef HNS_ABI_USER_H
+#define HNS_ABI_USER_H
+
+#include <linux/types.h>
+
+struct hns_roce_ib_create_cq {
+ __u64 buf_addr;
+};
+
+struct hns_roce_ib_create_qp {
+ __u64 buf_addr;
+ __u64 db_addr;
+ __u8 log_sq_bb_count;
+ __u8 log_sq_stride;
+ __u8 sq_no_prefetch;
+ __u8 reserved[5];
+};
+
+struct hns_roce_ib_alloc_ucontext_resp {
+ __u32 qp_tab_size;
+};
+#endif /* HNS_ABI_USER_H */
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 25225ebbc7d5..dfdfe4e92d31 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -37,6 +37,7 @@
#define IB_USER_VERBS_H
#include <linux/types.h>
+#include <rdma/ib_verbs.h>
/*
* Increment this value if any changes that break userspace ABI
@@ -93,6 +94,7 @@ enum {
IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
IB_USER_VERBS_EX_CMD_CREATE_CQ = IB_USER_VERBS_CMD_CREATE_CQ,
IB_USER_VERBS_EX_CMD_CREATE_QP = IB_USER_VERBS_CMD_CREATE_QP,
+ IB_USER_VERBS_EX_CMD_MODIFY_QP = IB_USER_VERBS_CMD_MODIFY_QP,
IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
IB_USER_VERBS_EX_CMD_CREATE_WQ,
@@ -545,6 +547,14 @@ enum {
IB_UVERBS_CREATE_QP_SUP_COMP_MASK = IB_UVERBS_CREATE_QP_MASK_IND_TABLE,
};
+enum {
+ IB_USER_LEGACY_LAST_QP_ATTR_MASK = IB_QP_DEST_QPN
+};
+
+enum {
+ IB_USER_LAST_QP_ATTR_MASK = IB_QP_RATE_LIMIT
+};
+
struct ib_uverbs_ex_create_qp {
__u64 user_handle;
__u32 pd_handle;
@@ -684,9 +694,20 @@ struct ib_uverbs_modify_qp {
__u64 driver_data[0];
};
+struct ib_uverbs_ex_modify_qp {
+ struct ib_uverbs_modify_qp base;
+ __u32 rate_limit;
+ __u32 reserved;
+};
+
struct ib_uverbs_modify_qp_resp {
};
+struct ib_uverbs_ex_modify_qp_resp {
+ __u32 comp_mask;
+ __u32 response_length;
+};
+
struct ib_uverbs_destroy_qp {
__u64 response;
__u32 qp_handle;
@@ -908,6 +929,23 @@ struct ib_uverbs_flow_spec_ipv6 {
struct ib_uverbs_flow_ipv6_filter mask;
};
+struct ib_uverbs_flow_tunnel_filter {
+ __be32 tunnel_id;
+};
+
+struct ib_uverbs_flow_spec_tunnel {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ struct ib_uverbs_flow_tunnel_filter val;
+ struct ib_uverbs_flow_tunnel_filter mask;
+};
+
struct ib_uverbs_flow_attr {
__u32 type;
__u16 size;
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index f5d0f4e83b59..fae6cdaeb56d 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -82,6 +82,7 @@ enum mlx5_ib_alloc_ucontext_resp_mask {
enum mlx5_user_cmds_supp_uhw {
MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
+ MLX5_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1,
};
struct mlx5_ib_alloc_ucontext_resp {
@@ -124,18 +125,47 @@ struct mlx5_ib_rss_caps {
__u8 reserved[7];
};
+enum mlx5_ib_cqe_comp_res_format {
+ MLX5_IB_CQE_RES_FORMAT_HASH = 1 << 0,
+ MLX5_IB_CQE_RES_FORMAT_CSUM = 1 << 1,
+ MLX5_IB_CQE_RES_RESERVED = 1 << 2,
+};
+
+struct mlx5_ib_cqe_comp_caps {
+ __u32 max_num;
+ __u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */
+};
+
+struct mlx5_packet_pacing_caps {
+ __u32 qp_rate_limit_min;
+ __u32 qp_rate_limit_max; /* In kbps */
+
+ /* Corresponding bit will be set if qp type from
+ * 'enum ib_qp_type' is supported, e.g.
+ * supported_qpts |= 1 << IB_QPT_RAW_PACKET
+ */
+ __u32 supported_qpts;
+ __u32 reserved;
+};
+
struct mlx5_ib_query_device_resp {
__u32 comp_mask;
__u32 response_length;
struct mlx5_ib_tso_caps tso_caps;
struct mlx5_ib_rss_caps rss_caps;
+ struct mlx5_ib_cqe_comp_caps cqe_comp_caps;
+ struct mlx5_packet_pacing_caps packet_pacing_caps;
+ __u32 mlx5_ib_support_multi_pkt_send_wqes;
+ __u32 reserved;
};
struct mlx5_ib_create_cq {
__u64 buf_addr;
__u64 db_addr;
__u32 cqe_size;
- __u32 reserved; /* explicit padding (optional on i386) */
+ __u8 cqe_comp_en;
+ __u8 cqe_comp_res_format;
+ __u16 reserved; /* explicit padding (optional on i386) */
};
struct mlx5_ib_create_cq_resp {
@@ -232,6 +262,12 @@ struct mlx5_ib_create_wq {
__u32 reserved;
};
+struct mlx5_ib_create_ah_resp {
+ __u32 response_length;
+ __u8 dmac[ETH_ALEN];
+ __u8 reserved[6];
+};
+
struct mlx5_ib_create_wq_resp {
__u32 response_length;
__u32 reserved;
diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
index 01923d463673..d71da36e3cd6 100644
--- a/include/uapi/rdma/rdma_user_cm.h
+++ b/include/uapi/rdma/rdma_user_cm.h
@@ -110,7 +110,7 @@ struct rdma_ucm_bind {
__u32 id;
__u16 addr_size;
__u16 reserved;
- struct sockaddr_storage addr;
+ struct __kernel_sockaddr_storage addr;
};
struct rdma_ucm_resolve_ip {
@@ -126,8 +126,8 @@ struct rdma_ucm_resolve_addr {
__u16 src_size;
__u16 dst_size;
__u32 reserved;
- struct sockaddr_storage src_addr;
- struct sockaddr_storage dst_addr;
+ struct __kernel_sockaddr_storage src_addr;
+ struct __kernel_sockaddr_storage dst_addr;
};
struct rdma_ucm_resolve_route {
@@ -164,8 +164,8 @@ struct rdma_ucm_query_addr_resp {
__u16 pkey;
__u16 src_size;
__u16 dst_size;
- struct sockaddr_storage src_addr;
- struct sockaddr_storage dst_addr;
+ struct __kernel_sockaddr_storage src_addr;
+ struct __kernel_sockaddr_storage dst_addr;
};
struct rdma_ucm_query_path_resp {
@@ -257,7 +257,7 @@ struct rdma_ucm_join_mcast {
__u32 id;
__u16 addr_size;
__u16 join_flags;
- struct sockaddr_storage addr;
+ struct __kernel_sockaddr_storage addr;
};
struct rdma_ucm_get_event {
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
new file mode 100644
index 000000000000..5016abc9ee97
--- /dev/null
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __VMW_PVRDMA_ABI_H__
+#define __VMW_PVRDMA_ABI_H__
+
+#include <linux/types.h>
+
+#define PVRDMA_UVERBS_ABI_VERSION 3 /* ABI Version. */
+#define PVRDMA_UAR_HANDLE_MASK 0x00FFFFFF /* Bottom 24 bits. */
+#define PVRDMA_UAR_QP_OFFSET 0 /* QP doorbell. */
+#define PVRDMA_UAR_QP_SEND BIT(30) /* Send bit. */
+#define PVRDMA_UAR_QP_RECV BIT(31) /* Recv bit. */
+#define PVRDMA_UAR_CQ_OFFSET 4 /* CQ doorbell. */
+#define PVRDMA_UAR_CQ_ARM_SOL BIT(29) /* Arm solicited bit. */
+#define PVRDMA_UAR_CQ_ARM BIT(30) /* Arm bit. */
+#define PVRDMA_UAR_CQ_POLL BIT(31) /* Poll bit. */
+
+enum pvrdma_wr_opcode {
+ PVRDMA_WR_RDMA_WRITE,
+ PVRDMA_WR_RDMA_WRITE_WITH_IMM,
+ PVRDMA_WR_SEND,
+ PVRDMA_WR_SEND_WITH_IMM,
+ PVRDMA_WR_RDMA_READ,
+ PVRDMA_WR_ATOMIC_CMP_AND_SWP,
+ PVRDMA_WR_ATOMIC_FETCH_AND_ADD,
+ PVRDMA_WR_LSO,
+ PVRDMA_WR_SEND_WITH_INV,
+ PVRDMA_WR_RDMA_READ_WITH_INV,
+ PVRDMA_WR_LOCAL_INV,
+ PVRDMA_WR_FAST_REG_MR,
+ PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP,
+ PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
+ PVRDMA_WR_BIND_MW,
+ PVRDMA_WR_REG_SIG_MR,
+};
+
+enum pvrdma_wc_status {
+ PVRDMA_WC_SUCCESS,
+ PVRDMA_WC_LOC_LEN_ERR,
+ PVRDMA_WC_LOC_QP_OP_ERR,
+ PVRDMA_WC_LOC_EEC_OP_ERR,
+ PVRDMA_WC_LOC_PROT_ERR,
+ PVRDMA_WC_WR_FLUSH_ERR,
+ PVRDMA_WC_MW_BIND_ERR,
+ PVRDMA_WC_BAD_RESP_ERR,
+ PVRDMA_WC_LOC_ACCESS_ERR,
+ PVRDMA_WC_REM_INV_REQ_ERR,
+ PVRDMA_WC_REM_ACCESS_ERR,
+ PVRDMA_WC_REM_OP_ERR,
+ PVRDMA_WC_RETRY_EXC_ERR,
+ PVRDMA_WC_RNR_RETRY_EXC_ERR,
+ PVRDMA_WC_LOC_RDD_VIOL_ERR,
+ PVRDMA_WC_REM_INV_RD_REQ_ERR,
+ PVRDMA_WC_REM_ABORT_ERR,
+ PVRDMA_WC_INV_EECN_ERR,
+ PVRDMA_WC_INV_EEC_STATE_ERR,
+ PVRDMA_WC_FATAL_ERR,
+ PVRDMA_WC_RESP_TIMEOUT_ERR,
+ PVRDMA_WC_GENERAL_ERR,
+};
+
+enum pvrdma_wc_opcode {
+ PVRDMA_WC_SEND,
+ PVRDMA_WC_RDMA_WRITE,
+ PVRDMA_WC_RDMA_READ,
+ PVRDMA_WC_COMP_SWAP,
+ PVRDMA_WC_FETCH_ADD,
+ PVRDMA_WC_BIND_MW,
+ PVRDMA_WC_LSO,
+ PVRDMA_WC_LOCAL_INV,
+ PVRDMA_WC_FAST_REG_MR,
+ PVRDMA_WC_MASKED_COMP_SWAP,
+ PVRDMA_WC_MASKED_FETCH_ADD,
+ PVRDMA_WC_RECV = 1 << 7,
+ PVRDMA_WC_RECV_RDMA_WITH_IMM,
+};
+
+enum pvrdma_wc_flags {
+ PVRDMA_WC_GRH = 1 << 0,
+ PVRDMA_WC_WITH_IMM = 1 << 1,
+ PVRDMA_WC_WITH_INVALIDATE = 1 << 2,
+ PVRDMA_WC_IP_CSUM_OK = 1 << 3,
+ PVRDMA_WC_WITH_SMAC = 1 << 4,
+ PVRDMA_WC_WITH_VLAN = 1 << 5,
+ PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_VLAN,
+};
+
+struct pvrdma_alloc_ucontext_resp {
+ __u32 qp_tab_size;
+ __u32 reserved;
+};
+
+struct pvrdma_alloc_pd_resp {
+ __u32 pdn;
+ __u32 reserved;
+};
+
+struct pvrdma_create_cq {
+ __u64 buf_addr;
+ __u32 buf_size;
+ __u32 reserved;
+};
+
+struct pvrdma_create_cq_resp {
+ __u32 cqn;
+ __u32 reserved;
+};
+
+struct pvrdma_resize_cq {
+ __u64 buf_addr;
+ __u32 buf_size;
+ __u32 reserved;
+};
+
+struct pvrdma_create_srq {
+ __u64 buf_addr;
+};
+
+struct pvrdma_create_srq_resp {
+ __u32 srqn;
+ __u32 reserved;
+};
+
+struct pvrdma_create_qp {
+ __u64 rbuf_addr;
+ __u64 sbuf_addr;
+ __u32 rbuf_size;
+ __u32 sbuf_size;
+ __u64 qp_addr;
+};
+
+/* PVRDMA masked atomic compare and swap */
+struct pvrdma_ex_cmp_swap {
+ __u64 swap_val;
+ __u64 compare_val;
+ __u64 swap_mask;
+ __u64 compare_mask;
+};
+
+/* PVRDMA masked atomic fetch and add */
+struct pvrdma_ex_fetch_add {
+ __u64 add_val;
+ __u64 field_boundary;
+};
+
+/* PVRDMA address vector. */
+struct pvrdma_av {
+ __u32 port_pd;
+ __u32 sl_tclass_flowlabel;
+ __u8 dgid[16];
+ __u8 src_path_bits;
+ __u8 gid_index;
+ __u8 stat_rate;
+ __u8 hop_limit;
+ __u8 dmac[6];
+ __u8 reserved[6];
+};
+
+/* PVRDMA scatter/gather entry */
+struct pvrdma_sge {
+ __u64 addr;
+ __u32 length;
+ __u32 lkey;
+};
+
+/* PVRDMA receive queue work request */
+struct pvrdma_rq_wqe_hdr {
+ __u64 wr_id; /* wr id */
+ __u32 num_sge; /* size of s/g array */
+ __u32 total_len; /* reserved */
+};
+/* Use pvrdma_sge (ib_sge) for receive queue s/g array elements. */
+
+/* PVRDMA send queue work request */
+struct pvrdma_sq_wqe_hdr {
+ __u64 wr_id; /* wr id */
+ __u32 num_sge; /* size of s/g array */
+ __u32 total_len; /* reserved */
+ __u32 opcode; /* operation type */
+ __u32 send_flags; /* wr flags */
+ union {
+ __u32 imm_data;
+ __u32 invalidate_rkey;
+ } ex;
+ __u32 reserved;
+ union {
+ struct {
+ __u64 remote_addr;
+ __u32 rkey;
+ __u8 reserved[4];
+ } rdma;
+ struct {
+ __u64 remote_addr;
+ __u64 compare_add;
+ __u64 swap;
+ __u32 rkey;
+ __u32 reserved;
+ } atomic;
+ struct {
+ __u64 remote_addr;
+ __u32 log_arg_sz;
+ __u32 rkey;
+ union {
+ struct pvrdma_ex_cmp_swap cmp_swap;
+ struct pvrdma_ex_fetch_add fetch_add;
+ } wr_data;
+ } masked_atomics;
+ struct {
+ __u64 iova_start;
+ __u64 pl_pdir_dma;
+ __u32 page_shift;
+ __u32 page_list_len;
+ __u32 length;
+ __u32 access_flags;
+ __u32 rkey;
+ } fast_reg;
+ struct {
+ __u32 remote_qpn;
+ __u32 remote_qkey;
+ struct pvrdma_av av;
+ } ud;
+ } wr;
+};
+/* Use pvrdma_sge (ib_sge) for send queue s/g array elements. */
+
+/* Completion queue element. */
+struct pvrdma_cqe {
+ __u64 wr_id;
+ __u64 qp;
+ __u32 opcode;
+ __u32 status;
+ __u32 byte_len;
+ __u32 imm_data;
+ __u32 src_qp;
+ __u32 wc_flags;
+ __u32 vendor_err;
+ __u16 pkey_index;
+ __u16 slid;
+ __u8 sl;
+ __u8 dlid_path_bits;
+ __u8 port_num;
+ __u8 smac[6];
+ __u8 reserved2[7]; /* Pad to next power of 2 (64). */
+};
+
+#endif /* __VMW_PVRDMA_ABI_H__ */
diff --git a/include/uapi/scsi/fc/fc_fs.h b/include/uapi/scsi/fc/fc_fs.h
index 50f28b143451..dcf314dc2a27 100644
--- a/include/uapi/scsi/fc/fc_fs.h
+++ b/include/uapi/scsi/fc/fc_fs.h
@@ -190,6 +190,7 @@ enum fc_fh_type {
FC_TYPE_FCP = 0x08, /* SCSI FCP */
FC_TYPE_CT = 0x20, /* Fibre Channel Services (FC-CT) */
FC_TYPE_ILS = 0x22, /* internal link service */
+ FC_TYPE_NVME = 0x28, /* FC-NVME */
};
/*
@@ -203,6 +204,7 @@ enum fc_fh_type {
[FC_TYPE_FCP] = "FCP", \
[FC_TYPE_CT] = "CT", \
[FC_TYPE_ILS] = "ILS", \
+ [FC_TYPE_NVME] = "NVME", \
}
/*
diff --git a/include/video/display_timing.h b/include/video/display_timing.h
index 28d9d0d566ca..3d289e990aca 100644
--- a/include/video/display_timing.h
+++ b/include/video/display_timing.h
@@ -28,6 +28,10 @@ enum display_flags {
DISPLAY_FLAGS_INTERLACED = BIT(8),
DISPLAY_FLAGS_DOUBLESCAN = BIT(9),
DISPLAY_FLAGS_DOUBLECLK = BIT(10),
+ /* drive sync on pos. edge */
+ DISPLAY_FLAGS_SYNC_POSEDGE = BIT(11),
+ /* drive sync on neg. edge */
+ DISPLAY_FLAGS_SYNC_NEGEDGE = BIT(12),
};
/*
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index 173073eb6aaf..53cd07ccaa4c 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -247,8 +247,6 @@ void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
unsigned int uv_stride,
unsigned int u_offset,
unsigned int v_offset);
-void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
- u32 pixel_format, int stride, int height);
int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc);
int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image);
void ipu_cpmem_dump(struct ipuv3_channel *ch);
@@ -320,6 +318,7 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
bool ipu_csi_is_interlaced(struct ipu_csi *csi);
void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w);
void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w);
+void ipu_csi_set_downsize(struct ipu_csi *csi, bool horiz, bool vert);
void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
u32 r_value, u32 g_value, u32 b_value,
u32 pix_clk);
diff --git a/include/video/of_display_timing.h b/include/video/of_display_timing.h
index ea755b5616d8..956455fc9f9a 100644
--- a/include/video/of_display_timing.h
+++ b/include/video/of_display_timing.h
@@ -16,21 +16,22 @@ struct display_timings;
#define OF_USE_NATIVE_MODE -1
#ifdef CONFIG_OF
-int of_get_display_timing(struct device_node *np, const char *name,
+int of_get_display_timing(const struct device_node *np, const char *name,
struct display_timing *dt);
-struct display_timings *of_get_display_timings(struct device_node *np);
-int of_display_timings_exist(struct device_node *np);
+struct display_timings *of_get_display_timings(const struct device_node *np);
+int of_display_timings_exist(const struct device_node *np);
#else
-static inline int of_get_display_timing(struct device_node *np, const char *name,
- struct display_timing *dt)
+static inline int of_get_display_timing(const struct device_node *np,
+ const char *name, struct display_timing *dt)
{
return -ENOSYS;
}
-static inline struct display_timings *of_get_display_timings(struct device_node *np)
+static inline struct display_timings *
+of_get_display_timings(const struct device_node *np)
{
return NULL;
}
-static inline int of_display_timings_exist(struct device_node *np)
+static inline int of_display_timings_exist(const struct device_node *np)
{
return -ENOSYS;
}
diff --git a/include/xen/arm/hypercall.h b/include/xen/arm/hypercall.h
new file mode 100644
index 000000000000..9d874db13c0e
--- /dev/null
+++ b/include/xen/arm/hypercall.h
@@ -0,0 +1,87 @@
+/******************************************************************************
+ * hypercall.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _ASM_ARM_XEN_HYPERCALL_H
+#define _ASM_ARM_XEN_HYPERCALL_H
+
+#include <linux/bug.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/sched.h>
+#include <xen/interface/platform.h>
+
+long privcmd_call(unsigned call, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5);
+int HYPERVISOR_xen_version(int cmd, void *arg);
+int HYPERVISOR_console_io(int cmd, int count, char *str);
+int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
+int HYPERVISOR_sched_op(int cmd, void *arg);
+int HYPERVISOR_event_channel_op(int cmd, void *arg);
+unsigned long HYPERVISOR_hvm_op(int op, void *arg);
+int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
+int HYPERVISOR_physdev_op(int cmd, void *arg);
+int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
+int HYPERVISOR_tmem_op(void *arg);
+int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
+int HYPERVISOR_platform_op_raw(void *arg);
+static inline int HYPERVISOR_platform_op(struct xen_platform_op *op)
+{
+ op->interface_version = XENPF_INTERFACE_VERSION;
+ return HYPERVISOR_platform_op_raw(op);
+}
+int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr);
+
+static inline int
+HYPERVISOR_suspend(unsigned long start_info_mfn)
+{
+ struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
+
+ /* start_info_mfn is unused on ARM */
+ return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
+}
+
+static inline void
+MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
+ unsigned int new_val, unsigned long flags)
+{
+ BUG();
+}
+
+static inline void
+MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
+ int count, int *success_count, domid_t domid)
+{
+ BUG();
+}
+
+#endif /* _ASM_ARM_XEN_HYPERCALL_H */
diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h
new file mode 100644
index 000000000000..95251512e2c4
--- /dev/null
+++ b/include/xen/arm/hypervisor.h
@@ -0,0 +1,39 @@
+#ifndef _ASM_ARM_XEN_HYPERVISOR_H
+#define _ASM_ARM_XEN_HYPERVISOR_H
+
+#include <linux/init.h>
+
+extern struct shared_info *HYPERVISOR_shared_info;
+extern struct start_info *xen_start_info;
+
+/* Lazy mode for batching updates / context switch */
+enum paravirt_lazy_mode {
+ PARAVIRT_LAZY_NONE,
+ PARAVIRT_LAZY_MMU,
+ PARAVIRT_LAZY_CPU,
+};
+
+static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
+{
+ return PARAVIRT_LAZY_NONE;
+}
+
+extern struct dma_map_ops *xen_dma_ops;
+
+#ifdef CONFIG_XEN
+void __init xen_early_init(void);
+#else
+static inline void xen_early_init(void) { return; }
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+static inline void xen_arch_register_cpu(int num)
+{
+}
+
+static inline void xen_arch_unregister_cpu(int num)
+{
+}
+#endif
+
+#endif /* _ASM_ARM_XEN_HYPERVISOR_H */
diff --git a/include/xen/arm/interface.h b/include/xen/arm/interface.h
new file mode 100644
index 000000000000..75d596862892
--- /dev/null
+++ b/include/xen/arm/interface.h
@@ -0,0 +1,85 @@
+/******************************************************************************
+ * Guest OS interface to ARM Xen.
+ *
+ * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
+ */
+
+#ifndef _ASM_ARM_XEN_INTERFACE_H
+#define _ASM_ARM_XEN_INTERFACE_H
+
+#include <linux/types.h>
+
+#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
+
+#define __DEFINE_GUEST_HANDLE(name, type) \
+ typedef struct { union { type *p; uint64_aligned_t q; }; } \
+ __guest_handle_ ## name
+
+#define DEFINE_GUEST_HANDLE_STRUCT(name) \
+ __DEFINE_GUEST_HANDLE(name, struct name)
+#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
+#define GUEST_HANDLE(name) __guest_handle_ ## name
+
+#define set_xen_guest_handle(hnd, val) \
+ do { \
+ if (sizeof(hnd) == 8) \
+ *(uint64_t *)&(hnd) = 0; \
+ (hnd).p = val; \
+ } while (0)
+
+#define __HYPERVISOR_platform_op_raw __HYPERVISOR_platform_op
+
+#ifndef __ASSEMBLY__
+/* Explicitly size integers that represent pfns in the interface with
+ * Xen so that we can have one ABI that works for 32 and 64 bit guests.
+ * Note that this means that the xen_pfn_t type may be capable of
+ * representing pfns which the guest cannot represent in its own pfn
+ * type. However, since pfn space is controlled by the guest, this is
+ * fine since it simply wouldn't be able to create any such pfns in
+ * the first place.
+ */
+typedef uint64_t xen_pfn_t;
+#define PRI_xen_pfn "llx"
+typedef uint64_t xen_ulong_t;
+#define PRI_xen_ulong "llx"
+typedef int64_t xen_long_t;
+#define PRI_xen_long "llx"
+/* Guest handles for primitive C types. */
+__DEFINE_GUEST_HANDLE(uchar, unsigned char);
+__DEFINE_GUEST_HANDLE(uint, unsigned int);
+DEFINE_GUEST_HANDLE(char);
+DEFINE_GUEST_HANDLE(int);
+DEFINE_GUEST_HANDLE(void);
+DEFINE_GUEST_HANDLE(uint64_t);
+DEFINE_GUEST_HANDLE(uint32_t);
+DEFINE_GUEST_HANDLE(xen_pfn_t);
+DEFINE_GUEST_HANDLE(xen_ulong_t);
+
+/* Maximum number of virtual CPUs in multi-processor guests. */
+#define MAX_VIRT_CPUS 1
+
+struct arch_vcpu_info { };
+struct arch_shared_info { };
+
+/* TODO: Move pvclock definitions some place arch independent */
+struct pvclock_vcpu_time_info {
+ u32 version;
+ u32 pad0;
+ u64 tsc_timestamp;
+ u64 system_time;
+ u32 tsc_to_system_mul;
+ s8 tsc_shift;
+ u8 flags;
+ u8 pad[2];
+} __attribute__((__packed__)); /* 32 bytes */
+
+/* It is OK to have a 16-byte struct with no padding because it is packed */
+struct pvclock_wall_clock {
+ u32 version;
+ u32 sec;
+ u32 nsec;
+ u32 sec_hi;
+} __attribute__((__packed__));
+#endif
+
+#endif /* _ASM_ARM_XEN_INTERFACE_H */
diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h
new file mode 100644
index 000000000000..95ce6ac3a971
--- /dev/null
+++ b/include/xen/arm/page-coherent.h
@@ -0,0 +1,98 @@
+#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
+#define _ASM_ARM_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-mapping.h>
+
+void __xen_dma_map_page(struct device *hwdev, struct page *page,
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+void __xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+void __xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
+{
+ return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
+{
+ __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ unsigned long page_pfn = page_to_xen_pfn(page);
+ unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
+ unsigned long compound_pages =
+ (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
+ bool local = (page_pfn <= dev_pfn) &&
+ (dev_pfn - page_pfn < compound_pages);
+
+ /*
+ * Dom0 is mapped 1:1, and while a Linux page can span across
+ * multiple Xen pages, it cannot contain a mix of local and
+ * foreign Xen pages. So if the first xen_pfn == mfn, the page
+ * is local; otherwise it is a foreign page grant-mapped in
+ * dom0. If the page is local we can safely call the native
+ * dma_ops function, otherwise we call the Xen-specific
+ * function.
+ */
+ if (local)
+ __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+ else
+ __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+ /*
+ * Dom0 is mapped 1:1, and while a Linux page can span across
+ * multiple Xen pages, it cannot contain a mix of local and
+ * foreign Xen pages. Since Dom0 is mapped 1:1, calling pfn_valid
+ * on a foreign mfn will always return false. If the page is local
+ * we can safely call the native dma_ops function, otherwise we
+ * call the Xen-specific function.
+ */
+ if (pfn_valid(pfn)) {
+ if (__generic_dma_ops(hwdev)->unmap_page)
+ __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+ } else
+ __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+ if (pfn_valid(pfn)) {
+ if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
+ __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+ } else
+ __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+ if (pfn_valid(pfn)) {
+ if (__generic_dma_ops(hwdev)->sync_single_for_device)
+ __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+ } else
+ __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
+}
+
+#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/include/xen/arm/page.h b/include/xen/arm/page.h
new file mode 100644
index 000000000000..415dbc6e43fd
--- /dev/null
+++ b/include/xen/arm/page.h
@@ -0,0 +1,122 @@
+#ifndef _ASM_ARM_XEN_PAGE_H
+#define _ASM_ARM_XEN_PAGE_H
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+#include <linux/pfn.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+
+#include <xen/xen.h>
+#include <xen/interface/grant_table.h>
+
+#define phys_to_machine_mapping_valid(pfn) (1)
+
+/* Xen machine address */
+typedef struct xmaddr {
+ phys_addr_t maddr;
+} xmaddr_t;
+
+/* Xen pseudo-physical address */
+typedef struct xpaddr {
+ phys_addr_t paddr;
+} xpaddr_t;
+
+#define XMADDR(x) ((xmaddr_t) { .maddr = (x) })
+#define XPADDR(x) ((xpaddr_t) { .paddr = (x) })
+
+#define INVALID_P2M_ENTRY (~0UL)
+
+/*
+ * The pseudo-physical frame (pfn) used in all the helpers is always based
+ * on Xen page granularity (i.e. 4KB).
+ *
+ * A Linux page may be split across multiple non-contiguous Xen pages,
+ * so we have to keep track of frames at 4KB page granularity.
+ *
+ * PV drivers should never use these helpers directly (particularly
+ * pfn_to_gfn and gfn_to_pfn).
+ */
+
+unsigned long __pfn_to_mfn(unsigned long pfn);
+extern struct rb_root phys_to_mach;
+
+/* Pseudo-physical <-> Guest conversion */
+static inline unsigned long pfn_to_gfn(unsigned long pfn)
+{
+ return pfn;
+}
+
+static inline unsigned long gfn_to_pfn(unsigned long gfn)
+{
+ return gfn;
+}
+
+/* Pseudo-physical <-> BUS conversion */
+static inline unsigned long pfn_to_bfn(unsigned long pfn)
+{
+ unsigned long mfn;
+
+ if (phys_to_mach.rb_node != NULL) {
+ mfn = __pfn_to_mfn(pfn);
+ if (mfn != INVALID_P2M_ENTRY)
+ return mfn;
+ }
+
+ return pfn;
+}
+
+static inline unsigned long bfn_to_pfn(unsigned long bfn)
+{
+ return bfn;
+}
+
+#define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn)
+
+/* VIRT <-> GUEST conversion */
+#define virt_to_gfn(v) (pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT))
+#define gfn_to_virt(m) (__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT))
+
+/* Only used in PV code. But ARM guests are always HVM. */
+static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
+{
+ BUG();
+}
+
+/* TODO: this shouldn't be here but it is because the frontend drivers
+ * are using it (it's rolled into headers) even though we won't hit the code path.
+ * So for right now just punt with this.
+ */
+static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
+{
+ BUG();
+ return NULL;
+}
+
+extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count);
+
+extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+ struct gnttab_unmap_grant_ref *kunmap_ops,
+ struct page **pages, unsigned int count);
+
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
+ unsigned long nr_pages);
+
+static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+ return __set_phys_to_machine(pfn, mfn);
+}
+
+#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
+#define xen_unmap(cookie) iounmap((cookie))
+
+bool xen_arch_need_swiotlb(struct device *dev,
+ phys_addr_t phys,
+ dma_addr_t dev_addr);
+unsigned long xen_get_swiotlb_free_pages(unsigned int order);
+
+#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index 7c35e279d1e3..a0083be5d529 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -51,9 +51,6 @@ xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir);
extern int
-xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
-
-extern int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
extern int
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 32b944b7cebd..271ba62503c7 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -151,6 +151,10 @@ __scanf(4, 5)
int xenbus_scanf(struct xenbus_transaction t,
const char *dir, const char *node, const char *fmt, ...);
+/* Read an (optional) unsigned value. */
+unsigned int xenbus_read_unsigned(const char *dir, const char *node,
+ unsigned int default_val);
+
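A minimal usage sketch for the new helper (the device pointer and the "feature-foo" node name are hypothetical, chosen only to illustrate the default-value semantics):

	/* Read an optional backend property; xenbus_read_unsigned() returns the
	 * supplied default (here 0) if the node is absent or cannot be parsed.
	 */
	unsigned int feature_foo = xenbus_read_unsigned(dev->otherend,
							"feature-foo", 0);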
/* Single printf and write: returns -errno or 0. */
__printf(4, 5)
int xenbus_printf(struct xenbus_transaction t,