author     Dave Airlie <airlied@redhat.com>    2020-01-20 11:08:11 +1000
committer  Dave Airlie <airlied@redhat.com>    2020-01-20 11:42:57 +1000
commit     3d4743131b8de970faa4b979ead0fadfe5d2de9d (patch)
tree       68e948c2d94d48598dd37e31bb654feb0b43ae4a
parent     df95968ff78931576ac7a3d3b30312894aaaf22e (diff)
parent     def9d2780727cec3313ed3522d0123158d87224d (diff)
download   linux-next-3d4743131b8de970faa4b979ead0fadfe5d2de9d.tar.gz
Backmerge v5.5-rc7 into drm-next
msm needs 5.5-rc4, go to the latest.
Signed-off-by: Dave Airlie <airlied@redhat.com>
1204 files changed, 11860 insertions, 7137 deletions
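For context, a backmerge like this is normally produced by merging the upstream release-candidate tag into the subsystem integration branch and recording the merge commit shown above. A minimal sketch of that workflow follows; the remote name ("mainline") and the local branch name are illustrative assumptions, not details recorded in this commit.

    # Hedged sketch only: remote/branch names are assumptions, not taken from this commit.
    git checkout drm-next                  # the subsystem integration branch
    git fetch mainline v5.5-rc7            # fetch the upstream -rc tag into FETCH_HEAD
    git merge --no-ff FETCH_HEAD \
        -m "Backmerge v5.5-rc7 into drm-next"   # records the merge commit seen above

The diff below is the result of that merge as published in linux-next.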
@@ -99,6 +99,7 @@ Jacob Shin <Jacob.Shin@amd.com> Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@google.com> Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@motorola.com> Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk.kim@samsung.com> +Jakub Kicinski <kuba@kernel.org> <jakub.kicinski@netronome.com> James Bottomley <jejb@mulgrave.(none)> James Bottomley <jejb@titanic.il.steeleye.com> James E Wilson <wilson@specifix.com> @@ -152,6 +153,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de> Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch> Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org> Li Yang <leoyang.li@nxp.com> <leoli@freescale.com> +Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com> Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com> Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com> Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com> @@ -265,6 +267,7 @@ Vinod Koul <vkoul@kernel.org> <vkoul@infradead.org> Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com> Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com> Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com> +Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com> Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com> Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com> Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com> diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io index 8ca498447aeb..05601a90a9b6 100644 --- a/Documentation/ABI/stable/sysfs-driver-mlxreg-io +++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io @@ -29,13 +29,13 @@ Description: This file shows the system fans direction: The files are read only. -What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/jtag_enable +What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld3_version Date: November 2018 KernelVersion: 5.0 Contact: Vadim Pasternak <vadimpmellanox.com> Description: These files show with which CPLD versions have been burned - on LED board. + on LED or Gearbox board. The files are read only. @@ -121,6 +121,15 @@ Description: These files show the system reset cause, as following: ComEx The files are read only. +What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld4_version +Date: November 2018 +KernelVersion: 5.0 +Contact: Vadim Pasternak <vadimpmellanox.com> +Description: These files show with which CPLD versions have been burned + on LED board. + + The files are read only. 
+ Date: June 2019 KernelVersion: 5.3 Contact: Vadim Pasternak <vadimpmellanox.com> diff --git a/Documentation/ABI/testing/sysfs-platform-mellanox-bootctl b/Documentation/ABI/testing/sysfs-platform-mellanox-bootctl index c65a80574869..401d202f478b 100644 --- a/Documentation/ABI/testing/sysfs-platform-mellanox-bootctl +++ b/Documentation/ABI/testing/sysfs-platform-mellanox-bootctl @@ -1,4 +1,4 @@ -What: /sys/bus/platform/devices/MLNXBF04:00/driver/lifecycle_state +What: /sys/bus/platform/devices/MLNXBF04:00/lifecycle_state Date: Oct 2019 KernelVersion: 5.5 Contact: "Liming Sun <lsun@mellanox.com>" @@ -10,7 +10,7 @@ Description: GA Non-Secured - Non-Secure chip and not able to change state RMA - Return Merchandise Authorization -What: /sys/bus/platform/devices/MLNXBF04:00/driver/post_reset_wdog +What: /sys/bus/platform/devices/MLNXBF04:00/post_reset_wdog Date: Oct 2019 KernelVersion: 5.5 Contact: "Liming Sun <lsun@mellanox.com>" @@ -19,7 +19,7 @@ Description: to reboot the chip and recover it to the old state if the new boot partition fails. -What: /sys/bus/platform/devices/MLNXBF04:00/driver/reset_action +What: /sys/bus/platform/devices/MLNXBF04:00/reset_action Date: Oct 2019 KernelVersion: 5.5 Contact: "Liming Sun <lsun@mellanox.com>" @@ -30,7 +30,7 @@ Description: emmc - boot from the onchip eMMC emmc_legacy - boot from the onchip eMMC in legacy (slow) mode -What: /sys/bus/platform/devices/MLNXBF04:00/driver/second_reset_action +What: /sys/bus/platform/devices/MLNXBF04:00/second_reset_action Date: Oct 2019 KernelVersion: 5.5 Contact: "Liming Sun <lsun@mellanox.com>" @@ -44,7 +44,7 @@ Description: swap_emmc - swap the primary / secondary boot partition none - cancel the action -What: /sys/bus/platform/devices/MLNXBF04:00/driver/secure_boot_fuse_state +What: /sys/bus/platform/devices/MLNXBF04:00/secure_boot_fuse_state Date: Oct 2019 KernelVersion: 5.5 Contact: "Liming Sun <lsun@mellanox.com>" diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt index 1c5d2281efc9..2a97aaec8b12 100644 --- a/Documentation/admin-guide/devices.txt +++ b/Documentation/admin-guide/devices.txt @@ -319,7 +319,7 @@ 182 = /dev/perfctr Performance-monitoring counters 183 = /dev/hwrng Generic random number generator 184 = /dev/cpu/microcode CPU microcode update interface - 186 = /dev/atomicps Atomic shapshot of process state data + 186 = /dev/atomicps Atomic snapshot of process state data 187 = /dev/irnet IrNET device 188 = /dev/smbusbios SMBus BIOS 189 = /dev/ussp_ctl User space serial port control diff --git a/Documentation/admin-guide/ext4.rst b/Documentation/admin-guide/ext4.rst index 059ddcbe769d..9bc93f0ce0c9 100644 --- a/Documentation/admin-guide/ext4.rst +++ b/Documentation/admin-guide/ext4.rst @@ -181,14 +181,17 @@ When mounting an ext4 filesystem, the following option are accepted: system after its metadata has been committed to the journal. commit=nrsec (*) - Ext4 can be told to sync all its data and metadata every 'nrsec' - seconds. The default value is 5 seconds. This means that if you lose - your power, you will lose as much as the latest 5 seconds of work (your - filesystem will not be damaged though, thanks to the journaling). This - default value (or any low value) will hurt performance, but it's good - for data-safety. Setting it to 0 will have the same effect as leaving - it at the default (5 seconds). Setting it to very large values will - improve performance. + This setting limits the maximum age of the running transaction to + 'nrsec' seconds. 
The default value is 5 seconds. This means that if + you lose your power, you will lose as much as the latest 5 seconds of + metadata changes (your filesystem will not be damaged though, thanks + to the journaling). This default value (or any low value) will hurt + performance, but it's good for data-safety. Setting it to 0 will have + the same effect as leaving it at the default (5 seconds). Setting it + to very large values will improve performance. Note that due to + delayed allocation even older data can be lost on power failure since + writeback of those data begins only after time set in + /proc/sys/vm/dirty_expire_centisecs. barrier=<0|1(*)>, barrier(*), nobarrier This enables/disables the use of write barriers in the jbd code. diff --git a/Documentation/admin-guide/xfs.rst b/Documentation/admin-guide/xfs.rst index fb5b39f73059..ad911be5b5e9 100644 --- a/Documentation/admin-guide/xfs.rst +++ b/Documentation/admin-guide/xfs.rst @@ -253,7 +253,7 @@ The following sysctls are available for the XFS filesystem: pool. fs.xfs.speculative_prealloc_lifetime - (Units: seconds Min: 1 Default: 300 Max: 86400) + (Units: seconds Min: 1 Default: 300 Max: 86400) The interval at which the background scanning for inodes with unused speculative preallocation runs. The scan removes unused preallocation from clean inodes and releases diff --git a/Documentation/dev-tools/kcov.rst b/Documentation/dev-tools/kcov.rst index 36890b026e77..1c4e1825d769 100644 --- a/Documentation/dev-tools/kcov.rst +++ b/Documentation/dev-tools/kcov.rst @@ -251,11 +251,11 @@ selectively from different subsystems. .. code-block:: c struct kcov_remote_arg { - unsigned trace_mode; - unsigned area_size; - unsigned num_handles; - uint64_t common_handle; - uint64_t handles[0]; + __u32 trace_mode; + __u32 area_size; + __u32 num_handles; + __aligned_u64 common_handle; + __aligned_u64 handles[0]; }; #define KCOV_INIT_TRACE _IOR('c', 1, unsigned long) diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst index ecdfdc9d4b03..61ae13c44f91 100644 --- a/Documentation/dev-tools/kselftest.rst +++ b/Documentation/dev-tools/kselftest.rst @@ -203,12 +203,12 @@ Test Module Kselftest tests the kernel from userspace. Sometimes things need testing from within the kernel, one method of doing this is to create a test module. We can tie the module into the kselftest framework by -using a shell script test runner. ``kselftest_module.sh`` is designed +using a shell script test runner. ``kselftest/module.sh`` is designed to facilitate this process. 
There is also a header file provided to assist writing kernel modules that are for use with kselftest: - ``tools/testing/kselftest/kselftest_module.h`` -- ``tools/testing/kselftest/kselftest_module.sh`` +- ``tools/testing/kselftest/kselftest/module.sh`` How to use ---------- @@ -247,7 +247,7 @@ A bare bones test module might look like this: #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include "../tools/testing/selftests/kselftest_module.h" + #include "../tools/testing/selftests/kselftest/module.h" KSTM_MODULE_GLOBALS(); @@ -276,7 +276,7 @@ Example test script #!/bin/bash # SPDX-License-Identifier: GPL-2.0+ - $(dirname $0)/../kselftest_module.sh "foo" test_foo + $(dirname $0)/../kselftest/module.sh "foo" test_foo Test Harness diff --git a/Documentation/dev-tools/kunit/index.rst b/Documentation/dev-tools/kunit/index.rst index 26ffb46bdf99..c60d760a0eed 100644 --- a/Documentation/dev-tools/kunit/index.rst +++ b/Documentation/dev-tools/kunit/index.rst @@ -9,6 +9,7 @@ KUnit - Unit Testing for the Linux Kernel start usage + kunit-tool api/index faq diff --git a/Documentation/dev-tools/kunit/kunit-tool.rst b/Documentation/dev-tools/kunit/kunit-tool.rst new file mode 100644 index 000000000000..50d46394e97e --- /dev/null +++ b/Documentation/dev-tools/kunit/kunit-tool.rst @@ -0,0 +1,57 @@ +.. SPDX-License-Identifier: GPL-2.0 + +================= +kunit_tool How-To +================= + +What is kunit_tool? +=================== + +kunit_tool is a script (``tools/testing/kunit/kunit.py``) that aids in building +the Linux kernel as UML (`User Mode Linux +<http://user-mode-linux.sourceforge.net/>`_), running KUnit tests, parsing +the test results and displaying them in a user friendly manner. + +What is a kunitconfig? +====================== + +It's just a defconfig that kunit_tool looks for in the base directory. +kunit_tool uses it to generate a .config as you might expect. In addition, it +verifies that the generated .config contains the CONFIG options in the +kunitconfig; the reason it does this is so that it is easy to be sure that a +CONFIG that enables a test actually ends up in the .config. + +How do I use kunit_tool? +======================== + +If a kunitconfig is present at the root directory, all you have to do is: + +.. code-block:: bash + + ./tools/testing/kunit/kunit.py run + +However, you most likely want to use it with the following options: + +.. code-block:: bash + + ./tools/testing/kunit/kunit.py run --timeout=30 --jobs=`nproc --all` + +- ``--timeout`` sets a maximum amount of time to allow tests to run. +- ``--jobs`` sets the number of threads to use to build the kernel. + +If you just want to use the defconfig that ships with the kernel, you can +append the ``--defconfig`` flag as well: + +.. code-block:: bash + + ./tools/testing/kunit/kunit.py run --timeout=30 --jobs=`nproc --all` --defconfig + +.. note:: + This command is particularly helpful for getting started because it + just works. No kunitconfig needs to be present. + +For a list of all the flags supported by kunit_tool, you can run: + +.. code-block:: bash + + ./tools/testing/kunit/kunit.py run --help diff --git a/Documentation/dev-tools/kunit/start.rst b/Documentation/dev-tools/kunit/start.rst index aeeddfafeea2..4e1d24db6b13 100644 --- a/Documentation/dev-tools/kunit/start.rst +++ b/Documentation/dev-tools/kunit/start.rst @@ -19,21 +19,21 @@ The wrapper can be run with: .. 
code-block:: bash - ./tools/testing/kunit/kunit.py run + ./tools/testing/kunit/kunit.py run --defconfig -Creating a kunitconfig -====================== -The Python script is a thin wrapper around Kbuild as such, it needs to be -configured with a ``kunitconfig`` file. This file essentially contains the +For more information on this wrapper (also called kunit_tool) checkout the +:doc:`kunit-tool` page. + +Creating a .kunitconfig +======================= +The Python script is a thin wrapper around Kbuild. As such, it needs to be +configured with a ``.kunitconfig`` file. This file essentially contains the regular Kernel config, with the specific test targets as well. .. code-block:: bash - git clone -b master https://kunit.googlesource.com/kunitconfig $PATH_TO_KUNITCONFIG_REPO cd $PATH_TO_LINUX_REPO - ln -s $PATH_TO_KUNIT_CONFIG_REPO/kunitconfig kunitconfig - -You may want to add kunitconfig to your local gitignore. + cp arch/um/configs/kunit_defconfig .kunitconfig Verifying KUnit Works --------------------- @@ -59,8 +59,8 @@ If everything worked correctly, you should see the following: followed by a list of tests that are run. All of them should be passing. .. note:: - Because it is building a lot of sources for the first time, the ``Building - kunit kernel`` step may take a while. + Because it is building a lot of sources for the first time, the + ``Building KUnit kernel`` step may take a while. Writing your first test ======================= @@ -148,7 +148,7 @@ and the following to ``drivers/misc/Makefile``: obj-$(CONFIG_MISC_EXAMPLE_TEST) += example-test.o -Now add it to your ``kunitconfig``: +Now add it to your ``.kunitconfig``: .. code-block:: none @@ -159,7 +159,7 @@ Now you can run the test: .. code-block:: bash - ./tools/testing/kunit/kunit.py + ./tools/testing/kunit/kunit.py run You should see the following failure: diff --git a/Documentation/dev-tools/kunit/usage.rst b/Documentation/dev-tools/kunit/usage.rst index c6e69634e274..b9a065ab681e 100644 --- a/Documentation/dev-tools/kunit/usage.rst +++ b/Documentation/dev-tools/kunit/usage.rst @@ -16,7 +16,7 @@ Organization of this document ============================= This document is organized into two main sections: Testing and Isolating -Behavior. The first covers what a unit test is and how to use KUnit to write +Behavior. The first covers what unit tests are and how to use KUnit to write them. The second covers how to use KUnit to isolate code and make it possible to unit test code that was otherwise un-unit-testable. @@ -174,13 +174,13 @@ Test Suites ~~~~~~~~~~~ Now obviously one unit test isn't very helpful; the power comes from having -many test cases covering all of your behaviors. Consequently it is common to -have many *similar* tests; in order to reduce duplication in these closely -related tests most unit testing frameworks provide the concept of a *test -suite*, in KUnit we call it a *test suite*; all it is is just a collection of -test cases for a unit of code with a set up function that gets invoked before -every test cases and then a tear down function that gets invoked after every -test case completes. +many test cases covering all of a unit's behaviors. Consequently it is common +to have many *similar* tests; in order to reduce duplication in these closely +related tests most unit testing frameworks - including KUnit - provide the +concept of a *test suite*. 
A *test suite* is just a collection of test cases +for a unit of code with a set up function that gets invoked before every test +case and then a tear down function that gets invoked after every test case +completes. Example: @@ -211,7 +211,7 @@ KUnit test framework. .. note:: A test case will only be run if it is associated with a test suite. -For a more information on these types of things see the :doc:`api/test`. +For more information on these types of things see the :doc:`api/test`. Isolating Behavior ================== @@ -338,7 +338,7 @@ We can easily test this code by *faking out* the underlying EEPROM: return count; } - ssize_t fake_eeprom_write(struct eeprom *this, size_t offset, const char *buffer, size_t count) + ssize_t fake_eeprom_write(struct eeprom *parent, size_t offset, const char *buffer, size_t count) { struct fake_eeprom *this = container_of(parent, struct fake_eeprom, parent); @@ -454,7 +454,7 @@ KUnit on non-UML architectures By default KUnit uses UML as a way to provide dependencies for code under test. Under most circumstances KUnit's usage of UML should be treated as an implementation detail of how KUnit works under the hood. Nevertheless, there -are instances where being able to run architecture specific code, or test +are instances where being able to run architecture specific code or test against real hardware is desirable. For these reasons KUnit supports running on other architectures. @@ -557,7 +557,7 @@ run your tests on your hardware setup just by compiling for your architecture. .. important:: Always prefer tests that run on UML to tests that only run under a particular architecture, and always prefer tests that run under QEMU or another easy - (and monitarily free) to obtain software environment to a specific piece of + (and monetarily free) to obtain software environment to a specific piece of hardware. Nevertheless, there are still valid reasons to write an architecture or hardware diff --git a/Documentation/devicetree/bindings/i2c/i2c-at91.txt b/Documentation/devicetree/bindings/i2c/i2c-at91.txt index 2210f4359c45..8347b1e7c080 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-at91.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-at91.txt @@ -18,8 +18,10 @@ Optional properties: - dma-names: should contain "tx" and "rx". - atmel,fifo-size: maximum number of data the RX and TX FIFOs can store for FIFO capable I2C controllers. -- i2c-sda-hold-time-ns: TWD hold time, only available for "atmel,sama5d4-i2c" - and "atmel,sama5d2-i2c". +- i2c-sda-hold-time-ns: TWD hold time, only available for: + "atmel,sama5d4-i2c", + "atmel,sama5d2-i2c", + "microchip,sam9x60-i2c". - Child nodes conforming to i2c bus binding Examples : diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt index 27e1b4cebfbd..6bdcc3f84bd3 100644 --- a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt +++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt @@ -10,7 +10,6 @@ Required properties: - #size-cells: 0 - spi-max-frequency: Maximum frequency of the SPI bus the chip can operate at should be less than or equal to 18 MHz. - - device-wake-gpios: Wake up GPIO to wake up the TCAN device. - interrupt-parent: the phandle to the interrupt controller which provides the interrupt. - interrupts: interrupt specification for data-ready. @@ -23,6 +22,7 @@ Optional properties: reset. - device-state-gpios: Input GPIO that indicates if the device is in a sleep state or if the device is active. 
+ - device-wake-gpios: Wake up GPIO to wake up the TCAN device. Example: tcan4x5x: tcan4x5x@0 { @@ -36,5 +36,5 @@ tcan4x5x: tcan4x5x@0 { interrupts = <14 GPIO_ACTIVE_LOW>; device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>; device-wake-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>; - reset-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>; + reset-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>; }; diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml index 4845e29411e4..e08cd4c4d568 100644 --- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml +++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml @@ -347,6 +347,7 @@ allOf: - st,spear600-gmac then: + properties: snps,tso: $ref: /schemas/types.yaml#definitions/flag description: diff --git a/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt b/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt index 6e5341b4f891..ee59409640f2 100644 --- a/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt +++ b/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt @@ -22,6 +22,6 @@ Example: }; ðernet_switch { - resets = <&reset>; + resets = <&reset 26>; reset-names = "switch"; }; diff --git a/Documentation/devicetree/bindings/spi/spi-controller.yaml b/Documentation/devicetree/bindings/spi/spi-controller.yaml index 732339275848..1e0ca6ccf64b 100644 --- a/Documentation/devicetree/bindings/spi/spi-controller.yaml +++ b/Documentation/devicetree/bindings/spi/spi-controller.yaml @@ -111,7 +111,7 @@ patternProperties: spi-rx-bus-width: allOf: - $ref: /schemas/types.yaml#/definitions/uint32 - - enum: [ 1, 2, 4 ] + - enum: [ 1, 2, 4, 8 ] - default: 1 description: Bus width to the SPI bus used for MISO. @@ -123,7 +123,7 @@ patternProperties: spi-tx-bus-width: allOf: - $ref: /schemas/types.yaml#/definitions/uint32 - - enum: [ 1, 2, 4 ] + - enum: [ 1, 2, 4, 8 ] - default: 1 description: Bus width to the SPI bus used for MOSI. diff --git a/Documentation/features/debug/gcov-profile-all/arch-support.txt b/Documentation/features/debug/gcov-profile-all/arch-support.txt index 059d58a549c7..6fb2b0671994 100644 --- a/Documentation/features/debug/gcov-profile-all/arch-support.txt +++ b/Documentation/features/debug/gcov-profile-all/arch-support.txt @@ -23,7 +23,7 @@ | openrisc: | TODO | | parisc: | TODO | | powerpc: | ok | - | riscv: | TODO | + | riscv: | ok | | s390: | ok | | sh: | ok | | sparc: | TODO | diff --git a/Documentation/kbuild/kconfig-language.rst b/Documentation/kbuild/kconfig-language.rst index 74bef19f69f0..231e6a64957f 100644 --- a/Documentation/kbuild/kconfig-language.rst +++ b/Documentation/kbuild/kconfig-language.rst @@ -196,14 +196,11 @@ applicable everywhere (see syntax). or equal to the first symbol and smaller than or equal to the second symbol. -- help text: "help" or "---help---" +- help text: "help" This defines a help text. The end of the help text is determined by the indentation level, this means it ends at the first line which has a smaller indentation than the first line of the help text. - "---help---" and "help" do not differ in behaviour, "---help---" is - used to help visually separate configuration logic from help within - the file as an aid to developers. - misc options: "option" <symbol>[=<value>] diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst index b9b50553bfc5..d7e6534a8505 100644 --- a/Documentation/kbuild/makefiles.rst +++ b/Documentation/kbuild/makefiles.rst @@ -297,9 +297,19 @@ more details, with real examples. 
If CONFIG_EXT2_FS is set to either 'y' (built-in) or 'm' (modular) the corresponding obj- variable will be set, and kbuild will descend down in the ext2 directory. - Kbuild only uses this information to decide that it needs to visit - the directory, it is the Makefile in the subdirectory that - specifies what is modular and what is built-in. + + Kbuild uses this information not only to decide that it needs to visit + the directory, but also to decide whether or not to link objects from + the directory into vmlinux. + + When Kbuild descends into the directory with 'y', all built-in objects + from that directory are combined into the built-in.a, which will be + eventually linked into vmlinux. + + When Kbuild descends into the directory with 'm', in contrast, nothing + from that directory will be linked into vmlinux. If the Makefile in + that directory specifies obj-y, those objects will be left orphan. + It is very likely a bug of the Makefile or of dependencies in Kconfig. It is good practice to use a `CONFIG_` variable when assigning directory names. This allows kbuild to totally skip the directory if the diff --git a/Documentation/media/v4l-drivers/meye.rst b/Documentation/media/v4l-drivers/meye.rst index a572996cdbf6..dc57a6a91b43 100644 --- a/Documentation/media/v4l-drivers/meye.rst +++ b/Documentation/media/v4l-drivers/meye.rst @@ -95,7 +95,7 @@ so all video4linux tools (like xawtv) should work with this driver. Besides the video4linux interface, the driver has a private interface for accessing the Motion Eye extended parameters (camera sharpness, -agc, video framerate), the shapshot and the MJPEG capture facilities. +agc, video framerate), the snapshot and the MJPEG capture facilities. This interface consists of several ioctls (prototypes and structures can be found in include/linux/meye.h): diff --git a/Documentation/networking/dsa/sja1105.rst b/Documentation/networking/dsa/sja1105.rst index eef20d0bcf7c..64553d8d91cb 100644 --- a/Documentation/networking/dsa/sja1105.rst +++ b/Documentation/networking/dsa/sja1105.rst @@ -230,12 +230,6 @@ simultaneously on two ports. The driver checks the consistency of the schedules against this restriction and errors out when appropriate. Schedule analysis is needed to avoid this, which is outside the scope of the document. -At the moment, the time-aware scheduler can only be triggered based on a -standalone clock and not based on PTP time. This means the base-time argument -from tc-taprio is ignored and the schedule starts right away. It also means it -is more difficult to phase-align the scheduler with the other devices in the -network. - Device Tree bindings and board design ===================================== diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index fd26788e8c96..48ccb1b31160 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -603,7 +603,7 @@ tcp_synack_retries - INTEGER with the current initial RTO of 1second. With this the final timeout for a passive TCP connection will happen after 63seconds. -tcp_syncookies - BOOLEAN +tcp_syncookies - INTEGER Only valid when the kernel was compiled with CONFIG_SYN_COOKIES Send out syncookies when the syn backlog queue of a socket overflows. 
This is to prevent against the common 'SYN flood attack' diff --git a/Documentation/networking/j1939.rst b/Documentation/networking/j1939.rst index dc60b13fcd09..f5be243d250a 100644 --- a/Documentation/networking/j1939.rst +++ b/Documentation/networking/j1939.rst @@ -339,7 +339,7 @@ To claim an address following code example can be used: .pgn = J1939_PGN_ADDRESS_CLAIMED, .pgn_mask = J1939_PGN_PDU1_MAX, }, { - .pgn = J1939_PGN_ADDRESS_REQUEST, + .pgn = J1939_PGN_REQUEST, .pgn_mask = J1939_PGN_PDU1_MAX, }, { .pgn = J1939_PGN_ADDRESS_COMMANDED, diff --git a/Documentation/networking/netdev-FAQ.rst b/Documentation/networking/netdev-FAQ.rst index 642fa963be3c..d5c9320901c3 100644 --- a/Documentation/networking/netdev-FAQ.rst +++ b/Documentation/networking/netdev-FAQ.rst @@ -34,8 +34,8 @@ the names, the ``net`` tree is for fixes to existing code already in the mainline tree from Linus, and ``net-next`` is where the new code goes for the future release. You can find the trees here: -- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git -- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git +- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git +- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git Q: How often do changes from these trees make it to the mainline Linus tree? ---------------------------------------------------------------------------- diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst index 799580acc8de..5d54946cfc75 100644 --- a/Documentation/process/embargoed-hardware-issues.rst +++ b/Documentation/process/embargoed-hardware-issues.rst @@ -255,7 +255,7 @@ an involved disclosed party. The current ambassadors list: Red Hat Josh Poimboeuf <jpoimboe@redhat.com> SUSE Jiri Kosina <jkosina@suse.cz> - Amazon + Amazon Peter Bowen <pzb@amzn.com> Google Kees Cook <keescook@chromium.org> ============= ======================================================== diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst index 21aa7d5358e6..6399d92f0b21 100644 --- a/Documentation/process/index.rst +++ b/Documentation/process/index.rst @@ -60,6 +60,7 @@ lack of a better place. volatile-considered-harmful botching-up-ioctls clang-format + ../riscv/patch-acceptance .. only:: subproject and html diff --git a/Documentation/riscv/index.rst b/Documentation/riscv/index.rst index 215fd3c1f2d5..fa33bffd8992 100644 --- a/Documentation/riscv/index.rst +++ b/Documentation/riscv/index.rst @@ -7,6 +7,7 @@ RISC-V architecture boot-image-header pmu + patch-acceptance .. only:: subproject and html diff --git a/Documentation/riscv/patch-acceptance.rst b/Documentation/riscv/patch-acceptance.rst new file mode 100644 index 000000000000..dfe0ac5624fb --- /dev/null +++ b/Documentation/riscv/patch-acceptance.rst @@ -0,0 +1,35 @@ +.. SPDX-License-Identifier: GPL-2.0 + +arch/riscv maintenance guidelines for developers +================================================ + +Overview +-------- +The RISC-V instruction set architecture is developed in the open: +in-progress drafts are available for all to review and to experiment +with implementations. New module or extension drafts can change +during the development process - sometimes in ways that are +incompatible with previous drafts. This flexibility can present a +challenge for RISC-V Linux maintenance. Linux maintainers disapprove +of churn, and the Linux development process prefers well-reviewed and +tested code over experimental code. 
We wish to extend these same +principles to the RISC-V-related code that will be accepted for +inclusion in the kernel. + +Submit Checklist Addendum +------------------------- +We'll only accept patches for new modules or extensions if the +specifications for those modules or extensions are listed as being +"Frozen" or "Ratified" by the RISC-V Foundation. (Developers may, of +course, maintain their own Linux kernel trees that contain code for +any draft extensions that they wish.) + +Additionally, the RISC-V specification allows implementors to create +their own custom extensions. These custom extensions aren't required +to go through any review or ratification process by the RISC-V +Foundation. To avoid the maintenance complexity and potential +performance impact of adding kernel code for implementor-specific +RISC-V extensions, we'll only to accept patches for extensions that +have been officially frozen or ratified by the RISC-V Foundation. +(Implementors, may, of course, maintain their own Linux kernel trees +containing code for any custom extensions that they wish.) diff --git a/MAINTAINERS b/MAINTAINERS index aa9add598b7d..f1e9be211715 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -720,7 +720,7 @@ F: Documentation/devicetree/bindings/i2c/i2c-altera.txt F: drivers/i2c/busses/i2c-altera.c ALTERA MAILBOX DRIVER -M: Ley Foon Tan <lftan@altera.com> +M: Ley Foon Tan <ley.foon.tan@intel.com> L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) S: Maintained F: drivers/mailbox/mailbox-altera.c @@ -771,6 +771,8 @@ F: drivers/thermal/thermal_mmio.c AMAZON ETHERNET DRIVERS M: Netanel Belgazal <netanel@amazon.com> +M: Arthur Kiyanovski <akiyano@amazon.com> +R: Guy Tzalik <gtzalik@amazon.com> R: Saeed Bishara <saeedb@amazon.com> R: Zorik Machulsky <zorik@amazon.com> L: netdev@vger.kernel.org @@ -1405,7 +1407,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git ARM/ACTIONS SEMI ARCHITECTURE M: Andreas Färber <afaerber@suse.de> -R: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> +M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained N: owl @@ -2272,6 +2274,7 @@ F: drivers/*/*s3c64xx* F: drivers/*/*s5pv210* F: drivers/memory/samsung/ F: drivers/soc/samsung/ +F: drivers/tty/serial/samsung* F: include/linux/soc/samsung/ F: Documentation/arm/samsung/ F: Documentation/devicetree/bindings/arm/samsung/ @@ -3147,7 +3150,7 @@ S: Maintained F: arch/mips/net/ BPF JIT for NFP NICs -M: Jakub Kicinski <jakub.kicinski@netronome.com> +M: Jakub Kicinski <kuba@kernel.org> L: netdev@vger.kernel.org L: bpf@vger.kernel.org S: Supported @@ -5018,7 +5021,7 @@ F: include/linux/dma-mapping.h F: include/linux/dma-noncoherent.h DMC FREQUENCY DRIVER FOR SAMSUNG EXYNOS5422 -M: Lukasz Luba <l.luba@partner.samsung.com> +M: Lukasz Luba <lukasz.luba@arm.com> L: linux-pm@vger.kernel.org L: linux-samsung-soc@vger.kernel.org S: Maintained @@ -6055,6 +6058,7 @@ M: Yash Shah <yash.shah@sifive.com> L: linux-edac@vger.kernel.org S: Supported F: drivers/edac/sifive_edac.c +F: drivers/soc/sifive_l2_cache.c EDAC-SKYLAKE M: Tony Luck <tony.luck@intel.com> @@ -7061,6 +7065,7 @@ L: linux-acpi@vger.kernel.org S: Maintained F: Documentation/firmware-guide/acpi/gpio-properties.rst F: drivers/gpio/gpiolib-acpi.c +F: drivers/gpio/gpiolib-acpi.h GPIO IR Transmitter M: Sean Young <sean@mess.org> @@ -9068,7 +9073,6 @@ F: include/linux/umh.h KERNEL VIRTUAL MACHINE (KVM) M: Paolo Bonzini <pbonzini@redhat.com> -M: Radim 
Krčmář <rkrcmar@redhat.com> L: kvm@vger.kernel.org W: http://www.linux-kvm.org T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git @@ -9103,9 +9107,9 @@ F: virt/kvm/arm/ F: include/kvm/arm_* KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips) -M: James Hogan <jhogan@kernel.org> L: linux-mips@vger.kernel.org -S: Supported +L: kvm@vger.kernel.org +S: Orphan F: arch/mips/include/uapi/asm/kvm* F: arch/mips/include/asm/kvm* F: arch/mips/kvm/ @@ -9140,7 +9144,6 @@ F: tools/testing/selftests/kvm/*/s390x/ KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86) M: Paolo Bonzini <pbonzini@redhat.com> -M: Radim Krčmář <rkrcmar@redhat.com> R: Sean Christopherson <sean.j.christopherson@intel.com> R: Vitaly Kuznetsov <vkuznets@redhat.com> R: Wanpeng Li <wanpengli@tencent.com> @@ -10138,6 +10141,7 @@ S: Maintained F: drivers/media/radio/radio-maxiradio* MCAN MMIO DEVICE DRIVER +M: Dan Murphy <dmurphy@ti.com> M: Sriram Dash <sriram.dash@samsung.com> L: linux-can@vger.kernel.org S: Maintained @@ -11456,7 +11460,7 @@ F: include/uapi/linux/netrom.h F: net/netrom/ NETRONOME ETHERNET DRIVERS -M: Jakub Kicinski <jakub.kicinski@netronome.com> +M: Jakub Kicinski <kuba@kernel.org> L: oss-drivers@netronome.com S: Maintained F: drivers/net/ethernet/netronome/ @@ -11485,8 +11489,8 @@ M: "David S. Miller" <davem@davemloft.net> L: netdev@vger.kernel.org W: http://www.linuxfoundation.org/en/Net Q: http://patchwork.ozlabs.org/project/netdev/list/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git -T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git S: Odd Fixes F: Documentation/devicetree/bindings/net/ F: drivers/net/ @@ -11527,8 +11531,8 @@ M: "David S. Miller" <davem@davemloft.net> L: netdev@vger.kernel.org W: http://www.linuxfoundation.org/en/Net Q: http://patchwork.ozlabs.org/project/netdev/list/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git -T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git B: mailto:netdev@vger.kernel.org S: Maintained F: net/ @@ -11573,7 +11577,7 @@ M: "David S. 
Miller" <davem@davemloft.net> M: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> M: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org> L: netdev@vger.kernel.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git S: Maintained F: net/ipv4/ F: net/ipv6/ @@ -11616,7 +11620,7 @@ M: Boris Pismenny <borisp@mellanox.com> M: Aviad Yehezkel <aviadye@mellanox.com> M: John Fastabend <john.fastabend@gmail.com> M: Daniel Borkmann <daniel@iogearbox.net> -M: Jakub Kicinski <jakub.kicinski@netronome.com> +M: Jakub Kicinski <kuba@kernel.org> L: netdev@vger.kernel.org S: Maintained F: net/tls/* @@ -11628,7 +11632,7 @@ L: linux-wireless@vger.kernel.org Q: http://patchwork.kernel.org/project/linux-wireless/list/ NETDEVSIM -M: Jakub Kicinski <jakub.kicinski@netronome.com> +M: Jakub Kicinski <kuba@kernel.org> S: Maintained F: drivers/net/netdevsim/* @@ -11705,7 +11709,7 @@ F: Documentation/scsi/NinjaSCSI.txt F: drivers/scsi/nsp32* NIOS2 ARCHITECTURE -M: Ley Foon Tan <lftan@altera.com> +M: Ley Foon Tan <ley.foon.tan@intel.com> L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) T: git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git S: Maintained @@ -12589,7 +12593,7 @@ F: Documentation/devicetree/bindings/pci/aardvark-pci.txt F: drivers/pci/controller/pci-aardvark.c PCI DRIVER FOR ALTERA PCIE IP -M: Ley Foon Tan <lftan@altera.com> +M: Ley Foon Tan <ley.foon.tan@intel.com> L: rfi@lists.rocketboards.org (moderated for non-subscribers) L: linux-pci@vger.kernel.org S: Supported @@ -12768,7 +12772,7 @@ S: Supported F: Documentation/PCI/pci-error-recovery.rst PCI MSI DRIVER FOR ALTERA MSI IP -M: Ley Foon Tan <lftan@altera.com> +M: Ley Foon Tan <ley.foon.tan@intel.com> L: rfi@lists.rocketboards.org (moderated for non-subscribers) L: linux-pci@vger.kernel.org S: Supported @@ -13704,7 +13708,6 @@ F: drivers/net/ethernet/qualcomm/emac/ QUALCOMM ETHQOS ETHERNET DRIVER M: Vinod Koul <vkoul@kernel.org> -M: Niklas Cassel <niklas.cassel@linaro.org> L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -13738,6 +13741,15 @@ L: linux-arm-msm@vger.kernel.org S: Maintained F: drivers/iommu/qcom_iommu.c +QUALCOMM RMNET DRIVER +M: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org> +M: Sean Tranchetti <stranche@codeaurora.org> +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/ethernet/qualcomm/rmnet/ +F: Documentation/networking/device_drivers/qualcomm/rmnet.txt +F: include/linux/if_rmnet.h + QUALCOMM TSENS THERMAL DRIVER M: Amit Kucheria <amit.kucheria@linaro.org> L: linux-pm@vger.kernel.org @@ -14137,6 +14149,7 @@ M: Paul Walmsley <paul.walmsley@sifive.com> M: Palmer Dabbelt <palmer@dabbelt.com> M: Albert Ou <aou@eecs.berkeley.edu> L: linux-riscv@lists.infradead.org +P: Documentation/riscv/patch-acceptance.rst T: git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git S: Supported F: arch/riscv/ @@ -14564,8 +14577,6 @@ F: include/linux/platform_data/spi-s3c64xx.h SAMSUNG SXGBE DRIVERS M: Byungho An <bh74.an@samsung.com> -M: Girish K S <ks.giri@samsung.com> -M: Vipul Pandya <vipul.pandya@samsung.com> S: Supported L: netdev@vger.kernel.org F: drivers/net/ethernet/samsung/sxgbe/ @@ -16561,6 +16572,13 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Odd Fixes F: sound/soc/codecs/tas571x* +TI TCAN4X5X DEVICE DRIVER +M: Dan Murphy <dmurphy@ti.com> +L: linux-can@vger.kernel.org +S: Maintained +F: 
Documentation/devicetree/bindings/net/can/tcan4x5x.txt +F: drivers/net/can/m_can/tcan4x5x.c + TI TRF7970A NFC DRIVER M: Mark Greer <mgreer@animalcreek.com> L: linux-wireless@vger.kernel.org @@ -18053,7 +18071,7 @@ XDP (eXpress Data Path) M: Alexei Starovoitov <ast@kernel.org> M: Daniel Borkmann <daniel@iogearbox.net> M: David S. Miller <davem@davemloft.net> -M: Jakub Kicinski <jakub.kicinski@netronome.com> +M: Jakub Kicinski <kuba@kernel.org> M: Jesper Dangaard Brouer <hawk@kernel.org> M: John Fastabend <john.fastabend@gmail.com> L: netdev@vger.kernel.org @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 5 SUBLEVEL = 0 -EXTRAVERSION = -rc2 +EXTRAVERSION = -rc7 NAME = Kleptomaniac Octopus # *DOCUMENTATION* @@ -414,6 +414,7 @@ STRIP = $(CROSS_COMPILE)strip OBJCOPY = $(CROSS_COMPILE)objcopy OBJDUMP = $(CROSS_COMPILE)objdump OBJSIZE = $(CROSS_COMPILE)size +READELF = $(CROSS_COMPILE)readelf PAHOLE = pahole LEX = flex YACC = bison @@ -472,7 +473,7 @@ GCC_PLUGINS_CFLAGS := CLANG_FLAGS := export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC -export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE PAHOLE LEX YACC AWK INSTALLKERNEL +export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE LEX YACC AWK INSTALLKERNEL export PERL PYTHON PYTHON2 PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h index 41b16f21beec..0b8b63d0bec1 100644 --- a/arch/arc/include/asm/entry-arcv2.h +++ b/arch/arc/include/asm/entry-arcv2.h @@ -162,7 +162,7 @@ #endif #ifdef CONFIG_ARC_HAS_ACCL_REGS - ST2 r58, r59, PT_sp + 12 + ST2 r58, r59, PT_r58 #endif .endm @@ -172,8 +172,8 @@ LD2 gp, fp, PT_r26 ; gp (r26), fp (r27) - ld r12, [sp, PT_sp + 4] - ld r30, [sp, PT_sp + 8] + ld r12, [sp, PT_r12] + ld r30, [sp, PT_r30] ; Restore SP (into AUX_USER_SP) only if returning to U mode ; - for K mode, it will be implicitly restored as stack is unwound @@ -190,7 +190,7 @@ #endif #ifdef CONFIG_ARC_HAS_ACCL_REGS - LD2 r58, r59, PT_sp + 12 + LD2 r58, r59, PT_r58 #endif .endm diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h index 9a74ce71a767..30ac40fed2c5 100644 --- a/arch/arc/include/asm/hugepage.h +++ b/arch/arc/include/asm/hugepage.h @@ -8,7 +8,6 @@ #define _ASM_ARC_HUGEPAGE_H #include <linux/types.h> -#define __ARCH_USE_5LEVEL_HACK #include <asm-generic/pgtable-nopmd.h> static inline pte_t pmd_pte(pmd_t pmd) diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c index 1f621e416521..c783bcd35eb8 100644 --- a/arch/arc/kernel/asm-offsets.c +++ b/arch/arc/kernel/asm-offsets.c @@ -66,7 +66,15 @@ int main(void) DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs)); DEFINE(SZ_PT_REGS, sizeof(struct pt_regs)); - DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25)); + +#ifdef CONFIG_ISA_ARCV2 + OFFSET(PT_r12, pt_regs, r12); + OFFSET(PT_r30, pt_regs, r30); +#endif +#ifdef CONFIG_ARC_HAS_ACCL_REGS + OFFSET(PT_r58, pt_regs, r58); + OFFSET(PT_r59, pt_regs, r59); +#endif return 0; } diff --git a/arch/arc/plat-eznps/Kconfig b/arch/arc/plat-eznps/Kconfig index a376a50d3fea..a931d0a256d0 100644 --- a/arch/arc/plat-eznps/Kconfig +++ b/arch/arc/plat-eznps/Kconfig @@ -7,7 +7,7 @@ menuconfig ARC_PLAT_EZNPS bool "\"EZchip\" ARC dev platform" select CPU_BIG_ENDIAN - select CLKSRC_NPS + select CLKSRC_NPS if !PHYS_ADDR_T_64BIT select EZNPS_GIC select EZCHIP_NPS_MANAGEMENT_ENET if ETHERNET help diff --git a/arch/arm/Kconfig 
b/arch/arm/Kconfig index ba75e3661a41..96dab76da3b3 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -72,6 +72,7 @@ config ARM select HAVE_ARM_SMCCC if CPU_V7 select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32 select HAVE_CONTEXT_TRACKING + select HAVE_COPY_THREAD_TLS select HAVE_C_RECORDMCOUNT select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS if MMU diff --git a/arch/arm/boot/dts/am335x-sancloud-bbe.dts b/arch/arm/boot/dts/am335x-sancloud-bbe.dts index 8678e6e35493..e5fdb7abb0d5 100644 --- a/arch/arm/boot/dts/am335x-sancloud-bbe.dts +++ b/arch/arm/boot/dts/am335x-sancloud-bbe.dts @@ -108,7 +108,7 @@ &cpsw_emac0 { phy-handle = <ðphy0>; - phy-mode = "rgmii-txid"; + phy-mode = "rgmii-id"; }; &i2c0 { diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts index cae4500194fe..811c8cae315b 100644 --- a/arch/arm/boot/dts/am437x-gp-evm.dts +++ b/arch/arm/boot/dts/am437x-gp-evm.dts @@ -86,7 +86,7 @@ }; lcd0: display { - compatible = "osddisplays,osd057T0559-34ts", "panel-dpi"; + compatible = "osddisplays,osd070t1718-19ts", "panel-dpi"; label = "lcd"; backlight = <&lcd_bl>; diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts index 95314121d111..078cb473fa7d 100644 --- a/arch/arm/boot/dts/am43x-epos-evm.dts +++ b/arch/arm/boot/dts/am43x-epos-evm.dts @@ -42,7 +42,7 @@ }; lcd0: display { - compatible = "osddisplays,osd057T0559-34ts", "panel-dpi"; + compatible = "osddisplays,osd070t1718-19ts", "panel-dpi"; label = "lcd"; backlight = <&lcd_bl>; diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts index 820ce3b60bb6..669559c9c95b 100644 --- a/arch/arm/boot/dts/am571x-idk.dts +++ b/arch/arm/boot/dts/am571x-idk.dts @@ -167,11 +167,7 @@ &pcie1_rc { status = "okay"; - gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>; -}; - -&pcie1_ep { - gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>; + gpios = <&gpio5 18 GPIO_ACTIVE_HIGH>; }; &mmc1 { diff --git a/arch/arm/boot/dts/am572x-idk-common.dtsi b/arch/arm/boot/dts/am572x-idk-common.dtsi index a064f13b3880..ddf123620e96 100644 --- a/arch/arm/boot/dts/am572x-idk-common.dtsi +++ b/arch/arm/boot/dts/am572x-idk-common.dtsi @@ -147,10 +147,6 @@ gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>; }; -&pcie1_ep { - gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>; -}; - &mailbox5 { status = "okay"; mbox_ipu1_ipc3x: mbox_ipu1_ipc3x { diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi index bc76f1705c0f..a813a0cf3ff3 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi +++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi @@ -29,6 +29,27 @@ reg = <0x0 0x80000000 0x0 0x80000000>; }; + main_12v0: fixedregulator-main_12v0 { + /* main supply */ + compatible = "regulator-fixed"; + regulator-name = "main_12v0"; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + regulator-always-on; + regulator-boot-on; + }; + + evm_5v0: fixedregulator-evm_5v0 { + /* Output of TPS54531D */ + compatible = "regulator-fixed"; + regulator-name = "evm_5v0"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&main_12v0>; + regulator-always-on; + regulator-boot-on; + }; + vdd_3v3: fixedregulator-vdd_3v3 { compatible = "regulator-fixed"; regulator-name = "vdd_3v3"; @@ -547,10 +568,6 @@ gpios = <&gpio2 8 GPIO_ACTIVE_LOW>; }; -&pcie1_ep { - gpios = <&gpio2 8 GPIO_ACTIVE_LOW>; -}; - &mcasp3 { #sound-dai-cells = <0>; assigned-clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>; diff --git 
a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts index c1c9cd30f980..13f7aefe045e 100644 --- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts +++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts @@ -258,9 +258,9 @@ }; }; - pca0: pca9552@60 { + pca0: pca9552@61 { compatible = "nxp,pca9552"; - reg = <0x60>; + reg = <0x61>; #address-cells = <1>; #size-cells = <0>; @@ -521,371 +521,6 @@ &i2c13 { status = "okay"; -}; - -&i2c14 { - status = "okay"; -}; - -&i2c15 { - status = "okay"; -}; - -&i2c0 { - status = "okay"; -}; - -&i2c1 { - status = "okay"; -}; - -&i2c2 { - status = "okay"; -}; - -&i2c3 { - status = "okay"; - - power-supply@68 { - compatible = "ibm,cffps2"; - reg = <0x68>; - }; - - power-supply@69 { - compatible = "ibm,cffps2"; - reg = <0x69>; - }; - - power-supply@6a { - compatible = "ibm,cffps2"; - reg = <0x6a>; - }; - - power-supply@6b { - compatible = "ibm,cffps2"; - reg = <0x6b>; - }; -}; - -&i2c4 { - status = "okay"; - - tmp275@48 { - compatible = "ti,tmp275"; - reg = <0x48>; - }; - - tmp275@49 { - compatible = "ti,tmp275"; - reg = <0x49>; - }; - - tmp275@4a { - compatible = "ti,tmp275"; - reg = <0x4a>; - }; -}; - -&i2c5 { - status = "okay"; - - tmp275@48 { - compatible = "ti,tmp275"; - reg = <0x48>; - }; - - tmp275@49 { - compatible = "ti,tmp275"; - reg = <0x49>; - }; -}; - -&i2c6 { - status = "okay"; - - tmp275@48 { - compatible = "ti,tmp275"; - reg = <0x48>; - }; - - tmp275@4a { - compatible = "ti,tmp275"; - reg = <0x4a>; - }; - - tmp275@4b { - compatible = "ti,tmp275"; - reg = <0x4b>; - }; -}; - -&i2c7 { - status = "okay"; - - si7021-a20@20 { - compatible = "silabs,si7020"; - reg = <0x20>; - }; - - tmp275@48 { - compatible = "ti,tmp275"; - reg = <0x48>; - }; - - max31785@52 { - compatible = "maxim,max31785a"; - reg = <0x52>; - #address-cells = <1>; - #size-cells = <0>; - - fan@0 { - compatible = "pmbus-fan"; - reg = <0>; - tach-pulses = <2>; - }; - - fan@1 { - compatible = "pmbus-fan"; - reg = <1>; - tach-pulses = <2>; - }; - - fan@2 { - compatible = "pmbus-fan"; - reg = <2>; - tach-pulses = <2>; - }; - - fan@3 { - compatible = "pmbus-fan"; - reg = <3>; - tach-pulses = <2>; - }; - }; - - pca0: pca9552@60 { - compatible = "nxp,pca9552"; - reg = <0x60>; - #address-cells = <1>; - #size-cells = <0>; - - gpio-controller; - #gpio-cells = <2>; - - gpio@0 { - reg = <0>; - }; - - gpio@1 { - reg = <1>; - }; - - gpio@2 { - reg = <2>; - }; - - gpio@3 { - reg = <3>; - }; - - gpio@4 { - reg = <4>; - }; - - gpio@5 { - reg = <5>; - }; - - gpio@6 { - reg = <6>; - }; - - gpio@7 { - reg = <7>; - }; - - gpio@8 { - reg = <8>; - }; - - gpio@9 { - reg = <9>; - }; - - gpio@10 { - reg = <10>; - }; - - gpio@11 { - reg = <11>; - }; - - gpio@12 { - reg = <12>; - }; - - gpio@13 { - reg = <13>; - }; - - gpio@14 { - reg = <14>; - }; - - gpio@15 { - reg = <15>; - }; - }; - - dps: dps310@76 { - compatible = "infineon,dps310"; - reg = <0x76>; - #io-channel-cells = <0>; - }; -}; - -&i2c8 { - status = "okay"; - - ucd90320@b { - compatible = "ti,ucd90160"; - reg = <0x0b>; - }; - - ucd90320@c { - compatible = "ti,ucd90160"; - reg = <0x0c>; - }; - - ucd90320@11 { - compatible = "ti,ucd90160"; - reg = <0x11>; - }; - - rtc@32 { - compatible = "epson,rx8900"; - reg = <0x32>; - }; - - tmp275@48 { - compatible = "ti,tmp275"; - reg = <0x48>; - }; - - tmp275@4a { - compatible = "ti,tmp275"; - reg = <0x4a>; - }; -}; - -&i2c9 { - status = "okay"; - - ir35221@42 { - compatible = "infineon,ir35221"; - reg = <0x42>; - }; - - ir35221@43 { - compatible = "infineon,ir35221"; - reg = 
<0x43>; - }; - - ir35221@44 { - compatible = "infineon,ir35221"; - reg = <0x44>; - }; - - tmp423a@4c { - compatible = "ti,tmp423"; - reg = <0x4c>; - }; - - tmp423b@4d { - compatible = "ti,tmp423"; - reg = <0x4d>; - }; - - ir35221@72 { - compatible = "infineon,ir35221"; - reg = <0x72>; - }; - - ir35221@73 { - compatible = "infineon,ir35221"; - reg = <0x73>; - }; - - ir35221@74 { - compatible = "infineon,ir35221"; - reg = <0x74>; - }; -}; - -&i2c10 { - status = "okay"; - - ir35221@42 { - compatible = "infineon,ir35221"; - reg = <0x42>; - }; - - ir35221@43 { - compatible = "infineon,ir35221"; - reg = <0x43>; - }; - - ir35221@44 { - compatible = "infineon,ir35221"; - reg = <0x44>; - }; - - tmp423a@4c { - compatible = "ti,tmp423"; - reg = <0x4c>; - }; - - tmp423b@4d { - compatible = "ti,tmp423"; - reg = <0x4d>; - }; - - ir35221@72 { - compatible = "infineon,ir35221"; - reg = <0x72>; - }; - - ir35221@73 { - compatible = "infineon,ir35221"; - reg = <0x73>; - }; - - ir35221@74 { - compatible = "infineon,ir35221"; - reg = <0x74>; - }; -}; - -&i2c11 { - status = "okay"; - - tmp275@48 { - compatible = "ti,tmp275"; - reg = <0x48>; - }; - - tmp275@49 { - compatible = "ti,tmp275"; - reg = <0x49>; - }; -}; - -&i2c12 { - status = "okay"; -}; - -&i2c13 { - status = "okay"; eeprom@50 { compatible = "atmel,24c64"; diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts index f02de4ab058c..ff49ec76fa7c 100644 --- a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts +++ b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts @@ -122,37 +122,6 @@ }; }; -&fmc { - status = "okay"; - flash@0 { - status = "okay"; - m25p,fast-read; - label = "bmc"; - spi-max-frequency = <50000000>; -#include "openbmc-flash-layout-128.dtsi" - }; - - flash@1 { - status = "okay"; - m25p,fast-read; - label = "alt-bmc"; - spi-max-frequency = <50000000>; - }; -}; - -&spi1 { - status = "okay"; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_spi1_default>; - - flash@0 { - status = "okay"; - m25p,fast-read; - label = "pnor"; - spi-max-frequency = <100000000>; - }; -}; - &mac2 { status = "okay"; pinctrl-names = "default"; @@ -165,6 +134,11 @@ &emmc { status = "okay"; +}; + +&fsim0 { + status = "okay"; + #address-cells = <2>; #size-cells = <0>; @@ -820,373 +794,6 @@ status = "okay"; }; -&i2c0 { - status = "okay"; -}; - -&i2c1 { - status = "okay"; -}; - -&i2c2 { - status = "okay"; -}; - -&i2c3 { - status = "okay"; - - bmp: bmp280@77 { - compatible = "bosch,bmp280"; - reg = <0x77>; - #io-channel-cells = <1>; - }; - - max31785@52 { - compatible = "maxim,max31785a"; - reg = <0x52>; - #address-cells = <1>; - #size-cells = <0>; - - fan@0 { - compatible = "pmbus-fan"; - reg = <0>; - tach-pulses = <2>; - maxim,fan-rotor-input = "tach"; - maxim,fan-pwm-freq = <25000>; - maxim,fan-dual-tach; - maxim,fan-no-watchdog; - maxim,fan-no-fault-ramp; - maxim,fan-ramp = <2>; - maxim,fan-fault-pin-mon; - }; - - fan@1 { - compatible = "pmbus-fan"; - reg = <1>; - tach-pulses = <2>; - maxim,fan-rotor-input = "tach"; - maxim,fan-pwm-freq = <25000>; - maxim,fan-dual-tach; - maxim,fan-no-watchdog; - maxim,fan-no-fault-ramp; - maxim,fan-ramp = <2>; - maxim,fan-fault-pin-mon; - }; - - fan@2 { - compatible = "pmbus-fan"; - reg = <2>; - tach-pulses = <2>; - maxim,fan-rotor-input = "tach"; - maxim,fan-pwm-freq = <25000>; - maxim,fan-dual-tach; - maxim,fan-no-watchdog; - maxim,fan-no-fault-ramp; - maxim,fan-ramp = <2>; - maxim,fan-fault-pin-mon; - }; - - fan@3 { - compatible = "pmbus-fan"; - reg = <3>; - tach-pulses = <2>; - 
maxim,fan-rotor-input = "tach"; - maxim,fan-pwm-freq = <25000>; - maxim,fan-dual-tach; - maxim,fan-no-watchdog; - maxim,fan-no-fault-ramp; - maxim,fan-ramp = <2>; - maxim,fan-fault-pin-mon; - }; - }; - - dps: dps310@76 { - compatible = "infineon,dps310"; - reg = <0x76>; - #io-channel-cells = <0>; - }; - - pca0: pca9552@60 { - compatible = "nxp,pca9552"; - reg = <0x60>; - #address-cells = <1>; - #size-cells = <0>; - - gpio-controller; - #gpio-cells = <2>; - - gpio@0 { - reg = <0>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@1 { - reg = <1>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@2 { - reg = <2>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@3 { - reg = <3>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@4 { - reg = <4>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@5 { - reg = <5>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@6 { - reg = <6>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@7 { - reg = <7>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@8 { - reg = <8>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@9 { - reg = <9>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@10 { - reg = <10>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@11 { - reg = <11>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@12 { - reg = <12>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@13 { - reg = <13>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@14 { - reg = <14>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@15 { - reg = <15>; - type = <PCA955X_TYPE_GPIO>; - }; - }; - - power-supply@68 { - compatible = "ibm,cffps1"; - reg = <0x68>; - }; - - power-supply@69 { - compatible = "ibm,cffps1"; - reg = <0x69>; - }; -}; - -&i2c4 { - status = "okay"; - - tmp423a@4c { - compatible = "ti,tmp423"; - reg = <0x4c>; - }; - - ir35221@70 { - compatible = "infineon,ir35221"; - reg = <0x70>; - }; - - ir35221@71 { - compatible = "infineon,ir35221"; - reg = <0x71>; - }; -}; - -&i2c5 { - status = "okay"; - - tmp423a@4c { - compatible = "ti,tmp423"; - reg = <0x4c>; - }; - - ir35221@70 { - compatible = "infineon,ir35221"; - reg = <0x70>; - }; - - ir35221@71 { - compatible = "infineon,ir35221"; - reg = <0x71>; - }; -}; - -&i2c7 { - status = "okay"; -}; - -&i2c9 { - status = "okay"; - - tmp275@4a { - compatible = "ti,tmp275"; - reg = <0x4a>; - }; -}; - -&i2c10 { - status = "okay"; -}; - -&i2c11 { - status = "okay"; - - pca9552: pca9552@60 { - compatible = "nxp,pca9552"; - reg = <0x60>; - #address-cells = <1>; - #size-cells = <0>; - gpio-controller; - #gpio-cells = <2>; - - gpio-line-names = "PS_SMBUS_RESET_N", "APSS_RESET_N", - "GPU0_TH_OVERT_N_BUFF", "GPU1_TH_OVERT_N_BUFF", - "GPU2_TH_OVERT_N_BUFF", "GPU3_TH_OVERT_N_BUFF", - "GPU4_TH_OVERT_N_BUFF", "GPU5_TH_OVERT_N_BUFF", - "GPU0_PWR_GOOD_BUFF", "GPU1_PWR_GOOD_BUFF", - "GPU2_PWR_GOOD_BUFF", "GPU3_PWR_GOOD_BUFF", - "GPU4_PWR_GOOD_BUFF", "GPU5_PWR_GOOD_BUFF", - "12V_BREAKER_FLT_N", "THROTTLE_UNLATCHED_N"; - - gpio@0 { - reg = <0>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@1 { - reg = <1>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@2 { - reg = <2>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@3 { - reg = <3>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@4 { - reg = <4>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@5 { - reg = <5>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@6 { - reg = <6>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@7 { - reg = <7>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@8 { - reg = <8>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@9 { - reg = <9>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@10 { - reg = <10>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@11 { - reg = <11>; - type = 
<PCA955X_TYPE_GPIO>; - }; - - gpio@12 { - reg = <12>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@13 { - reg = <13>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@14 { - reg = <14>; - type = <PCA955X_TYPE_GPIO>; - }; - - gpio@15 { - reg = <15>; - type = <PCA955X_TYPE_GPIO>; - }; - }; - - rtc@32 { - compatible = "epson,rx8900"; - reg = <0x32>; - }; - - eeprom@51 { - compatible = "atmel,24c64"; - reg = <0x51>; - }; - - ucd90160@64 { - compatible = "ti,ucd90160"; - reg = <0x64>; - }; -}; - -&i2c12 { - status = "okay"; -}; - -&i2c13 { - status = "okay"; -}; - &pinctrl { /* Hog these as no driver is probed for the entire LPC block */ pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi index 5f6142d99eeb..b72afbaadaf8 100644 --- a/arch/arm/boot/dts/aspeed-g6.dtsi +++ b/arch/arm/boot/dts/aspeed-g6.dtsi @@ -163,26 +163,6 @@ spi-max-frequency = <50000000>; status = "disabled"; }; - - fsim0: fsi@1e79b000 { - compatible = "aspeed,ast2600-fsi-master", "fsi-master"; - reg = <0x1e79b000 0x94>; - interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_fsi1_default>; - clocks = <&syscon ASPEED_CLK_GATE_FSICLK>; - status = "disabled"; - }; - - fsim1: fsi@1e79b100 { - compatible = "aspeed,ast2600-fsi-master", "fsi-master"; - reg = <0x1e79b100 0x94>; - interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_fsi2_default>; - clocks = <&syscon ASPEED_CLK_GATE_FSICLK>; - status = "disabled"; - }; }; mdio0: mdio@1e650000 { @@ -595,6 +575,25 @@ ranges = <0 0x1e78a000 0x1000>; }; + fsim0: fsi@1e79b000 { + compatible = "aspeed,ast2600-fsi-master", "fsi-master"; + reg = <0x1e79b000 0x94>; + interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_fsi1_default>; + clocks = <&syscon ASPEED_CLK_GATE_FSICLK>; + status = "disabled"; + }; + + fsim1: fsi@1e79b100 { + compatible = "aspeed,ast2600-fsi-master", "fsi-master"; + reg = <0x1e79b100 0x94>; + interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_fsi2_default>; + clocks = <&syscon ASPEED_CLK_GATE_FSICLK>; + status = "disabled"; + }; }; }; }; diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi index 2dac3efc7640..1bc45cfd5453 100644 --- a/arch/arm/boot/dts/bcm-cygnus.dtsi +++ b/arch/arm/boot/dts/bcm-cygnus.dtsi @@ -174,8 +174,8 @@ mdio: mdio@18002000 { compatible = "brcm,iproc-mdio"; reg = <0x18002000 0x8>; - #size-cells = <1>; - #address-cells = <0>; + #size-cells = <0>; + #address-cells = <1>; status = "disabled"; gphy0: ethernet-phy@0 { diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi index 961bed832755..e2f6ffb00aa9 100644 --- a/arch/arm/boot/dts/bcm2711.dtsi +++ b/arch/arm/boot/dts/bcm2711.dtsi @@ -43,7 +43,7 @@ <0x7c000000 0x0 0xfc000000 0x02000000>, <0x40000000 0x0 0xff800000 0x00800000>; /* Emulate a contiguous 30-bit address range for DMA */ - dma-ranges = <0xc0000000 0x0 0x00000000 0x3c000000>; + dma-ranges = <0xc0000000 0x0 0x00000000 0x40000000>; /* * This node is the provider for the enable-method for diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi index 3caaa57eb6c8..839491628e87 100644 --- a/arch/arm/boot/dts/bcm283x.dtsi +++ b/arch/arm/boot/dts/bcm283x.dtsi @@ -37,7 +37,7 @@ trips { cpu-crit { - temperature = <80000>; + temperature = <90000>; hysteresis = <0>; type = "critical"; }; diff --git a/arch/arm/boot/dts/bcm5301x.dtsi 
b/arch/arm/boot/dts/bcm5301x.dtsi index 372dc1eb88a0..2d9b4dd05830 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ b/arch/arm/boot/dts/bcm5301x.dtsi @@ -353,8 +353,8 @@ mdio: mdio@18003000 { compatible = "brcm,iproc-mdio"; reg = <0x18003000 0x8>; - #size-cells = <1>; - #address-cells = <0>; + #size-cells = <0>; + #address-cells = <1>; }; mdio-bus-mux@18003000 { diff --git a/arch/arm/boot/dts/e60k02.dtsi b/arch/arm/boot/dts/e60k02.dtsi index 6472b056a001..5a2c5320437d 100644 --- a/arch/arm/boot/dts/e60k02.dtsi +++ b/arch/arm/boot/dts/e60k02.dtsi @@ -265,11 +265,6 @@ regulator-name = "LDORTC1"; regulator-boot-on; }; - - ldortc2_reg: LDORTC2 { - regulator-name = "LDORTC2"; - regulator-boot-on; - }; }; }; }; diff --git a/arch/arm/boot/dts/imx6dl-icore-mipi.dts b/arch/arm/boot/dts/imx6dl-icore-mipi.dts index e43bccb78ab2..d8f3821a0ffd 100644 --- a/arch/arm/boot/dts/imx6dl-icore-mipi.dts +++ b/arch/arm/boot/dts/imx6dl-icore-mipi.dts @@ -8,7 +8,7 @@ /dts-v1/; #include "imx6dl.dtsi" -#include "imx6qdl-icore.dtsi" +#include "imx6qdl-icore-1.5.dtsi" / { model = "Engicam i.CoreM6 DualLite/Solo MIPI Starter Kit"; diff --git a/arch/arm/boot/dts/imx6q-dhcom-pdk2.dts b/arch/arm/boot/dts/imx6q-dhcom-pdk2.dts index 5219553df1e7..bb74fc62d913 100644 --- a/arch/arm/boot/dts/imx6q-dhcom-pdk2.dts +++ b/arch/arm/boot/dts/imx6q-dhcom-pdk2.dts @@ -63,7 +63,7 @@ #sound-dai-cells = <0>; clocks = <&clk_ext_audio_codec>; VDDA-supply = <&reg_3p3v>; - VDDIO-supply = <&reg_3p3v>; + VDDIO-supply = <&sw2_reg>; }; }; diff --git a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi index 845cfad99bf9..87f0aa897086 100644 --- a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi +++ b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi @@ -204,7 +204,7 @@ }; rtc@56 { - compatible = "rv3029c2"; + compatible = "microcrystal,rv3029"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_rtc_hw300>; reg = <0x56>; diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi index 71ca76a5e4a5..fe59dde41b64 100644 --- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi @@ -749,10 +749,6 @@ vin-supply = <&vgen5_reg>; }; -&reg_vdd3p0 { - vin-supply = <&sw2_reg>; -}; - &reg_vdd2p5 { vin-supply = <&vgen5_reg>; }; diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts index 4829aa682aeb..bc86cfaaa9c2 100644 --- a/arch/arm/boot/dts/imx6sl-evk.dts +++ b/arch/arm/boot/dts/imx6sl-evk.dts @@ -584,10 +584,6 @@ vin-supply = <&sw2_reg>; }; -&reg_vdd3p0 { - vin-supply = <&sw2_reg>; -}; - &reg_vdd2p5 { vin-supply = <&sw2_reg>; }; diff --git a/arch/arm/boot/dts/imx6sll-evk.dts b/arch/arm/boot/dts/imx6sll-evk.dts index 3e1d32fdf4b8..5ace9e6acf85 100644 --- a/arch/arm/boot/dts/imx6sll-evk.dts +++ b/arch/arm/boot/dts/imx6sll-evk.dts @@ -265,10 +265,6 @@ status = "okay"; }; -&reg_3p0 { - vin-supply = <&sw2_reg>; -}; - &snvs_poweroff { status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6sx-sdb-reva.dts b/arch/arm/boot/dts/imx6sx-sdb-reva.dts index f1830ed387a5..91a7548fdb8d 100644 --- a/arch/arm/boot/dts/imx6sx-sdb-reva.dts +++ b/arch/arm/boot/dts/imx6sx-sdb-reva.dts @@ -159,10 +159,6 @@ vin-supply = <&vgen6_reg>; }; -&reg_vdd3p0 { - vin-supply = <&sw2_reg>; -}; - &reg_vdd2p5 { vin-supply = <&vgen6_reg>; }; diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts index a8ee7087af5a..5a63ca615722 100644 --- a/arch/arm/boot/dts/imx6sx-sdb.dts +++ b/arch/arm/boot/dts/imx6sx-sdb.dts @@ -141,10 +141,6 @@ vin-supply = <&vgen6_reg>; }; -&reg_vdd3p0 { - vin-supply = 
<&sw2_reg>; -}; - &reg_vdd2p5 { vin-supply = <&vgen6_reg>; }; diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi index 1506eb12b21e..212144511b66 100644 --- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi +++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi @@ -30,14 +30,26 @@ enable-active-high; }; - reg_sensors: regulator-sensors { + reg_peri_3v3: regulator-peri-3v3 { compatible = "regulator-fixed"; pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_sensors_reg>; - regulator-name = "sensors-supply"; + pinctrl-0 = <&pinctrl_peri_3v3>; + regulator-name = "VPERI_3V3"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; gpio = <&gpio5 2 GPIO_ACTIVE_LOW>; + /* + * If you want to want to make this dynamic please + * check schematics and test all affected peripherals: + * + * - sensors + * - ethernet phy + * - can + * - bluetooth + * - wm8960 audio codec + * - ov5640 camera + */ + regulator-always-on; }; reg_can_3v3: regulator-can-3v3 { @@ -140,6 +152,7 @@ pinctrl-0 = <&pinctrl_enet1>; phy-mode = "rmii"; phy-handle = <&ethphy0>; + phy-supply = <&reg_peri_3v3>; status = "okay"; }; @@ -148,6 +161,7 @@ pinctrl-0 = <&pinctrl_enet2>; phy-mode = "rmii"; phy-handle = <&ethphy1>; + phy-supply = <&reg_peri_3v3>; status = "okay"; mdio { @@ -193,8 +207,8 @@ magnetometer@e { compatible = "fsl,mag3110"; reg = <0x0e>; - vdd-supply = <&reg_sensors>; - vddio-supply = <&reg_sensors>; + vdd-supply = <&reg_peri_3v3>; + vddio-supply = <&reg_peri_3v3>; }; }; @@ -227,7 +241,7 @@ flash0: n25q256a@0 { #address-cells = <1>; #size-cells = <1>; - compatible = "micron,n25q256a"; + compatible = "micron,n25q256a", "jedec,spi-nor"; spi-max-frequency = <29000000>; spi-rx-bus-width = <4>; spi-tx-bus-width = <4>; @@ -462,7 +476,7 @@ >; }; - pinctrl_sensors_reg: sensorsreggrp { + pinctrl_peri_3v3: peri3v3grp { fsl,pins = < MX6UL_PAD_SNVS_TAMPER2__GPIO5_IO02 0x1b0b0 >; diff --git a/arch/arm/boot/dts/imx7s-colibri.dtsi b/arch/arm/boot/dts/imx7s-colibri.dtsi index 1fb1ec5d3d70..6d16e32aed89 100644 --- a/arch/arm/boot/dts/imx7s-colibri.dtsi +++ b/arch/arm/boot/dts/imx7s-colibri.dtsi @@ -49,3 +49,7 @@ reg = <0x80000000 0x10000000>; }; }; + +&gpmi { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi index d37a1927c88e..ab91c98f2124 100644 --- a/arch/arm/boot/dts/imx7ulp.dtsi +++ b/arch/arm/boot/dts/imx7ulp.dtsi @@ -37,10 +37,10 @@ #address-cells = <1>; #size-cells = <0>; - cpu0: cpu@0 { + cpu0: cpu@f00 { compatible = "arm,cortex-a7"; device_type = "cpu"; - reg = <0>; + reg = <0xf00>; }; }; diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi index 5a7e3e5caebe..3c534cd50ee3 100644 --- a/arch/arm/boot/dts/meson8.dtsi +++ b/arch/arm/boot/dts/meson8.dtsi @@ -253,7 +253,7 @@ &aobus { pmu: pmu@e0 { compatible = "amlogic,meson8-pmu", "syscon"; - reg = <0xe0 0x8>; + reg = <0xe0 0x18>; }; pinctrl_aobus: pinctrl@84 { diff --git a/arch/arm/boot/dts/mmp3.dtsi b/arch/arm/boot/dts/mmp3.dtsi index d9762de0ed34..6f480827b94d 100644 --- a/arch/arm/boot/dts/mmp3.dtsi +++ b/arch/arm/boot/dts/mmp3.dtsi @@ -356,7 +356,7 @@ twsi1: i2c@d4011000 { compatible = "mrvl,mmp-twsi"; - reg = <0xd4011000 0x1000>; + reg = <0xd4011000 0x70>; interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>; clocks = <&soc_clocks MMP2_CLK_TWSI0>; resets = <&soc_clocks MMP2_CLK_TWSI0>; @@ -368,7 +368,7 @@ twsi2: i2c@d4031000 { compatible = "mrvl,mmp-twsi"; - reg = <0xd4031000 0x1000>; + reg = <0xd4031000 0x70>; interrupt-parent = <&twsi_mux>; interrupts = <0>; clocks = <&soc_clocks 
MMP2_CLK_TWSI1>; @@ -380,7 +380,7 @@ twsi3: i2c@d4032000 { compatible = "mrvl,mmp-twsi"; - reg = <0xd4032000 0x1000>; + reg = <0xd4032000 0x70>; interrupt-parent = <&twsi_mux>; interrupts = <1>; clocks = <&soc_clocks MMP2_CLK_TWSI2>; @@ -392,7 +392,7 @@ twsi4: i2c@d4033000 { compatible = "mrvl,mmp-twsi"; - reg = <0xd4033000 0x1000>; + reg = <0xd4033000 0x70>; interrupt-parent = <&twsi_mux>; interrupts = <2>; clocks = <&soc_clocks MMP2_CLK_TWSI3>; @@ -405,7 +405,7 @@ twsi5: i2c@d4033800 { compatible = "mrvl,mmp-twsi"; - reg = <0xd4033800 0x1000>; + reg = <0xd4033800 0x70>; interrupt-parent = <&twsi_mux>; interrupts = <3>; clocks = <&soc_clocks MMP2_CLK_TWSI4>; @@ -417,7 +417,7 @@ twsi6: i2c@d4034000 { compatible = "mrvl,mmp-twsi"; - reg = <0xd4034000 0x1000>; + reg = <0xd4034000 0x70>; interrupt-parent = <&twsi_mux>; interrupts = <4>; clocks = <&soc_clocks MMP2_CLK_TWSI5>; diff --git a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts index fb928503ad45..d9be511f054f 100644 --- a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts +++ b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts @@ -101,7 +101,7 @@ initial-mode = <1>; /* initialize in HUB mode */ disabled-ports = <1>; intn-gpios = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */ - reset-gpios = <&pio 4 16 GPIO_ACTIVE_HIGH>; /* PE16 */ + reset-gpios = <&pio 4 16 GPIO_ACTIVE_LOW>; /* PE16 */ connect-gpios = <&pio 4 17 GPIO_ACTIVE_HIGH>; /* PE17 */ refclk-frequency = <19200000>; }; diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig index e7e4bb5ad8d5..fde84f123fbb 100644 --- a/arch/arm/configs/exynos_defconfig +++ b/arch/arm/configs/exynos_defconfig @@ -350,6 +350,7 @@ CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y CONFIG_DEBUG_INFO=y CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_SOFTLOCKUP_DETECTOR=y # CONFIG_DETECT_HUNG_TASK is not set diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index 26d6dee67aa6..3608e55eaecd 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig @@ -462,6 +462,7 @@ CONFIG_FONT_8x8=y CONFIG_FONT_8x16=y CONFIG_PRINTK_TIME=y CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y # CONFIG_SCHED_DEBUG is not set CONFIG_PROVE_LOCKING=y # CONFIG_DEBUG_BUGVERBOSE is not set diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 8c37cc8ab6f2..c32c338f7704 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -92,6 +92,7 @@ CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y CONFIG_NETFILTER=y CONFIG_PHONET=m +CONFIG_NET_SWITCHDEV=y CONFIG_CAN=m CONFIG_CAN_C_CAN=m CONFIG_CAN_C_CAN_PLATFORM=m @@ -181,6 +182,7 @@ CONFIG_SMSC911X=y # CONFIG_NET_VENDOR_STMICRO is not set CONFIG_TI_DAVINCI_EMAC=y CONFIG_TI_CPSW=y +CONFIG_TI_CPSW_SWITCHDEV=y CONFIG_TI_CPTS=y # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set @@ -554,6 +556,6 @@ CONFIG_DEBUG_INFO=y CONFIG_DEBUG_INFO_SPLIT=y CONFIG_DEBUG_INFO_DWARF4=y CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y CONFIG_SCHEDSTATS=y # CONFIG_DEBUG_BUGVERBOSE is not set -CONFIG_TI_CPSW_SWITCHDEV=y diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig index bda57cafa2bc..de3830443613 100644 --- a/arch/arm/configs/shmobile_defconfig +++ b/arch/arm/configs/shmobile_defconfig @@ -212,4 +212,5 @@ CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=64 CONFIG_PRINTK_TIME=y # CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_DEBUG_FS=y 
CONFIG_DEBUG_KERNEL=y diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index cea1c27c29cb..46e478fb5ea2 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -226,8 +226,8 @@ void release_thread(struct task_struct *dead_task) asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); int -copy_thread(unsigned long clone_flags, unsigned long stack_start, - unsigned long stk_sz, struct task_struct *p) +copy_thread_tls(unsigned long clone_flags, unsigned long stack_start, + unsigned long stk_sz, struct task_struct *p, unsigned long tls) { struct thread_info *thread = task_thread_info(p); struct pt_regs *childregs = task_pt_regs(p); @@ -261,7 +261,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, clear_ptrace_hw_breakpoint(p); if (clone_flags & CLONE_SETTLS) - thread->tp_value[0] = childregs->ARM_r3; + thread->tp_value[0] = tls; thread->tp_value[1] = get_tpuser(); thread_notify(THREAD_NOTIFY_COPY, thread); diff --git a/arch/arm/mach-bcm/bcm2711.c b/arch/arm/mach-bcm/bcm2711.c index dbe296798647..fa0300d8c79d 100644 --- a/arch/arm/mach-bcm/bcm2711.c +++ b/arch/arm/mach-bcm/bcm2711.c @@ -13,6 +13,7 @@ static const char * const bcm2711_compat[] = { #ifdef CONFIG_ARCH_MULTI_V7 "brcm,bcm2711", #endif + NULL }; DT_MACHINE_START(BCM2711, "BCM2711") diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig index dd427bd2768c..02b180ad7245 100644 --- a/arch/arm/mach-davinci/Kconfig +++ b/arch/arm/mach-davinci/Kconfig @@ -9,6 +9,7 @@ menuconfig ARCH_DAVINCI select PM_GENERIC_DOMAINS if PM select PM_GENERIC_DOMAINS_OF if PM && OF select REGMAP_MMIO + select RESET_CONTROLLER select HAVE_IDE select PINCTRL_SINGLE diff --git a/arch/arm/mach-imx/cpu.c b/arch/arm/mach-imx/cpu.c index d8118031c51f..871f98342d50 100644 --- a/arch/arm/mach-imx/cpu.c +++ b/arch/arm/mach-imx/cpu.c @@ -84,7 +84,7 @@ struct device * __init imx_soc_device_init(void) const char *ocotp_compat = NULL; struct soc_device *soc_dev; struct device_node *root; - struct regmap *ocotp; + struct regmap *ocotp = NULL; const char *soc_id; u64 soc_uid = 0; u32 val; @@ -148,11 +148,11 @@ struct device * __init imx_soc_device_init(void) soc_id = "i.MX6UL"; break; case MXC_CPU_IMX6ULL: - ocotp_compat = "fsl,imx6ul-ocotp"; + ocotp_compat = "fsl,imx6ull-ocotp"; soc_id = "i.MX6ULL"; break; case MXC_CPU_IMX6ULZ: - ocotp_compat = "fsl,imx6ul-ocotp"; + ocotp_compat = "fsl,imx6ull-ocotp"; soc_id = "i.MX6ULZ"; break; case MXC_CPU_IMX6SLL: @@ -175,7 +175,9 @@ struct device * __init imx_soc_device_init(void) ocotp = syscon_regmap_lookup_by_compatible(ocotp_compat); if (IS_ERR(ocotp)) pr_err("%s: failed to find %s regmap!\n", __func__, ocotp_compat); + } + if (!IS_ERR_OR_NULL(ocotp)) { regmap_read(ocotp, OCOTP_UID_H, &val); soc_uid = val; regmap_read(ocotp, OCOTP_UID_L, &val); diff --git a/arch/arm/mach-mmp/pxa168.h b/arch/arm/mach-mmp/pxa168.h index 0331c58b07a2..dff651b9f252 100644 --- a/arch/arm/mach-mmp/pxa168.h +++ b/arch/arm/mach-mmp/pxa168.h @@ -17,9 +17,9 @@ extern void pxa168_clear_keypad_wakeup(void); #include <linux/platform_data/keypad-pxa27x.h> #include <linux/pxa168_eth.h> #include <linux/platform_data/mv_usb.h> +#include <linux/soc/mmp/cputype.h> #include "devices.h" -#include "cputype.h" extern struct pxa_device_desc pxa168_device_uart1; extern struct pxa_device_desc pxa168_device_uart2; diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c index 110dcb3314d1..c65cfc1ad99b 100644 --- a/arch/arm/mach-mmp/time.c +++ b/arch/arm/mach-mmp/time.c @@ -207,7 +207,7 
@@ static int __init mmp_dt_init_timer(struct device_node *np) ret = clk_prepare_enable(clk); if (ret) return ret; - rate = clk_get_rate(clk) / 2; + rate = clk_get_rate(clk); } else if (cpu_is_pj4()) { rate = 6500000; } else { diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index ad08d470a2ca..dca7d06c0b93 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -95,6 +95,7 @@ config ARCH_OMAP2PLUS bool select ARCH_HAS_BANDGAP select ARCH_HAS_HOLES_MEMORYMODEL + select ARCH_HAS_RESET_CONTROLLER select ARCH_OMAP select CLKSRC_MMIO select GENERIC_IRQ_CHIP @@ -105,11 +106,11 @@ config ARCH_OMAP2PLUS select OMAP_DM_TIMER select OMAP_GPMC select PINCTRL + select RESET_CONTROLLER select SOC_BUS select TI_SYSC select OMAP_IRQCHIP select CLKSRC_TI_32K - select ARCH_HAS_RESET_CONTROLLER help Systems based on OMAP2, OMAP3, OMAP4 or OMAP5 diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index ca52271de5a8..e95c224ffc4d 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -306,10 +306,14 @@ static void __init dra7x_evm_mmc_quirk(void) static struct clockdomain *ti_sysc_find_one_clockdomain(struct clk *clk) { + struct clk_hw *hw = __clk_get_hw(clk); struct clockdomain *clkdm = NULL; struct clk_hw_omap *hwclk; - hwclk = to_clk_hw_omap(__clk_get_hw(clk)); + hwclk = to_clk_hw_omap(hw); + if (!omap2_clk_is_hw_omap(hw)) + return NULL; + if (hwclk && hwclk->clkdm_name) clkdm = clkdm_lookup(hwclk->clkdm_name); diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c index 354e0e7025ae..1da11bdb1dfb 100644 --- a/arch/arm/mach-vexpress/spc.c +++ b/arch/arm/mach-vexpress/spc.c @@ -551,8 +551,9 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev) static int __init ve_spc_clk_init(void) { - int cpu; + int cpu, cluster; struct clk *clk; + bool init_opp_table[MAX_CLUSTERS] = { false }; if (!info) return 0; /* Continue only if SPC is initialised */ @@ -578,8 +579,17 @@ static int __init ve_spc_clk_init(void) continue; } + cluster = topology_physical_package_id(cpu_dev->id); + if (init_opp_table[cluster]) + continue; + if (ve_init_opp_table(cpu_dev)) pr_warn("failed to initialise cpu%d opp table\n", cpu); + else if (dev_pm_opp_set_sharing_cpus(cpu_dev, + topology_core_cpumask(cpu_dev->id))) + pr_warn("failed to mark OPPs shared for cpu%d\n", cpu); + else + init_opp_table[cluster] = true; } platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0); diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index b1b4476ddb83..e688dfad0b72 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -138,6 +138,7 @@ config ARM64 select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL select HAVE_CONTEXT_TRACKING + select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_BUGVERBOSE select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino-emmc.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino-emmc.dts index 96ab0227e82d..121e6cc4849b 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino-emmc.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino-emmc.dts @@ -15,7 +15,7 @@ pinctrl-names = "default"; pinctrl-0 = <&mmc2_pins>; vmmc-supply = <&reg_dcdc1>; - vqmmc-supply = <&reg_dcdc1>; + vqmmc-supply = <&reg_eldo1>; bus-width = <8>; non-removable; cap-mmc-hw-reset; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts index 
01a9a52edae4..393c1948a495 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts @@ -140,7 +140,7 @@ &mmc1 { pinctrl-names = "default"; pinctrl-0 = <&mmc1_pins>; - vmmc-supply = <&reg_aldo2>; + vmmc-supply = <&reg_dcdc1>; vqmmc-supply = <&reg_dldo4>; mmc-pwrseq = <&wifi_pwrseq>; bus-width = <4>; diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi index 144a2c19ac02..d1fc9c2055f4 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi @@ -61,10 +61,10 @@ pmu { compatible = "arm,armv8-pmuv3"; - interrupts = <0 120 8>, - <0 121 8>, - <0 122 8>, - <0 123 8>; + interrupts = <0 170 4>, + <0 171 4>, + <0 172 4>, + <0 173 4>; interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts index 5bd07469766b..a8bb3fa9fec9 100644 --- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts +++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts @@ -46,25 +46,47 @@ }; gpio-keys { - compatible = "gpio-keys-polled"; - poll-interval = <100>; + compatible = "gpio-keys"; key1 { label = "A"; linux,code = <BTN_0>; gpios = <&gpio GPIOH_6 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio_intc>; + interrupts = <34 IRQ_TYPE_EDGE_BOTH>; }; key2 { label = "B"; linux,code = <BTN_1>; gpios = <&gpio GPIOH_7 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio_intc>; + interrupts = <35 IRQ_TYPE_EDGE_BOTH>; }; key3 { label = "C"; linux,code = <BTN_2>; gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio_intc>; + interrupts = <2 IRQ_TYPE_EDGE_BOTH>; + }; + + mic_mute { + label = "MicMute"; + linux,code = <SW_MUTE_DEVICE>; + linux,input-type = <EV_SW>; + gpios = <&gpio_ao GPIOE_2 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio_intc>; + interrupts = <99 IRQ_TYPE_EDGE_BOTH>; + }; + + power_key { + label = "PowerKey"; + linux,code = <KEY_POWER>; + gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio_intc>; + interrupts = <3 IRQ_TYPE_EDGE_BOTH>; }; }; @@ -569,6 +591,8 @@ bluetooth { compatible = "brcm,bcm43438-bt"; + interrupt-parent = <&gpio_intc>; + interrupts = <95 IRQ_TYPE_LEVEL_HIGH>; shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>; max-speed = <2000000>; clocks = <&wifi32k>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi index 8e8a77eb596a..a6f9b7784e8f 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi @@ -88,7 +88,7 @@ reboot { compatible ="syscon-reboot"; - regmap = <&dcfg>; + regmap = <&rst>; offset = <0xb0>; mask = <0x02>; }; @@ -175,7 +175,13 @@ dcfg: syscon@1e00000 { compatible = "fsl,ls1028a-dcfg", "syscon"; reg = <0x0 0x1e00000 0x0 0x10000>; - big-endian; + little-endian; + }; + + rst: syscon@1e60000 { + compatible = "syscon"; + reg = <0x0 0x1e60000 0x0 0x10000>; + little-endian; }; scfg: syscon@1fc0000 { @@ -584,7 +590,7 @@ 0x00010004 0x0000003d 0x00010005 0x00000045 0x00010006 0x0000004d - 0x00010007 0x00000045 + 0x00010007 0x00000055 0x00010008 0x0000005e 0x00010009 0x00000066 0x0001000a 0x0000006e diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi index 6edbdfe2d0d7..3d95b66a2d71 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi @@ -740,7 +740,7 @@ reg = <0x30bd0000 0x10000>; 
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MM_CLK_SDMA1_ROOT>, - <&clk IMX8MM_CLK_SDMA1_ROOT>; + <&clk IMX8MM_CLK_AHB>; clock-names = "ipg", "ahb"; #dma-cells = <3>; fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin"; diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts index 2a759dff9f87..596bc65f475c 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts +++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts @@ -421,7 +421,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_imu>; interrupt-parent = <&gpio3>; - interrupts = <19 IRQ_TYPE_LEVEL_LOW>; + interrupts = <19 IRQ_TYPE_LEVEL_HIGH>; vdd-supply = <&reg_3v3_p>; vddio-supply = <&reg_3v3_p>; }; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi index 94090c6fb946..d43e1299c8ef 100644 --- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi +++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi @@ -60,10 +60,10 @@ pmu { compatible = "arm,armv8-pmuv3"; - interrupts = <0 120 8>, - <0 121 8>, - <0 122 8>, - <0 123 8>; + interrupts = <0 170 4>, + <0 171 4>, + <0 172 4>, + <0 173 4>; interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, diff --git a/arch/arm64/boot/dts/rockchip/rk3328-a1.dts b/arch/arm64/boot/dts/rockchip/rk3328-a1.dts index 76b49f573101..16f1656d5203 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-a1.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-a1.dts @@ -49,7 +49,8 @@ ir-receiver { compatible = "gpio-ir-receiver"; - gpios = <&gpio2 RK_PA2 GPIO_ACTIVE_HIGH>; + gpios = <&gpio2 RK_PA2 GPIO_ACTIVE_LOW>; + linux,rc-map-name = "rc-beelink-gs1"; }; }; diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 8dc6c5cdabe6..baf52baaa2a5 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -85,13 +85,12 @@ #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE) #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) #define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN) -#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN) #define __P000 PAGE_NONE #define __P001 PAGE_READONLY #define __P010 PAGE_READONLY #define __P011 PAGE_READONLY -#define __P100 PAGE_EXECONLY +#define __P100 PAGE_READONLY_EXEC #define __P101 PAGE_READONLY_EXEC #define __P110 PAGE_READONLY_EXEC #define __P111 PAGE_READONLY_EXEC @@ -100,7 +99,7 @@ #define __S001 PAGE_READONLY #define __S010 PAGE_SHARED #define __S011 PAGE_SHARED -#define __S100 PAGE_EXECONLY +#define __S100 PAGE_READONLY_EXEC #define __S101 PAGE_READONLY_EXEC #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 5d15b4735a0e..cd5de0e40bfa 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -96,12 +96,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte)) #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID)) -/* - * Execute-only user mappings do not have the PTE_USER bit set. All valid - * kernel mappings have the PTE_UXN bit set. 
- */ #define pte_valid_not_user(pte) \ - ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN)) + ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID) #define pte_valid_young(pte) \ ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF)) #define pte_valid_user(pte) \ @@ -117,8 +113,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; /* * p??_access_permitted() is true for valid user mappings (subject to the - * write permission check) other than user execute-only which do not have the - * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set. + * write permission check). PROT_NONE mappings do not have the PTE_VALID bit + * set. */ #define pte_access_permitted(pte, write) \ (pte_valid_user(pte) && (!(write) || pte_write(pte))) diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 2629a68b8724..5af82587909e 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -42,7 +42,6 @@ #endif #define __ARCH_WANT_SYS_CLONE -#define __ARCH_WANT_SYS_CLONE3 #ifndef __COMPAT_SYSCALL_NR #include <uapi/asm/unistd.h> diff --git a/arch/arm64/include/uapi/asm/unistd.h b/arch/arm64/include/uapi/asm/unistd.h index 4703d218663a..f83a70e07df8 100644 --- a/arch/arm64/include/uapi/asm/unistd.h +++ b/arch/arm64/include/uapi/asm/unistd.h @@ -19,5 +19,6 @@ #define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_TIME32_SYSCALLS +#define __ARCH_WANT_SYS_CLONE3 #include <asm-generic/unistd.h> diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 6a09ca7644ea..85f4bec22f6d 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -547,6 +547,7 @@ static const struct midr_range spectre_v2_safe_list[] = { MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), + MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), { /* sentinel */ } }; diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 71f788cd2b18..d54586d5b031 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -360,8 +360,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) asmlinkage void ret_from_fork(void) asm("ret_from_fork"); -int copy_thread(unsigned long clone_flags, unsigned long stack_start, - unsigned long stk_sz, struct task_struct *p) +int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start, + unsigned long stk_sz, struct task_struct *p, unsigned long tls) { struct pt_regs *childregs = task_pt_regs(p); @@ -394,11 +394,11 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, } /* - * If a TLS pointer was passed to clone (4th argument), use it - * for the new thread. + * If a TLS pointer was passed to clone, use it for the new + * thread. 
*/ if (clone_flags & CLONE_SETTLS) - p->thread.uw.tp_value = childregs->regs[3]; + p->thread.uw.tp_value = tls; } else { memset(childregs, 0, sizeof(struct pt_regs)); childregs->pstate = PSR_MODE_EL1h; diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 46822afc57e0..9f2165937f7d 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2098,9 +2098,9 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu, WARN_ON(1); } - kvm_err("Unsupported guest CP%d access at: %08lx [%08lx]\n", - cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); - print_sys_reg_instr(params); + print_sys_reg_msg(params, + "Unsupported guest CP%d access at: %08lx [%08lx]\n", + cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); kvm_inject_undefined(vcpu); } @@ -2233,6 +2233,12 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) NULL, 0); } +static bool is_imp_def_sys_reg(struct sys_reg_params *params) +{ + // See ARM DDI 0487E.a, section D12.3.2 + return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011; +} + static int emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params) { @@ -2248,10 +2254,12 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu, if (likely(r)) { perform_access(vcpu, params, r); + } else if (is_imp_def_sys_reg(params)) { + kvm_inject_undefined(vcpu); } else { - kvm_err("Unsupported guest sys_reg access at: %lx [%08lx]\n", - *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); - print_sys_reg_instr(params); + print_sys_reg_msg(params, + "Unsupported guest sys_reg access at: %lx [%08lx]\n", + *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); kvm_inject_undefined(vcpu); } return 1; @@ -2360,8 +2368,11 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu, if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG) return NULL; + if (!index_to_params(id, &params)) + return NULL; + table = get_target_table(vcpu->arch.target, true, &num); - r = find_reg_by_id(id, &params, table, num); + r = find_reg(&params, table, num); if (!r) r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h index 9bca0312d798..5a6fc30f5989 100644 --- a/arch/arm64/kvm/sys_regs.h +++ b/arch/arm64/kvm/sys_regs.h @@ -62,11 +62,24 @@ struct sys_reg_desc { #define REG_HIDDEN_USER (1 << 0) /* hidden from userspace ioctls */ #define REG_HIDDEN_GUEST (1 << 1) /* hidden from guest */ -static inline void print_sys_reg_instr(const struct sys_reg_params *p) +static __printf(2, 3) +inline void print_sys_reg_msg(const struct sys_reg_params *p, + char *fmt, ...) { + va_list va; + + va_start(va, fmt); /* Look, we even formatted it for you to paste into the table! */ - kvm_pr_unimpl(" { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n", + kvm_pr_unimpl("%pV { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n", + &(struct va_format){ fmt, &va }, p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? 
"write" : "read"); + va_end(va); +} + +static inline void print_sys_reg_instr(const struct sys_reg_params *p) +{ + /* GCC warns on an empty format string */ + print_sys_reg_msg(p, "%s", ""); } static inline bool ignore_write(struct kvm_vcpu *vcpu, diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 077b02a2d4d3..85566d32958f 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -445,7 +445,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, const struct fault_info *inf; struct mm_struct *mm = current->mm; vm_fault_t fault, major = 0; - unsigned long vm_flags = VM_READ | VM_WRITE; + unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC; unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; if (kprobe_page_fault(regs, esr)) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 5a3b15a14a7f..40797cbfba2d 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1070,7 +1070,6 @@ void arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; /* * FIXME: Cleanup page tables (also in arch_add_memory() in case @@ -1079,7 +1078,6 @@ void arch_remove_memory(int nid, u64 start, u64 size, * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be * unlocked yet. */ - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); } #endif diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h index 12cd9231c4b8..0231d69c8bf2 100644 --- a/arch/hexagon/include/asm/atomic.h +++ b/arch/hexagon/include/asm/atomic.h @@ -91,7 +91,7 @@ static inline void atomic_##op(int i, atomic_t *v) \ "1: %0 = memw_locked(%1);\n" \ " %0 = "#op "(%0,%2);\n" \ " memw_locked(%1,P3)=%0;\n" \ - " if !P3 jump 1b;\n" \ + " if (!P3) jump 1b;\n" \ : "=&r" (output) \ : "r" (&v->counter), "r" (i) \ : "memory", "p3" \ @@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ "1: %0 = memw_locked(%1);\n" \ " %0 = "#op "(%0,%2);\n" \ " memw_locked(%1,P3)=%0;\n" \ - " if !P3 jump 1b;\n" \ + " if (!P3) jump 1b;\n" \ : "=&r" (output) \ : "r" (&v->counter), "r" (i) \ : "memory", "p3" \ @@ -124,7 +124,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \ "1: %0 = memw_locked(%2);\n" \ " %1 = "#op "(%0,%3);\n" \ " memw_locked(%2,P3)=%1;\n" \ - " if !P3 jump 1b;\n" \ + " if (!P3) jump 1b;\n" \ : "=&r" (output), "=&r" (val) \ : "r" (&v->counter), "r" (i) \ : "memory", "p3" \ @@ -173,7 +173,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) " }" " memw_locked(%2, p3) = %1;" " {" - " if !p3 jump 1b;" + " if (!p3) jump 1b;" " }" "2:" : "=&r" (__oldval), "=&r" (tmp) diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h index 47384b094b94..71429f756af0 100644 --- a/arch/hexagon/include/asm/bitops.h +++ b/arch/hexagon/include/asm/bitops.h @@ -38,7 +38,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr) "1: R12 = memw_locked(R10);\n" " { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n" " memw_locked(R10,P1) = R12;\n" - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" : "=&r" (oldval) : "r" (addr), "r" (nr) : "r10", "r11", "r12", "p0", "p1", "memory" @@ -62,7 +62,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr) "1: R12 = memw_locked(R10);\n" " { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n" " 
memw_locked(R10,P1) = R12;\n" - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" : "=&r" (oldval) : "r" (addr), "r" (nr) : "r10", "r11", "r12", "p0", "p1", "memory" @@ -88,7 +88,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr) "1: R12 = memw_locked(R10);\n" " { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n" " memw_locked(R10,P1) = R12;\n" - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" : "=&r" (oldval) : "r" (addr), "r" (nr) : "r10", "r11", "r12", "p0", "p1", "memory" @@ -223,7 +223,7 @@ static inline int ffs(int x) int r; asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n" - "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n" + "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n" : "=&r" (r) : "r" (x) : "p0"); diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h index 6091322c3af9..92b8a02e588a 100644 --- a/arch/hexagon/include/asm/cmpxchg.h +++ b/arch/hexagon/include/asm/cmpxchg.h @@ -30,7 +30,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, __asm__ __volatile__ ( "1: %0 = memw_locked(%1);\n" /* load into retval */ " memw_locked(%1,P0) = %2;\n" /* store into memory */ - " if !P0 jump 1b;\n" + " if (!P0) jump 1b;\n" : "=&r" (retval) : "r" (ptr), "r" (x) : "memory", "p0" diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h index cb635216a732..0191f7c7193e 100644 --- a/arch/hexagon/include/asm/futex.h +++ b/arch/hexagon/include/asm/futex.h @@ -16,7 +16,7 @@ /* For example: %1 = %4 */ \ insn \ "2: memw_locked(%3,p2) = %1;\n" \ - " if !p2 jump 1b;\n" \ + " if (!p2) jump 1b;\n" \ " %1 = #0;\n" \ "3:\n" \ ".section .fixup,\"ax\"\n" \ @@ -84,10 +84,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, "1: %1 = memw_locked(%3)\n" " {\n" " p2 = cmp.eq(%1,%4)\n" - " if !p2.new jump:NT 3f\n" + " if (!p2.new) jump:NT 3f\n" " }\n" "2: memw_locked(%3,p2) = %5\n" - " if !p2 jump 1b\n" + " if (!p2) jump 1b\n" "3:\n" ".section .fixup,\"ax\"\n" "4: %0 = #%6\n" diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h index 539e3efcf39c..b0dbc3473172 100644 --- a/arch/hexagon/include/asm/io.h +++ b/arch/hexagon/include/asm/io.h @@ -173,6 +173,7 @@ static inline void writel(u32 data, volatile void __iomem *addr) void __iomem *ioremap(unsigned long phys_addr, unsigned long size); #define ioremap_nocache ioremap +#define ioremap_uc(X, Y) ioremap((X), (Y)) #define __raw_writel writel diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h index bfe07d842ff3..ef103b73bec8 100644 --- a/arch/hexagon/include/asm/spinlock.h +++ b/arch/hexagon/include/asm/spinlock.h @@ -30,9 +30,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock) __asm__ __volatile__( "1: R6 = memw_locked(%0);\n" " { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" " memw_locked(%0,P3) = R6;\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" : : "r" (&lock->lock) : "memory", "r6", "p3" @@ -46,7 +46,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock) "1: R6 = memw_locked(%0);\n" " R6 = add(R6,#-1);\n" " memw_locked(%0,P3) = R6\n" - " if !P3 jump 1b;\n" + " if (!P3) jump 1b;\n" : : "r" (&lock->lock) : "memory", "r6", "p3" @@ -61,7 +61,7 @@ static inline int arch_read_trylock(arch_rwlock_t *lock) __asm__ __volatile__( " R6 = memw_locked(%1);\n" " { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n" - " { if !P3 jump 1f; }\n" + " 
{ if (!P3) jump 1f; }\n" " memw_locked(%1,P3) = R6;\n" " { %0 = P3 }\n" "1:\n" @@ -78,9 +78,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock) __asm__ __volatile__( "1: R6 = memw_locked(%0)\n" " { P3 = cmp.eq(R6,#0); R6 = #-1;}\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" " memw_locked(%0,P3) = R6;\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" : : "r" (&lock->lock) : "memory", "r6", "p3" @@ -94,7 +94,7 @@ static inline int arch_write_trylock(arch_rwlock_t *lock) __asm__ __volatile__( " R6 = memw_locked(%1)\n" " { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n" - " { if !P3 jump 1f; }\n" + " { if (!P3) jump 1f; }\n" " memw_locked(%1,P3) = R6;\n" " %0 = P3;\n" "1:\n" @@ -117,9 +117,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) __asm__ __volatile__( "1: R6 = memw_locked(%0);\n" " P3 = cmp.eq(R6,#0);\n" - " { if !P3 jump 1b; R6 = #1; }\n" + " { if (!P3) jump 1b; R6 = #1; }\n" " memw_locked(%0,P3) = R6;\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" : : "r" (&lock->lock) : "memory", "r6", "p3" @@ -139,7 +139,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) __asm__ __volatile__( " R6 = memw_locked(%1);\n" " P3 = cmp.eq(R6,#0);\n" - " { if !P3 jump 1f; R6 = #1; %0 = #0; }\n" + " { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n" " memw_locked(%1,P3) = R6;\n" " %0 = P3;\n" "1:\n" diff --git a/arch/hexagon/kernel/stacktrace.c b/arch/hexagon/kernel/stacktrace.c index 35f29423fda8..5ed02f699479 100644 --- a/arch/hexagon/kernel/stacktrace.c +++ b/arch/hexagon/kernel/stacktrace.c @@ -11,8 +11,6 @@ #include <linux/thread_info.h> #include <linux/module.h> -register unsigned long current_frame_pointer asm("r30"); - struct stackframe { unsigned long fp; unsigned long rets; @@ -30,7 +28,7 @@ void save_stack_trace(struct stack_trace *trace) low = (unsigned long)task_stack_page(current); high = low + THREAD_SIZE; - fp = current_frame_pointer; + fp = (unsigned long)__builtin_frame_address(0); while (fp >= low && fp <= (high - sizeof(*frame))) { frame = (struct stackframe *)fp; diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S index 12242c27e2df..4023fdbea490 100644 --- a/arch/hexagon/kernel/vm_entry.S +++ b/arch/hexagon/kernel/vm_entry.S @@ -369,7 +369,7 @@ ret_from_fork: R26.L = #LO(do_work_pending); R0 = #VM_INT_DISABLE; } - if P0 jump check_work_pending + if (P0) jump check_work_pending { R0 = R25; callr R24 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 58fd67068bac..b01d68a2d5d9 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -689,9 +689,7 @@ void arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); } #endif diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index add388236f4e..ed8e28b0fb3e 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -47,7 +47,7 @@ config MIPS select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES select HAVE_ASM_MODVERSIONS - select HAVE_EBPF_JIT if (!CPU_MICROMIPS) + select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2 select HAVE_CONTEXT_TRACKING select HAVE_COPY_THREAD_TLS select HAVE_C_RECORDMCOUNT diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index 172801ed35b8..d859f079b771 100644 --- 
a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -29,6 +29,9 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \ -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \ -DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS) +# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. +KCOV_INSTRUMENT := n + # decompressor objects (linked with vmlinuz) vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h index c46c59b0f1b4..49f0061a6051 100644 --- a/arch/mips/include/asm/cpu-type.h +++ b/arch/mips/include/asm/cpu-type.h @@ -15,7 +15,8 @@ static inline int __pure __get_cpu_type(const int cpu_type) { switch (cpu_type) { -#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2EF) +#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2E) || \ + defined(CONFIG_SYS_HAS_CPU_LOONGSON2F) case CPU_LOONGSON2EF: #endif diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 4993db40482c..ee26f9a4575d 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -49,8 +49,26 @@ struct thread_info { .addr_limit = KERNEL_DS, \ } -/* How to get the thread information struct from C. */ +/* + * A pointer to the struct thread_info for the currently executing thread is + * held in register $28/$gp. + * + * We declare __current_thread_info as a global register variable rather than a + * local register variable within current_thread_info() because clang doesn't + * support explicit local register variables. + * + * When building the VDSO we take care not to declare the global register + * variable because this causes GCC to not preserve the value of $28/$gp in + * functions that change its value (which is common in the PIC VDSO when + * accessing the GOT). Since the VDSO shouldn't be accessing + * __current_thread_info anyway we declare it extern in order to cause a link + * failure if it's referenced. + */ +#ifdef __VDSO__ +extern struct thread_info *__current_thread_info; +#else register struct thread_info *__current_thread_info __asm__("$28"); +#endif static inline struct thread_info *current_thread_info(void) { diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h index b08825531e9f..0ae9b4cbc153 100644 --- a/arch/mips/include/asm/vdso/gettimeofday.h +++ b/arch/mips/include/asm/vdso/gettimeofday.h @@ -26,8 +26,6 @@ #define __VDSO_USE_SYSCALL ULLONG_MAX -#ifdef CONFIG_MIPS_CLOCK_VSYSCALL - static __always_inline long gettimeofday_fallback( struct __kernel_old_timeval *_tv, struct timezone *_tz) @@ -48,17 +46,6 @@ static __always_inline long gettimeofday_fallback( return error ? 
-ret : ret; } -#else - -static __always_inline long gettimeofday_fallback( - struct __kernel_old_timeval *_tv, - struct timezone *_tz) -{ - return -1; -} - -#endif - static __always_inline long clock_gettime_fallback( clockid_t _clkid, struct __kernel_timespec *_ts) diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c index f777e44653d5..47312c529410 100644 --- a/arch/mips/kernel/cacheinfo.c +++ b/arch/mips/kernel/cacheinfo.c @@ -50,6 +50,25 @@ static int __init_cache_level(unsigned int cpu) return 0; } +static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map) +{ + int cpu1; + + for_each_possible_cpu(cpu1) + if (cpus_are_siblings(cpu, cpu1)) + cpumask_set_cpu(cpu1, cpu_map); +} + +static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map) +{ + int cpu1; + int cluster = cpu_cluster(&cpu_data[cpu]); + + for_each_possible_cpu(cpu1) + if (cpu_cluster(&cpu_data[cpu1]) == cluster) + cpumask_set_cpu(cpu1, cpu_map); +} + static int __populate_cache_leaves(unsigned int cpu) { struct cpuinfo_mips *c = &current_cpu_data; @@ -57,14 +76,20 @@ static int __populate_cache_leaves(unsigned int cpu) struct cacheinfo *this_leaf = this_cpu_ci->info_list; if (c->icache.waysize) { + /* L1 caches are per core */ + fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA); + fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST); } else { populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED); } - if (c->scache.waysize) + if (c->scache.waysize) { + /* L2 cache is per cluster */ + fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map); populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED); + } if (c->tcache.waysize) populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED); diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index 46b76751f3a5..561154cbcc40 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -604,6 +604,7 @@ static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value) static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx) { int off, b_off; + int tcc_reg; ctx->flags |= EBPF_SEEN_TC; /* @@ -616,14 +617,14 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx) b_off = b_imm(this_idx + 1, ctx); emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off); /* - * if (--TCC < 0) + * if (TCC-- < 0) * goto out; */ /* Delay slot */ - emit_instr(ctx, daddiu, MIPS_R_T5, - (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1); + tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? 
MIPS_R_V1 : MIPS_R_S4; + emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1); b_off = b_imm(this_idx + 1, ctx); - emit_instr(ctx, bltz, MIPS_R_T5, b_off); + emit_instr(ctx, bltz, tcc_reg, b_off); /* * prog = array->ptrs[index]; * if (prog == NULL) @@ -1803,7 +1804,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) unsigned int image_size; u8 *image_ptr; - if (!prog->jit_requested || MIPS_ISA_REV < 2) + if (!prog->jit_requested) return prog; tmp = bpf_jit_blind_constants(prog); diff --git a/arch/mips/vdso/vgettimeofday.c b/arch/mips/vdso/vgettimeofday.c index 6ebdc37c89fc..6b83b6376a4b 100644 --- a/arch/mips/vdso/vgettimeofday.c +++ b/arch/mips/vdso/vgettimeofday.c @@ -17,12 +17,22 @@ int __vdso_clock_gettime(clockid_t clock, return __cvdso_clock_gettime32(clock, ts); } +#ifdef CONFIG_MIPS_CLOCK_VSYSCALL + +/* + * This is behind the ifdef so that we don't provide the symbol when there's no + * possibility of there being a usable clocksource, because there's nothing we + * can do without it. When libc fails the symbol lookup it should fall back on + * the standard syscall path. + */ int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) { return __cvdso_gettimeofday(tv, tz); } +#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */ + int __vdso_clock_getres(clockid_t clock_id, struct old_timespec32 *res) { @@ -43,12 +53,22 @@ int __vdso_clock_gettime(clockid_t clock, return __cvdso_clock_gettime(clock, ts); } +#ifdef CONFIG_MIPS_CLOCK_VSYSCALL + +/* + * This is behind the ifdef so that we don't provide the symbol when there's no + * possibility of there being a usable clocksource, because there's nothing we + * can do without it. When libc fails the symbol lookup it should fall back on + * the standard syscall path. + */ int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) { return __cvdso_gettimeofday(tv, tz); } +#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */ + int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res) { diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h index d9ac7e6408ef..caddded56e77 100644 --- a/arch/nds32/include/asm/cacheflush.h +++ b/arch/nds32/include/asm/cacheflush.h @@ -9,7 +9,11 @@ #define PG_dcache_dirty PG_arch_1 void flush_icache_range(unsigned long start, unsigned long end); +#define flush_icache_range flush_icache_range + void flush_icache_page(struct vm_area_struct *vma, struct page *page); +#define flush_icache_page flush_icache_page + #ifdef CONFIG_CPU_CACHE_ALIASING void flush_cache_mm(struct mm_struct *mm); void flush_cache_dup_mm(struct mm_struct *mm); @@ -40,12 +44,11 @@ void invalidate_kernel_vmap_range(void *addr, int size); #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages) #else -#include <asm-generic/cacheflush.h> -#undef flush_icache_range -#undef flush_icache_page -#undef flush_icache_user_range void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, unsigned long addr, int len); +#define flush_icache_user_range flush_icache_user_range + +#include <asm-generic/cacheflush.h> #endif #endif /* __NDS32_CACHEFLUSH_H__ */ diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h index 0214e4150539..6abc58ac406d 100644 --- a/arch/nds32/include/asm/pgtable.h +++ b/arch/nds32/include/asm/pgtable.h @@ -195,7 +195,7 @@ extern void paging_init(void); #define pte_unmap(pte) do { } while (0) #define pte_unmap_nested(pte) do { } while (0) -#define pmd_off_k(address) pmd_offset(pgd_offset_k(address), 
address) +#define pmd_off_k(address) pmd_offset(pud_offset(p4d_offset(pgd_offset_k(address), (address)), (address)), (address)) #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) /* diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index b16237c95ea3..0c29d6cb2c8d 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -62,6 +62,7 @@ config PARISC select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE select HAVE_KPROBES_ON_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_COPY_THREAD_TLS help The PA-RISC microprocessor is designed by Hewlett-Packard and used diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h index f627c37dad9c..ab5c215cf46c 100644 --- a/arch/parisc/include/asm/cmpxchg.h +++ b/arch/parisc/include/asm/cmpxchg.h @@ -44,8 +44,14 @@ __xchg(unsigned long x, __volatile__ void *ptr, int size) ** if (((unsigned long)p & 0xf) == 0) ** return __ldcw(p); */ -#define xchg(ptr, x) \ - ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) +#define xchg(ptr, x) \ +({ \ + __typeof__(*(ptr)) __ret; \ + __typeof__(*(ptr)) _x_ = (x); \ + __ret = (__typeof__(*(ptr))) \ + __xchg((unsigned long)_x_, (ptr), sizeof(*(ptr))); \ + __ret; \ +}) /* bug catcher for when unsupported size is used - won't link */ extern void __cmpxchg_called_with_bad_pointer(void); diff --git a/arch/parisc/include/asm/kexec.h b/arch/parisc/include/asm/kexec.h index a99ea747d7ed..87e174006995 100644 --- a/arch/parisc/include/asm/kexec.h +++ b/arch/parisc/include/asm/kexec.h @@ -2,8 +2,6 @@ #ifndef _ASM_PARISC_KEXEC_H #define _ASM_PARISC_KEXEC_H -#ifdef CONFIG_KEXEC - /* Maximum physical address we can use pages from */ #define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) /* Maximum address we can reach in physical address mode */ @@ -32,6 +30,4 @@ static inline void crash_setup_regs(struct pt_regs *newregs, #endif /* __ASSEMBLY__ */ -#endif /* CONFIG_KEXEC */ - #endif /* _ASM_PARISC_KEXEC_H */ diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile index 2663c8f8be11..068d90950d93 100644 --- a/arch/parisc/kernel/Makefile +++ b/arch/parisc/kernel/Makefile @@ -37,5 +37,5 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_KPROBES) += kprobes.o -obj-$(CONFIG_KEXEC) += kexec.o relocate_kernel.o +obj-$(CONFIG_KEXEC_CORE) += kexec.o relocate_kernel.o obj-$(CONFIG_KEXEC_FILE) += kexec_file.o diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index 3b330e58a4f0..a5f3e50fe976 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -810,7 +810,7 @@ EXPORT_SYMBOL(device_to_hwpath); static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high, struct device *parent); -static void walk_lower_bus(struct parisc_device *dev) +static void __init walk_lower_bus(struct parisc_device *dev) { unsigned long io_io_low, io_io_high; @@ -889,8 +889,8 @@ static void print_parisc_device(struct parisc_device *dev) static int count; print_pa_hwpath(dev, hw_path); - pr_info("%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", - ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type, + pr_info("%d. 
%s at %pap [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", + ++count, dev->name, &(dev->hpa.start), hw_path, dev->id.hw_type, dev->id.hversion_rev, dev->id.hversion, dev->id.sversion); if (dev->num_addrs) { diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c index 36434d4da381..749c4579db0d 100644 --- a/arch/parisc/kernel/pdt.c +++ b/arch/parisc/kernel/pdt.c @@ -327,8 +327,7 @@ static int pdt_mainloop(void *unused) ((pde & PDT_ADDR_SINGLE_ERR) == 0)) memory_failure(pde >> PAGE_SHIFT, 0); else - soft_offline_page( - pfn_to_page(pde >> PAGE_SHIFT), 0); + soft_offline_page(pde >> PAGE_SHIFT, 0); #else pr_crit("PDT: memory error at 0x%lx ignored.\n" "Rebuild kernel with CONFIG_MEMORY_FAILURE=y " diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index ecc5c2771208..230a6422b99f 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -208,8 +208,8 @@ arch_initcall(parisc_idle_init); * Copy architecture-specific thread state */ int -copy_thread(unsigned long clone_flags, unsigned long usp, - unsigned long kthread_arg, struct task_struct *p) +copy_thread_tls(unsigned long clone_flags, unsigned long usp, + unsigned long kthread_arg, struct task_struct *p, unsigned long tls) { struct pt_regs *cregs = &(p->thread.regs); void *stack = task_stack_page(p); @@ -254,9 +254,9 @@ copy_thread(unsigned long clone_flags, unsigned long usp, cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE; cregs->kpc = (unsigned long) &child_return; - /* Setup thread TLS area from the 4th parameter in clone */ + /* Setup thread TLS area */ if (clone_flags & CLONE_SETTLS) - cregs->cr27 = cregs->gr[23]; + cregs->cr27 = tls; } return 0; diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index ddca8287d43b..354cf060b67f 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -401,7 +401,7 @@ static void __init map_pages(unsigned long start_vaddr, pmd = (pmd_t *) __pa(pmd); } - pgd_populate(NULL, pg_dir, __va(pmd)); + pud_populate(NULL, (pud_t *)pg_dir, __va(pmd)); #endif pg_dir++; diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index e9a960e28f3c..860228e917dc 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -15,6 +15,7 @@ * * (the type definitions are in asm/spinlock_types.h) */ +#include <linux/jump_label.h> #include <linux/irqflags.h> #ifdef CONFIG_PPC64 #include <asm/paca.h> @@ -36,10 +37,12 @@ #endif #ifdef CONFIG_PPC_PSERIES +DECLARE_STATIC_KEY_FALSE(shared_processor); + #define vcpu_is_preempted vcpu_is_preempted static inline bool vcpu_is_preempted(int cpu) { - if (!firmware_has_feature(FW_FEATURE_SPLPAR)) + if (!static_branch_unlikely(&shared_processor)) return false; return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1); } @@ -110,13 +113,8 @@ static inline void splpar_rw_yield(arch_rwlock_t *lock) {}; static inline bool is_shared_processor(void) { -/* - * LPPACA is only available on Pseries so guard anything LPPACA related to - * allow other platforms (which include this common header) to compile. 
- */ -#ifdef CONFIG_PPC_PSERIES - return (IS_ENABLED(CONFIG_PPC_SPLPAR) && - lppaca_shared_proc(local_paca->lppaca_ptr)); +#ifdef CONFIG_PPC_SPLPAR + return static_branch_unlikely(&shared_processor); #else return false; #endif diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 15002b51ff18..c92fe7fe9692 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -401,7 +401,7 @@ copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n) return n; } -extern unsigned long __clear_user(void __user *addr, unsigned long size); +unsigned long __arch_clear_user(void __user *addr, unsigned long size); static inline unsigned long clear_user(void __user *addr, unsigned long size) { @@ -409,12 +409,17 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size) might_fault(); if (likely(access_ok(addr, size))) { allow_write_to_user(addr, size); - ret = __clear_user(addr, size); + ret = __arch_clear_user(addr, size); prevent_write_to_user(addr, size); } return ret; } +static inline unsigned long __clear_user(void __user *addr, unsigned long size) +{ + return clear_user(addr, size); +} + extern long strncpy_from_user(char *dst, const char __user *src, long count); extern __must_check long strnlen_user(const char __user *str, long n); diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 5645bc9cbc09..add67498c126 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -619,8 +619,6 @@ void __do_irq(struct pt_regs *regs) trace_irq_entry(regs); - check_stack_overflow(); - /* * Query the platform PIC for the interrupt & ack it. * @@ -652,6 +650,8 @@ void do_IRQ(struct pt_regs *regs) irqsp = hardirq_ctx[raw_smp_processor_id()]; sirqsp = softirq_ctx[raw_smp_processor_id()]; + check_stack_overflow(); + /* Already there ? */ if (unlikely(cursp == irqsp || cursp == sirqsp)) { __do_irq(regs); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index dc53578193ee..6ff3f896d908 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4983,7 +4983,8 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) if (nesting_enabled(kvm)) kvmhv_release_all_nested(kvm); kvm->arch.process_table = 0; - uv_svm_terminate(kvm->arch.lpid); + if (kvm->arch.secure_guest) + uv_svm_terminate(kvm->arch.lpid); kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 0496e66aaa56..c6fbbd29bd87 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1117,7 +1117,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) ld r7, VCPU_GPR(R7)(r4) bne ret_to_ultra - lwz r0, VCPU_CR(r4) + ld r0, VCPU_CR(r4) mtcr r0 ld r0, VCPU_GPR(R0)(r4) @@ -1137,7 +1137,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) * R3 = UV_RETURN */ ret_to_ultra: - lwz r0, VCPU_CR(r4) + ld r0, VCPU_CR(r4) mtcr r0 ld r0, VCPU_GPR(R3)(r4) diff --git a/arch/powerpc/lib/string_32.S b/arch/powerpc/lib/string_32.S index f69a6aab7bfb..1ddb26394e8a 100644 --- a/arch/powerpc/lib/string_32.S +++ b/arch/powerpc/lib/string_32.S @@ -17,7 +17,7 @@ CACHELINE_BYTES = L1_CACHE_BYTES LG_CACHELINE_BYTES = L1_CACHE_SHIFT CACHELINE_MASK = (L1_CACHE_BYTES-1) -_GLOBAL(__clear_user) +_GLOBAL(__arch_clear_user) /* * Use dcbz on the complete cache lines in the destination * to set them to zero. 
This requires that the destination @@ -87,4 +87,4 @@ _GLOBAL(__clear_user) EX_TABLE(8b, 91b) EX_TABLE(9b, 91b) -EXPORT_SYMBOL(__clear_user) +EXPORT_SYMBOL(__arch_clear_user) diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S index 507b18b1660e..169872bc0892 100644 --- a/arch/powerpc/lib/string_64.S +++ b/arch/powerpc/lib/string_64.S @@ -17,7 +17,7 @@ PPC64_CACHES: .section ".text" /** - * __clear_user: - Zero a block of memory in user space, with less checking. + * __arch_clear_user: - Zero a block of memory in user space, with less checking. * @to: Destination address, in user space. * @n: Number of bytes to zero. * @@ -58,7 +58,7 @@ err3; stb r0,0(r3) mr r3,r4 blr -_GLOBAL_TOC(__clear_user) +_GLOBAL_TOC(__arch_clear_user) cmpdi r4,32 neg r6,r3 li r0,0 @@ -181,4 +181,4 @@ err1; dcbz 0,r3 cmpdi r4,32 blt .Lshort_clear b .Lmedium_clear -EXPORT_SYMBOL(__clear_user) +EXPORT_SYMBOL(__arch_clear_user) diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 9488b63dfc87..f5535eae637f 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -151,10 +151,9 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap); int ret; - __remove_pages(page_zone(page), start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); /* Remove htab bolted mappings for this section of memory */ start = (unsigned long)__va(start); @@ -289,6 +288,14 @@ void __init mem_init(void) BUILD_BUG_ON(MMU_PAGE_COUNT > 16); #ifdef CONFIG_SWIOTLB + /* + * Some platforms (e.g. 85xx) limit DMA-able memory way below + * 4G. We force memblock to bottom-up mode to ensure that the + * memory allocated in swiotlb_init() is DMA-able. + * As it's the last memblock allocation, no need to reset it + * back to to-down. 
+ */ + memblock_set_bottom_up(true); swiotlb_init(0); #endif diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index 090af2d2d3e4..96eb8e43f39b 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -103,7 +103,7 @@ static void mmu_patch_addis(s32 *site, long simm) patch_instruction_site(site, instr); } -void __init mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot) +static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot) { unsigned long s = offset; unsigned long v = PAGE_OFFSET + s; diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 42bbcd47cc85..dffe1a45b6ed 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -50,7 +50,7 @@ static void slice_print_mask(const char *label, const struct slice_mask *mask) { #endif -static inline bool slice_addr_is_low(unsigned long addr) +static inline notrace bool slice_addr_is_low(unsigned long addr) { u64 tmp = (u64)addr; @@ -659,7 +659,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, mm_ctx_user_psize(¤t->mm->context), 1); } -unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) +unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr) { unsigned char *psizes; int index, mask_index; diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c index 91571841df8a..9dba7e880885 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c @@ -539,6 +539,16 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info, /* balloon page list reference */ get_page(newpage); + /* + * When we migrate a page to a different zone, we have to fixup the + * count of both involved zones as we adjusted the managed page count + * when inflating. 
+ */ + if (page_zone(page) != page_zone(newpage)) { + adjust_managed_page_count(page, 1); + adjust_managed_page_count(newpage, -1); + } + spin_lock_irqsave(&b_dev_info->pages_lock, flags); balloon_page_insert(b_dev_info, newpage); balloon_page_delete(page); diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 0a40201f315f..0c8421dd01ab 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -74,6 +74,9 @@ #include "pseries.h" #include "../../../../drivers/pci/pci.h" +DEFINE_STATIC_KEY_FALSE(shared_processor); +EXPORT_SYMBOL_GPL(shared_processor); + int CMO_PrPSP = -1; int CMO_SecPSP = -1; unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K); @@ -758,6 +761,10 @@ static void __init pSeries_setup_arch(void) if (firmware_has_feature(FW_FEATURE_LPAR)) { vpa_init(boot_cpuid); + + if (lppaca_shared_proc(get_lppaca())) + static_branch_enable(&shared_processor); + ppc_md.power_save = pseries_lpar_idle; ppc_md.enable_pmcs = pseries_lpar_enable_pmcs; #ifdef CONFIG_PCI_IOV diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 759ffb00267c..fa7dc03459e7 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -64,6 +64,8 @@ config RISCV select SPARSEMEM_STATIC if 32BIT select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select HAVE_ARCH_MMAP_RND_BITS if MMU + select ARCH_HAS_GCOV_PROFILE_ALL + select HAVE_COPY_THREAD_TLS config ARCH_MMAP_RND_BITS_MIN default 18 if 64BIT @@ -154,7 +156,7 @@ config GENERIC_HWEIGHT def_bool y config FIX_EARLYCON_MEM - def_bool CONFIG_MMU + def_bool MMU config PGTABLE_LEVELS int diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi index 70a1891e7cd0..a2e3d54e830c 100644 --- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi +++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi @@ -54,6 +54,7 @@ reg = <1>; riscv,isa = "rv64imafdc"; tlb-split; + next-level-cache = <&l2cache>; cpu1_intc: interrupt-controller { #interrupt-cells = <1>; compatible = "riscv,cpu-intc"; @@ -77,6 +78,7 @@ reg = <2>; riscv,isa = "rv64imafdc"; tlb-split; + next-level-cache = <&l2cache>; cpu2_intc: interrupt-controller { #interrupt-cells = <1>; compatible = "riscv,cpu-intc"; @@ -100,6 +102,7 @@ reg = <3>; riscv,isa = "rv64imafdc"; tlb-split; + next-level-cache = <&l2cache>; cpu3_intc: interrupt-controller { #interrupt-cells = <1>; compatible = "riscv,cpu-intc"; @@ -123,6 +126,7 @@ reg = <4>; riscv,isa = "rv64imafdc"; tlb-split; + next-level-cache = <&l2cache>; cpu4_intc: interrupt-controller { #interrupt-cells = <1>; compatible = "riscv,cpu-intc"; @@ -253,6 +257,17 @@ #pwm-cells = <3>; status = "disabled"; }; + l2cache: cache-controller@2010000 { + compatible = "sifive,fu540-c000-ccache", "cache"; + cache-block-size = <64>; + cache-level = <2>; + cache-sets = <1024>; + cache-size = <2097152>; + cache-unified; + interrupt-parent = <&plic0>; + interrupts = <1 2 3>; + reg = <0x0 0x2010000 0x0 0x1000>; + }; }; }; diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h index dd62b691c443..27e005fca584 100644 --- a/arch/riscv/include/asm/asm-prototypes.h +++ b/arch/riscv/include/asm/asm-prototypes.h @@ -5,4 +5,8 @@ #include <linux/ftrace.h> #include <asm-generic/asm-prototypes.h> +long long __lshrti3(long long a, int b); +long long __ashrti3(long long a, int b); +long long __ashlti3(long long a, int b); + #endif /* _ASM_RISCV_PROTOTYPES_H */ diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index 
0a62d2d68455..435b65532e29 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -116,9 +116,9 @@ # define SR_PIE SR_MPIE # define SR_PP SR_MPP -# define IRQ_SOFT IRQ_M_SOFT -# define IRQ_TIMER IRQ_M_TIMER -# define IRQ_EXT IRQ_M_EXT +# define RV_IRQ_SOFT IRQ_M_SOFT +# define RV_IRQ_TIMER IRQ_M_TIMER +# define RV_IRQ_EXT IRQ_M_EXT #else /* CONFIG_RISCV_M_MODE */ # define CSR_STATUS CSR_SSTATUS # define CSR_IE CSR_SIE @@ -133,15 +133,15 @@ # define SR_PIE SR_SPIE # define SR_PP SR_SPP -# define IRQ_SOFT IRQ_S_SOFT -# define IRQ_TIMER IRQ_S_TIMER -# define IRQ_EXT IRQ_S_EXT +# define RV_IRQ_SOFT IRQ_S_SOFT +# define RV_IRQ_TIMER IRQ_S_TIMER +# define RV_IRQ_EXT IRQ_S_EXT #endif /* CONFIG_RISCV_M_MODE */ /* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */ -#define IE_SIE (_AC(0x1, UL) << IRQ_SOFT) -#define IE_TIE (_AC(0x1, UL) << IRQ_TIMER) -#define IE_EIE (_AC(0x1, UL) << IRQ_EXT) +#define IE_SIE (_AC(0x1, UL) << RV_IRQ_SOFT) +#define IE_TIE (_AC(0x1, UL) << RV_IRQ_TIMER) +#define IE_EIE (_AC(0x1, UL) << RV_IRQ_EXT) #ifndef __ASSEMBLY__ diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 7ff0ed4f292e..36ae01761352 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -90,6 +90,27 @@ extern pgd_t swapper_pg_dir[]; #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC +#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) +#define VMALLOC_END (PAGE_OFFSET - 1) +#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) + +/* + * Roughly size the vmemmap space to be large enough to fit enough + * struct pages to map half the virtual address space. Then + * position vmemmap directly below the VMALLOC region. + */ +#define VMEMMAP_SHIFT \ + (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT) +#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT) +#define VMEMMAP_END (VMALLOC_START - 1) +#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE) + +/* + * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel + * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled. + */ +#define vmemmap ((struct page *)VMEMMAP_START) + static inline int pmd_present(pmd_t pmd) { return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); @@ -400,23 +421,6 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma, #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) -#define VMALLOC_END (PAGE_OFFSET - 1) -#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) - -/* - * Roughly size the vmemmap space to be large enough to fit enough - * struct pages to map half the virtual address space. Then - * position vmemmap directly below the VMALLOC region. 
- */ -#define VMEMMAP_SHIFT \ - (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT) -#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT) -#define VMEMMAP_END (VMALLOC_START - 1) -#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE) - -#define vmemmap ((struct page *)VMEMMAP_START) - #define PCI_IO_SIZE SZ_16M #define PCI_IO_END VMEMMAP_START #define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index a1349ca64669..e163b7b64c86 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -246,6 +246,7 @@ check_syscall_nr: */ li t1, -1 beq a7, t1, ret_from_syscall_rejected + blt a7, t1, 1f /* Call syscall */ la s0, sys_call_table slli t0, a7, RISCV_LGPTR diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index b94d8db5ddcc..c40fdcdeb950 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -142,7 +142,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, */ old = *parent; - if (function_graph_enter(old, self_addr, frame_pointer, parent)) + if (!function_graph_enter(old, self_addr, frame_pointer, parent)) *parent = return_hooker; } diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 84a6f0a4b120..a4242be66966 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -80,7 +80,9 @@ _start_kernel: #ifdef CONFIG_SMP li t0, CONFIG_NR_CPUS - bgeu a0, t0, .Lsecondary_park + blt a0, t0, .Lgood_cores + tail .Lsecondary_park +.Lgood_cores: #endif /* Pick one hart to run the main boot sequence */ @@ -209,11 +211,6 @@ relocate: tail smp_callin #endif -.align 2 -.Lsecondary_park: - /* We lack SMP support or have too many harts, so park this hart */ - wfi - j .Lsecondary_park END(_start) #ifdef CONFIG_RISCV_M_MODE @@ -246,12 +243,12 @@ ENTRY(reset_regs) li t4, 0 li t5, 0 li t6, 0 - csrw sscratch, 0 + csrw CSR_SCRATCH, 0 #ifdef CONFIG_FPU csrr t0, CSR_MISA andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D) - bnez t0, .Lreset_regs_done + beqz t0, .Lreset_regs_done li t1, SR_FS csrs CSR_STATUS, t1 @@ -295,6 +292,13 @@ ENTRY(reset_regs) END(reset_regs) #endif /* CONFIG_RISCV_M_MODE */ +.section ".text", "ax",@progbits +.align 2 +.Lsecondary_park: + /* We lack SMP support or have too many harts, so park this hart */ + wfi + j .Lsecondary_park + __PAGE_ALIGNED_BSS /* Empty zero page */ .balign PAGE_SIZE diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c index 3f07a91d5afb..345c4f2eba13 100644 --- a/arch/riscv/kernel/irq.c +++ b/arch/riscv/kernel/irq.c @@ -23,11 +23,11 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs) irq_enter(); switch (regs->cause & ~CAUSE_IRQ_FLAG) { - case IRQ_TIMER: + case RV_IRQ_TIMER: riscv_timer_interrupt(); break; #ifdef CONFIG_SMP - case IRQ_SOFT: + case RV_IRQ_SOFT: /* * We only use software interrupts to pass IPIs, so if a non-SMP * system gets one, then we don't know what to do. 
@@ -35,7 +35,7 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs) riscv_software_interrupt(); break; #endif - case IRQ_EXT: + case RV_IRQ_EXT: handle_arch_irq(regs); break; default: diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index 95a3031e5c7c..817cf7b0974c 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -99,8 +99,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) return 0; } -int copy_thread(unsigned long clone_flags, unsigned long usp, - unsigned long arg, struct task_struct *p) +int copy_thread_tls(unsigned long clone_flags, unsigned long usp, + unsigned long arg, struct task_struct *p, unsigned long tls) { struct pt_regs *childregs = task_pt_regs(p); @@ -121,7 +121,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, if (usp) /* User fork */ childregs->sp = usp; if (clone_flags & CLONE_SETTLS) - childregs->tp = childregs->a5; + childregs->tp = tls; childregs->a0 = 0; /* Return value of fork() */ p->thread.ra = (unsigned long)ret_from_fork; } diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c index 4800cf703186..2a02b7eebee0 100644 --- a/arch/riscv/kernel/riscv_ksyms.c +++ b/arch/riscv/kernel/riscv_ksyms.c @@ -9,8 +9,5 @@ /* * Assembly functions that may be used (directly or indirectly) by modules */ -EXPORT_SYMBOL(__clear_user); -EXPORT_SYMBOL(__asm_copy_to_user); -EXPORT_SYMBOL(__asm_copy_from_user); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 49a5852fd07d..33b16f4212f7 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -58,7 +58,8 @@ quiet_cmd_vdsold = VDSOLD $@ cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \ -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \ $(CROSS_COMPILE)objcopy \ - $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ + $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ + rm $@.tmp # install commands for the unstripped file quiet_cmd_vdso_install = INSTALL $@ diff --git a/arch/riscv/lib/tishift.S b/arch/riscv/lib/tishift.S index 15f9d54c7db6..ef90075c4b0a 100644 --- a/arch/riscv/lib/tishift.S +++ b/arch/riscv/lib/tishift.S @@ -4,34 +4,73 @@ */ #include <linux/linkage.h> +#include <asm-generic/export.h> -ENTRY(__lshrti3) +SYM_FUNC_START(__lshrti3) beqz a2, .L1 li a5,64 sub a5,a5,a2 - addi sp,sp,-16 sext.w a4,a5 blez a5, .L2 sext.w a2,a2 - sll a4,a1,a4 srl a0,a0,a2 - srl a1,a1,a2 + sll a4,a1,a4 + srl a2,a1,a2 or a0,a0,a4 - sd a1,8(sp) - sd a0,0(sp) - ld a0,0(sp) - ld a1,8(sp) - addi sp,sp,16 - ret + mv a1,a2 .L1: ret .L2: - negw a4,a4 - srl a1,a1,a4 - sd a1,0(sp) - sd zero,8(sp) - ld a0,0(sp) - ld a1,8(sp) - addi sp,sp,16 + negw a0,a4 + li a2,0 + srl a0,a1,a0 + mv a1,a2 + ret +SYM_FUNC_END(__lshrti3) +EXPORT_SYMBOL(__lshrti3) + +SYM_FUNC_START(__ashrti3) + beqz a2, .L3 + li a5,64 + sub a5,a5,a2 + sext.w a4,a5 + blez a5, .L4 + sext.w a2,a2 + srl a0,a0,a2 + sll a4,a1,a4 + sra a2,a1,a2 + or a0,a0,a4 + mv a1,a2 +.L3: + ret +.L4: + negw a0,a4 + srai a2,a1,0x3f + sra a0,a1,a0 + mv a1,a2 + ret +SYM_FUNC_END(__ashrti3) +EXPORT_SYMBOL(__ashrti3) + +SYM_FUNC_START(__ashlti3) + beqz a2, .L5 + li a5,64 + sub a5,a5,a2 + sext.w a4,a5 + blez a5, .L6 + sext.w a2,a2 + sll a1,a1,a2 + srl a4,a0,a4 + sll a2,a0,a2 + or a1,a1,a4 + mv a0,a2 +.L5: + ret +.L6: + negw a1,a4 + li a2,0 + sll a1,a0,a1 + mv a0,a2 ret -ENDPROC(__lshrti3) +SYM_FUNC_END(__ashlti3) 
+EXPORT_SYMBOL(__ashlti3) diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S index fecd65657a6f..f29d2ba2c0a6 100644 --- a/arch/riscv/lib/uaccess.S +++ b/arch/riscv/lib/uaccess.S @@ -1,4 +1,5 @@ #include <linux/linkage.h> +#include <asm-generic/export.h> #include <asm/asm.h> #include <asm/csr.h> @@ -66,6 +67,8 @@ ENTRY(__asm_copy_from_user) j 3b ENDPROC(__asm_copy_to_user) ENDPROC(__asm_copy_from_user) +EXPORT_SYMBOL(__asm_copy_to_user) +EXPORT_SYMBOL(__asm_copy_from_user) ENTRY(__clear_user) @@ -108,6 +111,7 @@ ENTRY(__clear_user) bltu a0, a3, 5b j 3b ENDPROC(__clear_user) +EXPORT_SYMBOL(__clear_user) .section .fixup,"ax" .balign 4 diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile index 3c8b33258457..a1bd95c8047a 100644 --- a/arch/riscv/mm/Makefile +++ b/arch/riscv/mm/Makefile @@ -10,7 +10,6 @@ obj-y += extable.o obj-$(CONFIG_MMU) += fault.o obj-y += cacheflush.o obj-y += context.o -obj-y += sifive_l2_cache.o ifeq ($(CONFIG_MMU),y) obj-$(CONFIG_SMP) += tlbflush.o diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c index 8f1900686640..8930ab7278e6 100644 --- a/arch/riscv/mm/cacheflush.c +++ b/arch/riscv/mm/cacheflush.c @@ -22,6 +22,7 @@ void flush_icache_all(void) else on_each_cpu(ipi_remote_fence_i, NULL, 1); } +EXPORT_SYMBOL(flush_icache_all); /* * Performs an icache flush for the given MM context. RISC-V has no direct diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 69f6678db7f3..965a8cf4829c 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -99,13 +99,13 @@ static void __init setup_initrd(void) pr_info("initrd not found or empty"); goto disable; } - if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { + if (__pa_symbol(initrd_end) > PFN_PHYS(max_low_pfn)) { pr_err("initrd extends beyond end of memory"); goto disable; } size = initrd_end - initrd_start; - memblock_reserve(__pa(initrd_start), size); + memblock_reserve(__pa_symbol(initrd_start), size); initrd_below_start_ok = 1; pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n", @@ -124,8 +124,8 @@ void __init setup_bootmem(void) { struct memblock_region *reg; phys_addr_t mem_size = 0; - phys_addr_t vmlinux_end = __pa(&_end); - phys_addr_t vmlinux_start = __pa(&_start); + phys_addr_t vmlinux_end = __pa_symbol(&_end); + phys_addr_t vmlinux_start = __pa_symbol(&_start); /* Find the memory region containing the kernel */ for_each_memblock(memory, reg) { @@ -445,7 +445,7 @@ static void __init setup_vm_final(void) /* Setup swapper PGD for fixmap */ create_pgd_mapping(swapper_pg_dir, FIXADDR_START, - __pa(fixmap_pgd_next), + __pa_symbol(fixmap_pgd_next), PGDIR_SIZE, PAGE_TABLE); /* Map all memory banks */ @@ -474,7 +474,7 @@ static void __init setup_vm_final(void) clear_fixmap(FIX_PMD); /* Move to swapper page table */ - csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE); + csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE); local_flush_tlb_all(); } #else diff --git a/arch/riscv/net/bpf_jit_comp.c b/arch/riscv/net/bpf_jit_comp.c index 5451ef3845f2..7fbf56aab661 100644 --- a/arch/riscv/net/bpf_jit_comp.c +++ b/arch/riscv/net/bpf_jit_comp.c @@ -631,14 +631,14 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) return -1; emit(rv_bgeu(RV_REG_A2, RV_REG_T1, off >> 1), ctx); - /* if (--TCC < 0) + /* if (TCC-- < 0) * goto out; */ emit(rv_addi(RV_REG_T1, tcc, -1), ctx); off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; if (is_13b_check(off, insn)) return -1; - emit(rv_blt(RV_REG_T1, RV_REG_ZERO, off >> 1), ctx); + emit(rv_blt(tcc, 
RV_REG_ZERO, off >> 1), ctx); /* prog = array->ptrs[index]; * if (!prog) diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index 6da8885251d6..670f14a228e5 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -194,9 +194,9 @@ static inline unsigned long long get_tod_clock_monotonic(void) { unsigned long long tod; - preempt_disable(); + preempt_disable_notrace(); tod = get_tod_clock() - *(unsigned long long *) &tod_clock_base[1]; - preempt_enable(); + preempt_enable_notrace(); return tod; } diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 9e1660a6b9db..c3597d2e2ae0 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -35,6 +35,7 @@ EXPORT_SYMBOL(_mcount) ENTRY(ftrace_caller) .globl ftrace_regs_caller .set ftrace_regs_caller,ftrace_caller + stg %r14,(__SF_GPRS+8*8)(%r15) # save traced function caller lgr %r1,%r15 #if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)) aghi %r0,MCOUNT_RETURN_FIXUP diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 9cbf490fd162..d5fbd754f41a 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -1052,7 +1052,7 @@ static void __init log_component_list(void) if (!early_ipl_comp_list_addr) return; - if (ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR) + if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL) pr_info("Linux is running with Secure-IPL enabled\n"); else pr_info("Linux is running with Secure-IPL disabled\n"); diff --git a/arch/s390/kernel/unwind_bc.c b/arch/s390/kernel/unwind_bc.c index da2d4d4c5b0e..707fd99f6734 100644 --- a/arch/s390/kernel/unwind_bc.c +++ b/arch/s390/kernel/unwind_bc.c @@ -36,10 +36,17 @@ static bool update_stack_info(struct unwind_state *state, unsigned long sp) return true; } -static inline bool is_task_pt_regs(struct unwind_state *state, - struct pt_regs *regs) +static inline bool is_final_pt_regs(struct unwind_state *state, + struct pt_regs *regs) { - return task_pt_regs(state->task) == regs; + /* user mode or kernel thread pt_regs at the bottom of task stack */ + if (task_pt_regs(state->task) == regs) + return true; + + /* user mode pt_regs at the bottom of irq stack */ + return state->stack_info.type == STACK_TYPE_IRQ && + state->stack_info.end - sizeof(struct pt_regs) == (unsigned long)regs && + READ_ONCE_NOCHECK(regs->psw.mask) & PSW_MASK_PSTATE; } bool unwind_next_frame(struct unwind_state *state) @@ -80,7 +87,7 @@ bool unwind_next_frame(struct unwind_state *state) if (!on_stack(info, sp, sizeof(struct pt_regs))) goto out_err; regs = (struct pt_regs *) sp; - if (is_task_pt_regs(state, regs)) + if (is_final_pt_regs(state, regs)) goto out_stop; ip = READ_ONCE_NOCHECK(regs->psw.addr); sp = READ_ONCE_NOCHECK(regs->gprs[15]); diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index f0ce22220565..ac44bd76db4b 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -292,10 +292,8 @@ void arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); vmem_remove_mapping(start, size); } #endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/arch/s390/purgatory/.gitignore b/arch/s390/purgatory/.gitignore index 04a03433c720..c82157f46b18 100644 --- a/arch/s390/purgatory/.gitignore +++ b/arch/s390/purgatory/.gitignore @@ -1,3 +1,4 @@ purgatory +purgatory.chk 
purgatory.lds purgatory.ro diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile index bc0d7a0d0394..c57f8c40e992 100644 --- a/arch/s390/purgatory/Makefile +++ b/arch/s390/purgatory/Makefile @@ -4,7 +4,7 @@ OBJECT_FILES_NON_STANDARD := y purgatory-y := head.o purgatory.o string.o sha256.o mem.o -targets += $(purgatory-y) purgatory.lds purgatory purgatory.ro +targets += $(purgatory-y) purgatory.lds purgatory purgatory.chk purgatory.ro PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) $(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE @@ -15,8 +15,10 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS $(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE $(call if_changed_rule,as_o_S) -$(obj)/string.o: $(srctree)/arch/s390/lib/string.c FORCE - $(call if_changed_rule,cc_o_c) +KCOV_INSTRUMENT := n +GCOV_PROFILE := n +UBSAN_SANITIZE := n +KASAN_SANITIZE := n KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare @@ -26,15 +28,22 @@ KBUILD_CFLAGS += $(CLANG_FLAGS) KBUILD_CFLAGS += $(call cc-option,-fno-PIE) KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS)) -LDFLAGS_purgatory := -r --no-undefined -nostdlib -z nodefaultlib -T +# Since we link purgatory with -r unresolved symbols are not checked, so we +# also link a purgatory.chk binary without -r to check for unresolved symbols. +PURGATORY_LDFLAGS := -nostdlib -z nodefaultlib +LDFLAGS_purgatory := -r $(PURGATORY_LDFLAGS) -T +LDFLAGS_purgatory.chk := -e purgatory_start $(PURGATORY_LDFLAGS) $(obj)/purgatory: $(obj)/purgatory.lds $(PURGATORY_OBJS) FORCE $(call if_changed,ld) +$(obj)/purgatory.chk: $(obj)/purgatory FORCE + $(call if_changed,ld) + OBJCOPYFLAGS_purgatory.ro := -O elf64-s390 OBJCOPYFLAGS_purgatory.ro += --remove-section='*debug*' OBJCOPYFLAGS_purgatory.ro += --remove-section='.comment' OBJCOPYFLAGS_purgatory.ro += --remove-section='.note.*' -$(obj)/purgatory.ro: $(obj)/purgatory FORCE +$(obj)/purgatory.ro: $(obj)/purgatory $(obj)/purgatory.chk FORCE $(call if_changed,objcopy) $(obj)/kexec-purgatory.o: $(obj)/kexec-purgatory.S $(obj)/purgatory.ro FORCE diff --git a/arch/s390/purgatory/string.c b/arch/s390/purgatory/string.c new file mode 100644 index 000000000000..c98c22a72db7 --- /dev/null +++ b/arch/s390/purgatory/string.c @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-2.0 +#define __HAVE_ARCH_MEMCMP /* arch function */ +#include "../lib/string.c" diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index dfdbaa50946e..d1b1ff2be17a 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -434,9 +434,7 @@ void arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = PFN_DOWN(start); unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); } #endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/arch/um/Kconfig b/arch/um/Kconfig index 2a6d04fcb3e9..6f0edd0c0220 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -14,6 +14,7 @@ config UML select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_BUGVERBOSE + select HAVE_COPY_THREAD_TLS select GENERIC_IRQ_SHOW select GENERIC_CPU_DEVICES select GENERIC_CLOCKEVENTS diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h index 81c647ef9c6c..adf91ef553ae 100644 --- a/arch/um/include/asm/ptrace-generic.h +++ b/arch/um/include/asm/ptrace-generic.h @@ -36,7 +36,7 @@ extern long 
subarch_ptrace(struct task_struct *child, long request, extern unsigned long getreg(struct task_struct *child, int regno); extern int putreg(struct task_struct *child, int regno, unsigned long value); -extern int arch_copy_tls(struct task_struct *new); +extern int arch_set_tls(struct task_struct *new, unsigned long tls); extern void clear_flushed_tls(struct task_struct *task); extern int syscall_trace_enter(struct pt_regs *regs); extern void syscall_trace_leave(struct pt_regs *regs); diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index 263a8f069133..17045e7211bf 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -153,8 +153,8 @@ void fork_handler(void) userspace(¤t->thread.regs.regs, current_thread_info()->aux_fp_regs); } -int copy_thread(unsigned long clone_flags, unsigned long sp, - unsigned long arg, struct task_struct * p) +int copy_thread_tls(unsigned long clone_flags, unsigned long sp, + unsigned long arg, struct task_struct * p, unsigned long tls) { void (*handler)(void); int kthread = current->flags & PF_KTHREAD; @@ -188,7 +188,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, * Set a new TLS for the child thread? */ if (clone_flags & CLONE_SETTLS) - ret = arch_copy_tls(p); + ret = arch_set_tls(p, tls); } return ret; diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index aa976adb7094..1dac210f7d44 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -103,7 +103,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o quiet_cmd_check_data_rel = DATAREL $@ define cmd_check_data_rel for obj in $(filter %.o,$^); do \ - ${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \ + $(READELF) -S $$obj | grep -qF .rel.local && { \ echo "error: $$obj has data relocations!" >&2; \ exit 1; \ } || true; \ diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 58a512e33d8d..ee60b81944a7 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -244,6 +244,11 @@ SYM_FUNC_START(efi32_stub_entry) leal efi32_config(%ebp), %eax movl %eax, efi_config(%ebp) + /* Disable paging */ + movl %cr0, %eax + btrl $X86_CR0_PG_BIT, %eax + movl %eax, %cr0 + jmp startup_32 SYM_FUNC_END(efi32_stub_entry) #endif diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 9a89d98c55bd..f118af9f0718 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -376,7 +376,7 @@ int x86_add_exclusive(unsigned int what) * LBR and BTS are still mutually exclusive. */ if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt) - return 0; + goto out; if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) { mutex_lock(&pmc_reserve_mutex); @@ -388,6 +388,7 @@ int x86_add_exclusive(unsigned int what) mutex_unlock(&pmc_reserve_mutex); } +out: atomic_inc(&active_events); return 0; @@ -398,11 +399,15 @@ fail_unlock: void x86_del_exclusive(unsigned int what) { + atomic_dec(&active_events); + + /* + * See the comment in x86_add_exclusive(). 
+ */ if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt) return; atomic_dec(&x86_pmu.lbr_exclusive[what]); - atomic_dec(&active_events); } int x86_setup_perfctr(struct perf_event *event) @@ -1642,9 +1647,12 @@ static struct attribute_group x86_pmu_format_group __ro_after_init = { ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page) { - struct perf_pmu_events_attr *pmu_attr = \ + struct perf_pmu_events_attr *pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); - u64 config = x86_pmu.event_map(pmu_attr->id); + u64 config = 0; + + if (pmu_attr->id < x86_pmu.max_events) + config = x86_pmu.event_map(pmu_attr->id); /* string trumps id */ if (pmu_attr->event_str) @@ -1713,6 +1721,9 @@ is_visible(struct kobject *kobj, struct attribute *attr, int idx) { struct perf_pmu_events_attr *pmu_attr; + if (idx >= x86_pmu.max_events) + return 0; + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr); /* str trumps id */ return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0; diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index 38de4a7f6752..6a3b599ee0fe 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c @@ -63,9 +63,17 @@ struct bts_buffer { static struct pmu bts_pmu; +static int buf_nr_pages(struct page *page) +{ + if (!PagePrivate(page)) + return 1; + + return 1 << page_private(page); +} + static size_t buf_size(struct page *page) { - return 1 << (PAGE_SHIFT + page_private(page)); + return buf_nr_pages(page) * PAGE_SIZE; } static void * @@ -83,9 +91,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages, /* count all the high order buffers */ for (pg = 0, nbuf = 0; pg < nr_pages;) { page = virt_to_page(pages[pg]); - if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1)) - return NULL; - pg += 1 << page_private(page); + pg += buf_nr_pages(page); nbuf++; } @@ -109,7 +115,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages, unsigned int __nr_pages; page = virt_to_page(pages[pg]); - __nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1; + __nr_pages = buf_nr_pages(page); buf->buf[nbuf].page = page; buf->buf[nbuf].offset = offset; buf->buf[nbuf].displacement = (pad ? 
BTS_RECORD_SIZE - pad : 0); diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index dbaa1b088a30..c37cb12d0ef6 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -15,6 +15,7 @@ #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910 #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f +#define PCI_DEVICE_ID_INTEL_SKL_E3_IMC 0x1918 #define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c #define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904 #define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914 @@ -658,6 +659,10 @@ static const struct pci_device_id skl_uncore_pci_ids[] = { .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), }, { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_E3_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC), .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), }, @@ -826,6 +831,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = { IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */ IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */ IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */ + IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver), /* Xeon E3 V5 Gen Core processor */ IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver), /* 7th Gen Core Y */ IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U */ IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */ diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index b10a5ec79e48..ad20220af303 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -369,11 +369,6 @@ #define SNR_M2M_PCI_PMON_BOX_CTL 0x438 #define SNR_M2M_PCI_PMON_UMASK_EXT 0xff -/* SNR PCIE3 */ -#define SNR_PCIE3_PCI_PMON_CTL0 0x508 -#define SNR_PCIE3_PCI_PMON_CTR0 0x4e8 -#define SNR_PCIE3_PCI_PMON_BOX_CTL 0x4e4 - /* SNR IMC */ #define SNR_IMC_MMIO_PMON_FIXED_CTL 0x54 #define SNR_IMC_MMIO_PMON_FIXED_CTR 0x38 @@ -4328,27 +4323,12 @@ static struct intel_uncore_type snr_uncore_m2m = { .format_group = &snr_m2m_uncore_format_group, }; -static struct intel_uncore_type snr_uncore_pcie3 = { - .name = "pcie3", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .perf_ctr = SNR_PCIE3_PCI_PMON_CTR0, - .event_ctl = SNR_PCIE3_PCI_PMON_CTL0, - .event_mask = SNBEP_PMON_RAW_EVENT_MASK, - .box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL, - .ops = &ivbep_uncore_pci_ops, - .format_group = &ivbep_uncore_format_group, -}; - enum { SNR_PCI_UNCORE_M2M, - SNR_PCI_UNCORE_PCIE3, }; static struct intel_uncore_type *snr_pci_uncores[] = { [SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m, - [SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3, NULL, }; @@ -4357,10 +4337,6 @@ static const struct pci_device_id snr_uncore_pci_ids[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a), .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0), }, - { /* PCIe3 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a), - .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0), - }, { /* end: all zeroes */ } }; @@ -4536,6 +4512,7 @@ static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = { INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"), INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"), INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"), + { /* end: all zeroes */ }, }; static struct intel_uncore_ops 
snr_uncore_imc_freerunning_ops = { diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 90f75e515876..62c30279be77 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -615,9 +615,9 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) return; clear_all: - clear_cpu_cap(c, X86_FEATURE_SME); + setup_clear_cpu_cap(X86_FEATURE_SME); clear_sev: - clear_cpu_cap(c, X86_FEATURE_SEV); + setup_clear_cpu_cap(X86_FEATURE_SEV); } } diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 5167bd2bb6b1..d6cf5c18a7e0 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -266,10 +266,10 @@ static void smca_configure(unsigned int bank, unsigned int cpu) smca_set_misc_banks_map(bank, cpu); /* Return early if this bank was already initialized. */ - if (smca_banks[bank].hwid) + if (smca_banks[bank].hwid && smca_banks[bank].hwid->hwid_mcatype != 0) return; - if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) { + if (rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) { pr_warn("Failed to read MCA_IPID for bank %d\n", bank); return; } diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 5f42f25bac8f..2e2a421c8528 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -819,8 +819,8 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, if (quirk_no_way_out) quirk_no_way_out(i, m, regs); + m->bank = i; if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { - m->bank = i; mce_read_aux(m, i); *msg = tmp; return 1; diff --git a/arch/x86/kernel/cpu/mce/therm_throt.c b/arch/x86/kernel/cpu/mce/therm_throt.c index b38010b541d6..6c3e1c92f183 100644 --- a/arch/x86/kernel/cpu/mce/therm_throt.c +++ b/arch/x86/kernel/cpu/mce/therm_throt.c @@ -467,6 +467,7 @@ static int thermal_throttle_online(unsigned int cpu) { struct thermal_state *state = &per_cpu(thermal_state, cpu); struct device *dev = get_cpu_device(cpu); + u32 l; state->package_throttle.level = PACKAGE_LEVEL; state->core_throttle.level = CORE_LEVEL; @@ -474,6 +475,10 @@ static int thermal_throttle_online(unsigned int cpu) INIT_DELAYED_WORK(&state->package_throttle.therm_work, throttle_active_work); INIT_DELAYED_WORK(&state->core_throttle.therm_work, throttle_active_work); + /* Unmask the thermal vector after the above workqueues are initialized. */ + l = apic_read(APIC_LVTTHMR); + apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); + return thermal_throttle_add_dev(dev, cpu); } @@ -722,10 +727,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c) rdmsr(MSR_IA32_MISC_ENABLE, l, h); wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h); - /* Unmask the thermal vector: */ - l = apic_read(APIC_LVTTHMR); - apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); - pr_info_once("CPU0: Thermal monitoring enabled (%s)\n", tm2 ? 
"TM2" : "TM1"); diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 03eb90d00af0..89049b343c7a 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -618,7 +618,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) if (static_branch_unlikely(&rdt_mon_enable_key)) rmdir_mondata_subdir_allrdtgrp(r, d->id); list_del(&d->list); - if (is_mbm_enabled()) + if (r->mon_capable && is_mbm_enabled()) cancel_delayed_work(&d->mbm_over); if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) { /* diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 2e3b06d6bbc6..dac7209a0708 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1741,9 +1741,6 @@ static int set_cache_qos_cfg(int level, bool enable) struct rdt_domain *d; int cpu; - if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) - return -ENOMEM; - if (level == RDT_RESOURCE_L3) update = l3_qos_cfg_update; else if (level == RDT_RESOURCE_L2) @@ -1751,6 +1748,9 @@ static int set_cache_qos_cfg(int level, bool enable) else return -EINVAL; + if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) + return -ENOMEM; + r_l = &rdt_resources_all[level]; list_for_each_entry(d, &r_l->domains, list) { /* Pick one CPU from each domain instance to update MSR */ diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 4cba91ec8049..2f9ec14be3b1 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -710,8 +710,12 @@ static struct chipset early_qrk[] __initdata = { */ { PCI_VENDOR_ID_INTEL, 0x0f00, PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet}, + { PCI_VENDOR_ID_INTEL, 0x3e20, + PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet}, { PCI_VENDOR_ID_INTEL, 0x3ec4, PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet}, + { PCI_VENDOR_ID_INTEL, 0x8a12, + PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet}, { PCI_VENDOR_ID_BROADCOM, 0x4331, PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset}, {} diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index cfafa320a8cf..cf55629ff0ff 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -402,7 +402,8 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index) entry->edx |= F(SPEC_CTRL); if (boot_cpu_has(X86_FEATURE_STIBP)) entry->edx |= F(INTEL_STIBP); - if (boot_cpu_has(X86_FEATURE_SSBD)) + if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || + boot_cpu_has(X86_FEATURE_AMD_SSBD)) entry->edx |= F(SPEC_CTRL_SSBD); /* * We emulate ARCH_CAPABILITIES in software even @@ -759,7 +760,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function, entry->ebx |= F(AMD_IBRS); if (boot_cpu_has(X86_FEATURE_STIBP)) entry->ebx |= F(AMD_STIBP); - if (boot_cpu_has(X86_FEATURE_SSBD)) + if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || + boot_cpu_has(X86_FEATURE_AMD_SSBD)) entry->ebx |= F(AMD_SSBD); if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) entry->ebx |= F(AMD_SSB_NO); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 930edeb41ec3..0a74407ef92e 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -865,10 +865,8 @@ void arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, 
nr_pages, altmap); } #endif diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index dcb9bc961b39..bcfede46fe02 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1212,10 +1212,8 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap); - struct zone *zone = page_zone(page); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); kernel_physical_mapping_remove(start, start + size); } #endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 7675cf754d90..f8f0220b6a66 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -260,10 +260,6 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) return; } - /* No need to reserve regions that will never be freed. */ - if (md.attribute & EFI_MEMORY_RUNTIME) - return; - size += addr % EFI_PAGE_SIZE; size = round_up(size, EFI_PAGE_SIZE); addr = round_down(addr, EFI_PAGE_SIZE); @@ -293,6 +289,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) early_memunmap(new, new_size); efi_memmap_install(new_phys, num_entries); + e820__range_update(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED); + e820__update_table(e820_table); } /* diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c index 5bd949da7a4a..ac8eee093f9c 100644 --- a/arch/x86/um/tls_32.c +++ b/arch/x86/um/tls_32.c @@ -215,14 +215,12 @@ static int set_tls_entry(struct task_struct* task, struct user_desc *info, return 0; } -int arch_copy_tls(struct task_struct *new) +int arch_set_tls(struct task_struct *new, unsigned long tls) { struct user_desc info; int idx, ret = -EFAULT; - if (copy_from_user(&info, - (void __user *) UPT_SI(&new->thread.regs.regs), - sizeof(info))) + if (copy_from_user(&info, (void __user *) tls, sizeof(info))) goto out; ret = -EINVAL; diff --git a/arch/x86/um/tls_64.c b/arch/x86/um/tls_64.c index 3a621e0d3925..ebd3855d9b13 100644 --- a/arch/x86/um/tls_64.c +++ b/arch/x86/um/tls_64.c @@ -6,14 +6,13 @@ void clear_flushed_tls(struct task_struct *task) { } -int arch_copy_tls(struct task_struct *t) +int arch_set_tls(struct task_struct *t, unsigned long tls) { /* * If CLONE_SETTLS is set, we need to save the thread id - * (which is argument 5, child_tid, of clone) so it can be set - * during context switches. + * so it can be set during context switches. */ - t->thread.arch.fs = t->thread.regs.regs.gp[R8 / sizeof(long)]; + t->thread.arch.fs = tls; return 0; } diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 4a3fa295d8fe..296c5324dace 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -24,6 +24,7 @@ config XTENSA select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL select HAVE_ARCH_TRACEHOOK + select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS select HAVE_EXIT_THREAD diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 9e1c49134c07..3edecc41ef8c 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -202,8 +202,9 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) * involved. Much simpler to just not copy those live frames across. 
*/ -int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, - unsigned long thread_fn_arg, struct task_struct *p) +int copy_thread_tls(unsigned long clone_flags, unsigned long usp_thread_fn, + unsigned long thread_fn_arg, struct task_struct *p, + unsigned long tls) { struct pt_regs *childregs = task_pt_regs(p); @@ -266,9 +267,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, childregs->syscall = regs->syscall; - /* The thread pointer is passed in the '4th argument' (= a5) */ if (clone_flags & CLONE_SETTLS) - childregs->threadptr = childregs->areg[5]; + childregs->threadptr = tls; } else { p->thread.ra = MAKE_RA_FOR_CALL( (unsigned long)ret_from_kernel_thread, 1); diff --git a/block/bio.c b/block/bio.c index a5d75f6bf4c7..94d697217887 100644 --- a/block/bio.c +++ b/block/bio.c @@ -539,6 +539,55 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start) EXPORT_SYMBOL(zero_fill_bio_iter); /** + * bio_truncate - truncate the bio to small size of @new_size + * @bio: the bio to be truncated + * @new_size: new size for truncating the bio + * + * Description: + * Truncate the bio to new size of @new_size. If bio_op(bio) is + * REQ_OP_READ, zero the truncated part. This function should only + * be used for handling corner cases, such as bio eod. + */ +void bio_truncate(struct bio *bio, unsigned new_size) +{ + struct bio_vec bv; + struct bvec_iter iter; + unsigned int done = 0; + bool truncated = false; + + if (new_size >= bio->bi_iter.bi_size) + return; + + if (bio_op(bio) != REQ_OP_READ) + goto exit; + + bio_for_each_segment(bv, bio, iter) { + if (done + bv.bv_len > new_size) { + unsigned offset; + + if (!truncated) + offset = new_size - done; + else + offset = 0; + zero_user(bv.bv_page, offset, bv.bv_len - offset); + truncated = true; + } + done += bv.bv_len; + } + + exit: + /* + * Don't touch bvec table here and make it really immutable, since + * fs bio user has to retrieve all pages via bio_for_each_segment_all + * in its .end_bio() callback. + * + * It is enough to truncate bio by updating .bi_size since we can make + * correct bvec with the updated .bi_size for drivers. + */ + bio->bi_iter.bi_size = new_size; +} + +/** * bio_put - release a reference to a bio * @bio: bio to release reference to * diff --git a/block/blk-core.c b/block/blk-core.c index e0a094fddee5..089e890ab208 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -885,11 +885,14 @@ generic_make_request_checks(struct bio *bio) } /* - * For a REQ_NOWAIT based request, return -EOPNOTSUPP - * if queue is not a request based queue. + * Non-mq queues do not honor REQ_NOWAIT, so complete a bio + * with BLK_STS_AGAIN status in order to catch -EAGAIN and + * to give a chance to the caller to repeat request gracefully. 
*/ - if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) - goto not_supported; + if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) { + status = BLK_STS_AGAIN; + goto end_io; + } if (should_fail_bio(bio)) goto end_io; diff --git a/block/blk-flush.c b/block/blk-flush.c index 1777346baf06..3f977c517960 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -69,6 +69,7 @@ #include <linux/blkdev.h> #include <linux/gfp.h> #include <linux/blk-mq.h> +#include <linux/lockdep.h> #include "blk.h" #include "blk-mq.h" @@ -505,6 +506,9 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q, INIT_LIST_HEAD(&fq->flush_queue[1]); INIT_LIST_HEAD(&fq->flush_data_in_flight); + lockdep_register_key(&fq->key); + lockdep_set_class(&fq->mq_flush_lock, &fq->key); + return fq; fail_rq: @@ -519,6 +523,7 @@ void blk_free_flush_queue(struct blk_flush_queue *fq) if (!fq) return; + lockdep_unregister_key(&fq->key); kfree(fq->flush_rq); kfree(fq); } diff --git a/block/blk-iocost.c b/block/blk-iocost.c index e01267f99183..27ca68621137 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -1212,7 +1212,7 @@ static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer) return HRTIMER_NORESTART; } -static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost) +static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost) { struct ioc *ioc = iocg->ioc; struct blkcg_gq *blkg = iocg_to_blkg(iocg); @@ -1229,11 +1229,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost) /* clear or maintain depending on the overage */ if (time_before_eq64(vtime, now->vnow)) { blkcg_clear_delay(blkg); - return; + return false; } if (!atomic_read(&blkg->use_delay) && time_before_eq64(vtime, now->vnow + vmargin)) - return; + return false; /* use delay */ if (cost) { @@ -1250,10 +1250,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost) oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer)); if (hrtimer_is_queued(&iocg->delay_timer) && abs(oexpires - expires) <= margin_ns / 4) - return; + return true; hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires), margin_ns / 4, HRTIMER_MODE_ABS); + return true; } static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer) @@ -1739,7 +1740,9 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) */ if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) { atomic64_add(abs_cost, &iocg->abs_vdebt); - iocg_kick_delay(iocg, &now, cost); + if (iocg_kick_delay(iocg, &now, cost)) + blkcg_schedule_throttle(rqos->q, + (bio->bi_opf & REQ_SWAP) == REQ_SWAP); return; } diff --git a/block/blk-map.c b/block/blk-map.c index 3a62e471d81b..b0790268ed9d 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -151,7 +151,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, return 0; unmap_rq: - __blk_rq_unmap_user(bio); + blk_rq_unmap_user(bio); fail: rq->bio = NULL; return ret; diff --git a/block/blk-merge.c b/block/blk-merge.c index d783bdc4559b..1534ed736363 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -157,17 +157,20 @@ static inline unsigned get_max_io_size(struct request_queue *q, return sectors & (lbs - 1); } -static unsigned get_max_segment_size(const struct request_queue *q, - unsigned offset) +static inline unsigned get_max_segment_size(const struct request_queue *q, + struct page *start_page, + unsigned long offset) { unsigned long mask = queue_segment_boundary(q); - /* default segment 
boundary mask means no boundary limit */ - if (mask == BLK_SEG_BOUNDARY_MASK) - return queue_max_segment_size(q); + offset = mask & (page_to_phys(start_page) + offset); - return min_t(unsigned long, mask - (mask & offset) + 1, - queue_max_segment_size(q)); + /* + * overflow may be triggered in case of zero page physical address + * on 32bit arch, use queue's max segment size when that happens. + */ + return min_not_zero(mask - offset + 1, + (unsigned long)queue_max_segment_size(q)); } /** @@ -201,7 +204,8 @@ static bool bvec_split_segs(const struct request_queue *q, unsigned seg_size = 0; while (len && *nsegs < max_segs) { - seg_size = get_max_segment_size(q, bv->bv_offset + total_len); + seg_size = get_max_segment_size(q, bv->bv_page, + bv->bv_offset + total_len); seg_size = min(seg_size, len); (*nsegs)++; @@ -419,7 +423,8 @@ static unsigned blk_bvec_map_sg(struct request_queue *q, while (nbytes > 0) { unsigned offset = bvec->bv_offset + total; - unsigned len = min(get_max_segment_size(q, offset), nbytes); + unsigned len = min(get_max_segment_size(q, bvec->bv_page, + offset), nbytes); struct page *page = bvec->bv_page; /* diff --git a/block/blk-settings.c b/block/blk-settings.c index 5f6dcc7a47bd..c8eda2e7b91e 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -328,7 +328,7 @@ EXPORT_SYMBOL(blk_queue_max_segment_size); * storage device can address. The default of 512 covers most * hardware. **/ -void blk_queue_logical_block_size(struct request_queue *q, unsigned short size) +void blk_queue_logical_block_size(struct request_queue *q, unsigned int size) { q->limits.logical_block_size = size; diff --git a/block/blk.h b/block/blk.h index 6842f28c033e..0b8884353f6b 100644 --- a/block/blk.h +++ b/block/blk.h @@ -30,6 +30,7 @@ struct blk_flush_queue { * at the same time */ struct request *orig_rq; + struct lock_class_key key; spinlock_t mq_flush_lock; }; diff --git a/block/bsg-lib.c b/block/bsg-lib.c index 347dda16c2f4..6cbb7926534c 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -266,7 +266,7 @@ static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req = bd->rq; struct bsg_set *bset = container_of(q->tag_set, struct bsg_set, tag_set); - int sts = BLK_STS_IOERR; + blk_status_t sts = BLK_STS_IOERR; int ret; blk_mq_start_request(req); diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c index 6ca015f92766..3ed7a0f144a9 100644 --- a/block/compat_ioctl.c +++ b/block/compat_ioctl.c @@ -6,6 +6,7 @@ #include <linux/compat.h> #include <linux/elevator.h> #include <linux/hdreg.h> +#include <linux/pr.h> #include <linux/slab.h> #include <linux/syscalls.h> #include <linux/types.h> @@ -354,6 +355,13 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) * but we call blkdev_ioctl, which gets the lock for us */ case BLKRRPART: + case BLKREPORTZONE: + case BLKRESETZONE: + case BLKOPENZONE: + case BLKCLOSEZONE: + case BLKFINISHZONE: + case BLKGETZONESZ: + case BLKGETNRZONES: return blkdev_ioctl(bdev, mode, cmd, (unsigned long)compat_ptr(arg)); case BLKBSZSET_32: @@ -401,6 +409,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) case BLKTRACETEARDOWN: /* compatible */ ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg)); return ret; + case IOC_PR_REGISTER: + case IOC_PR_RESERVE: + case IOC_PR_RELEASE: + case IOC_PR_PREEMPT: + case IOC_PR_PREEMPT_ABORT: + case IOC_PR_CLEAR: + return blkdev_ioctl(bdev, mode, cmd, + (unsigned long)compat_ptr(arg)); default: if (disk->fops->compat_ioctl) ret = 
disk->fops->compat_ioctl(bdev, mode, cmd, arg); diff --git a/crypto/asymmetric_keys/asym_tpm.c b/crypto/asymmetric_keys/asym_tpm.c index d16d893bd195..378b18b9bc34 100644 --- a/crypto/asymmetric_keys/asym_tpm.c +++ b/crypto/asymmetric_keys/asym_tpm.c @@ -470,6 +470,7 @@ static int tpm_key_encrypt(struct tpm_key *tk, if (ret < 0) goto error_free_tfm; + ret = -ENOMEM; req = akcipher_request_alloc(tfm, GFP_KERNEL); if (!req) goto error_free_tfm; diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index 364b9df9d631..d7f43d4ea925 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -184,6 +184,7 @@ static int software_key_eds_op(struct kernel_pkey_params *params, if (IS_ERR(tfm)) return PTR_ERR(tfm); + ret = -ENOMEM; req = akcipher_request_alloc(tfm, GFP_KERNEL); if (!req) goto error_free_tfm; diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c index f41744b9b38a..66a570d0da83 100644 --- a/drivers/ata/ahci_brcm.c +++ b/drivers/ata/ahci_brcm.c @@ -76,8 +76,7 @@ enum brcm_ahci_version { }; enum brcm_ahci_quirks { - BRCM_AHCI_QUIRK_NO_NCQ = BIT(0), - BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(1), + BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(0), }; struct brcm_ahci_priv { @@ -213,19 +212,12 @@ static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv) brcm_sata_phy_disable(priv, i); } -static u32 brcm_ahci_get_portmask(struct platform_device *pdev, +static u32 brcm_ahci_get_portmask(struct ahci_host_priv *hpriv, struct brcm_ahci_priv *priv) { - void __iomem *ahci; - struct resource *res; u32 impl; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci"); - ahci = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(ahci)) - return 0; - - impl = readl(ahci + HOST_PORTS_IMPL); + impl = readl(hpriv->mmio + HOST_PORTS_IMPL); if (fls(impl) > SATA_TOP_MAX_PHYS) dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n", @@ -233,9 +225,6 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev, else if (!impl) dev_info(priv->dev, "no ports found\n"); - devm_iounmap(&pdev->dev, ahci); - devm_release_mem_region(&pdev->dev, res->start, resource_size(res)); - return impl; } @@ -285,6 +274,13 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev, /* Perform the SATA PHY reset sequence */ brcm_sata_phy_disable(priv, ap->port_no); + /* Reset the SATA clock */ + ahci_platform_disable_clks(hpriv); + msleep(10); + + ahci_platform_enable_clks(hpriv); + msleep(10); + /* Bring the PHY back on */ brcm_sata_phy_enable(priv, ap->port_no); @@ -347,11 +343,10 @@ static int brcm_ahci_suspend(struct device *dev) struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; struct brcm_ahci_priv *priv = hpriv->plat_data; - int ret; - ret = ahci_platform_suspend(dev); brcm_sata_phys_disable(priv); - return ret; + + return ahci_platform_suspend(dev); } static int brcm_ahci_resume(struct device *dev) @@ -359,11 +354,44 @@ static int brcm_ahci_resume(struct device *dev) struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; struct brcm_ahci_priv *priv = hpriv->plat_data; + int ret; + + /* Make sure clocks are turned on before re-configuration */ + ret = ahci_platform_enable_clks(hpriv); + if (ret) + return ret; brcm_sata_init(priv); brcm_sata_phys_enable(priv); brcm_sata_alpm_init(hpriv); - return ahci_platform_resume(dev); + + /* Since we had to enable clocks earlier on, we cannot use + * ahci_platform_resume() as-is since a second call to + 
* ahci_platform_enable_resources() would bump up the resources + * (regulators, clocks, PHYs) count artificially so we copy the part + * after ahci_platform_enable_resources(). + */ + ret = ahci_platform_enable_phys(hpriv); + if (ret) + goto out_disable_phys; + + ret = ahci_platform_resume_host(dev); + if (ret) + goto out_disable_platform_phys; + + /* We resumed so update PM runtime state */ + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + return 0; + +out_disable_platform_phys: + ahci_platform_disable_phys(hpriv); +out_disable_phys: + brcm_sata_phys_disable(priv); + ahci_platform_disable_clks(hpriv); + return ret; } #endif @@ -410,44 +438,71 @@ static int brcm_ahci_probe(struct platform_device *pdev) if (!IS_ERR_OR_NULL(priv->rcdev)) reset_control_deassert(priv->rcdev); - if ((priv->version == BRCM_SATA_BCM7425) || - (priv->version == BRCM_SATA_NSP)) { - priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ; + hpriv = ahci_platform_get_resources(pdev, 0); + if (IS_ERR(hpriv)) { + ret = PTR_ERR(hpriv); + goto out_reset; + } + + hpriv->plat_data = priv; + hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO; + + switch (priv->version) { + case BRCM_SATA_BCM7425: + hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE; + /* fall through */ + case BRCM_SATA_NSP: + hpriv->flags |= AHCI_HFLAG_NO_NCQ; priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE; + break; + default: + break; } + ret = ahci_platform_enable_clks(hpriv); + if (ret) + goto out_reset; + + /* Must be first so as to configure endianness including that + * of the standard AHCI register space. + */ brcm_sata_init(priv); - priv->port_mask = brcm_ahci_get_portmask(pdev, priv); - if (!priv->port_mask) - return -ENODEV; + /* Initializes priv->port_mask which is used below */ + priv->port_mask = brcm_ahci_get_portmask(hpriv, priv); + if (!priv->port_mask) { + ret = -ENODEV; + goto out_disable_clks; + } + /* Must be done before ahci_platform_enable_phys() */ brcm_sata_phys_enable(priv); - hpriv = ahci_platform_get_resources(pdev, 0); - if (IS_ERR(hpriv)) - return PTR_ERR(hpriv); - hpriv->plat_data = priv; - hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP; - brcm_sata_alpm_init(hpriv); - ret = ahci_platform_enable_resources(hpriv); + ret = ahci_platform_enable_phys(hpriv); if (ret) - return ret; - - if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ) - hpriv->flags |= AHCI_HFLAG_NO_NCQ; - hpriv->flags |= AHCI_HFLAG_NO_WRITE_TO_RO; + goto out_disable_phys; ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info, &ahci_platform_sht); if (ret) - return ret; + goto out_disable_platform_phys; dev_info(dev, "Broadcom AHCI SATA3 registered\n"); return 0; + +out_disable_platform_phys: + ahci_platform_disable_phys(hpriv); +out_disable_phys: + brcm_sata_phys_disable(priv); +out_disable_clks: + ahci_platform_disable_clks(hpriv); +out_reset: + if (!IS_ERR_OR_NULL(priv->rcdev)) + reset_control_assert(priv->rcdev); + return ret; } static int brcm_ahci_remove(struct platform_device *pdev) @@ -457,12 +512,12 @@ static int brcm_ahci_remove(struct platform_device *pdev) struct brcm_ahci_priv *priv = hpriv->plat_data; int ret; + brcm_sata_phys_disable(priv); + ret = ata_platform_remove_one(pdev); if (ret) return ret; - brcm_sata_phys_disable(priv); - return 0; } diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 8befce036af8..129556fcf6be 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_ops); * RETURNS: * 0 on success otherwise 
a negative error code */ -static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv) +int ahci_platform_enable_phys(struct ahci_host_priv *hpriv) { int rc, i; @@ -74,6 +74,7 @@ disable_phys: } return rc; } +EXPORT_SYMBOL_GPL(ahci_platform_enable_phys); /** * ahci_platform_disable_phys - Disable PHYs @@ -81,7 +82,7 @@ disable_phys: * * This function disables all PHYs found in hpriv->phys. */ -static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv) +void ahci_platform_disable_phys(struct ahci_host_priv *hpriv) { int i; @@ -90,6 +91,7 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv) phy_exit(hpriv->phys[i]); } } +EXPORT_SYMBOL_GPL(ahci_platform_disable_phys); /** * ahci_platform_enable_clks - Enable platform clocks diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index e9017c570bc5..6f4ab5c5b52d 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -5329,6 +5329,30 @@ void ata_qc_complete(struct ata_queued_cmd *qc) } /** + * ata_qc_get_active - get bitmask of active qcs + * @ap: port in question + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * Bitmask of active qcs + */ +u64 ata_qc_get_active(struct ata_port *ap) +{ + u64 qc_active = ap->qc_active; + + /* ATA_TAG_INTERNAL is sent to hw as tag 0 */ + if (qc_active & (1ULL << ATA_TAG_INTERNAL)) { + qc_active |= (1 << 0); + qc_active &= ~(1ULL << ATA_TAG_INTERNAL); + } + + return qc_active; +} +EXPORT_SYMBOL_GPL(ata_qc_get_active); + +/** * ata_qc_complete_multiple - Complete multiple qcs successfully * @ap: port in question * @qc_active: new qc_active mask diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 9239615d8a04..d55ee244d693 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -1280,7 +1280,7 @@ static void sata_fsl_host_intr(struct ata_port *ap) i, ioread32(hcr_base + CC), ioread32(hcr_base + CA)); } - ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); return; } else if ((ap->qc_active & (1ULL << ATA_TAG_INTERNAL))) { diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 277f11909fc1..d7228f8e9297 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -2829,7 +2829,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp } if (work_done) { - ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); /* Update the software queue position index in hardware */ writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index f3e62f5528bd..eb9dc14e5147 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -984,7 +984,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) check_commands = 0; check_commands &= ~(1 << pos); } - ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); } } diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index b23d1e4bad33..9d0d65efcd94 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -374,7 +374,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb, here = (eni_vcc->descr+skip) & (eni_vcc->words-1); dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK; - j++; + dma[j++] = 0; } here = (eni_vcc->descr+size+skip) & (eni_vcc->words-1); if (!eff) size += skip; @@ -447,7 +447,7 @@ static int 
do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb, if (size != eff) { dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK; - j++; + dma[j++] = 0; } if (!j || j > 2*RX_DMA_BUF) { printk(KERN_CRIT DEV_LABEL "!j or j too big!!!\n"); diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile index 4a66888e7253..5fa7ce3745a0 100644 --- a/drivers/base/firmware_loader/builtin/Makefile +++ b/drivers/base/firmware_loader/builtin/Makefile @@ -17,7 +17,7 @@ PROGBITS = $(if $(CONFIG_ARM),%,@)progbits filechk_fwbin = \ echo "/* Generated by $(src)/Makefile */" ;\ echo " .section .rodata" ;\ - echo " .p2align $(ASM_ALIGN)" ;\ + echo " .p2align 4" ;\ echo "_fw_$(FWSTR)_bin:" ;\ echo " .incbin \"$(fwdir)/$(FWNAME)\"" ;\ echo "_fw_end:" ;\ diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 57532465fb83..b4607dd96185 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1296,10 +1296,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b mutex_unlock(&nbd->config_lock); ret = wait_event_interruptible(config->recv_wq, atomic_read(&config->recv_threads) == 0); - if (ret) { + if (ret) sock_shutdown(nbd); - flush_workqueue(nbd->recv_workq); - } + flush_workqueue(nbd->recv_workq); + mutex_lock(&nbd->config_lock); nbd_bdev_reset(bdev); /* user requested, ignore socket errors */ diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c index d4d88b581822..ed34785dd64b 100644 --- a/drivers/block/null_blk_zoned.c +++ b/drivers/block/null_blk_zoned.c @@ -129,11 +129,13 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector, return BLK_STS_IOERR; case BLK_ZONE_COND_EMPTY: case BLK_ZONE_COND_IMP_OPEN: + case BLK_ZONE_COND_EXP_OPEN: + case BLK_ZONE_COND_CLOSED: /* Writes must be at the write pointer position */ if (sector != zone->wp) return BLK_STS_IOERR; - if (zone->cond == BLK_ZONE_COND_EMPTY) + if (zone->cond != BLK_ZONE_COND_EXP_OPEN) zone->cond = BLK_ZONE_COND_IMP_OPEN; zone->wp += nr_sectors; @@ -186,7 +188,10 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op, if (zone->cond == BLK_ZONE_COND_FULL) return BLK_STS_IOERR; - zone->cond = BLK_ZONE_COND_CLOSED; + if (zone->wp == zone->start) + zone->cond = BLK_ZONE_COND_EMPTY; + else + zone->cond = BLK_ZONE_COND_CLOSED; break; case REQ_OP_ZONE_FINISH: if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index ee67bf929fac..861fc65a1b75 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2707,7 +2707,7 @@ static const struct block_device_operations pktcdvd_ops = { .release = pkt_close, .ioctl = pkt_ioctl, #ifdef CONFIG_COMPAT - .ioctl = pkt_compat_ioctl, + .compat_ioctl = pkt_compat_ioctl, #endif .check_events = pkt_check_events, }; diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index d6a6adfd5159..4c5d99f87813 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -190,6 +190,9 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref, { int err; struct xen_blkif *blkif = ring->blkif; + const struct blkif_common_sring *sring_common; + RING_IDX rsp_prod, req_prod; + unsigned int size; /* Already connected through? 
*/ if (ring->irq) @@ -200,46 +203,62 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref, if (err < 0) return err; + sring_common = (struct blkif_common_sring *)ring->blk_ring; + rsp_prod = READ_ONCE(sring_common->rsp_prod); + req_prod = READ_ONCE(sring_common->req_prod); + switch (blkif->blk_protocol) { case BLKIF_PROTOCOL_NATIVE: { - struct blkif_sring *sring; - sring = (struct blkif_sring *)ring->blk_ring; - BACK_RING_INIT(&ring->blk_rings.native, sring, - XEN_PAGE_SIZE * nr_grefs); + struct blkif_sring *sring_native = + (struct blkif_sring *)ring->blk_ring; + + BACK_RING_ATTACH(&ring->blk_rings.native, sring_native, + rsp_prod, XEN_PAGE_SIZE * nr_grefs); + size = __RING_SIZE(sring_native, XEN_PAGE_SIZE * nr_grefs); break; } case BLKIF_PROTOCOL_X86_32: { - struct blkif_x86_32_sring *sring_x86_32; - sring_x86_32 = (struct blkif_x86_32_sring *)ring->blk_ring; - BACK_RING_INIT(&ring->blk_rings.x86_32, sring_x86_32, - XEN_PAGE_SIZE * nr_grefs); + struct blkif_x86_32_sring *sring_x86_32 = + (struct blkif_x86_32_sring *)ring->blk_ring; + + BACK_RING_ATTACH(&ring->blk_rings.x86_32, sring_x86_32, + rsp_prod, XEN_PAGE_SIZE * nr_grefs); + size = __RING_SIZE(sring_x86_32, XEN_PAGE_SIZE * nr_grefs); break; } case BLKIF_PROTOCOL_X86_64: { - struct blkif_x86_64_sring *sring_x86_64; - sring_x86_64 = (struct blkif_x86_64_sring *)ring->blk_ring; - BACK_RING_INIT(&ring->blk_rings.x86_64, sring_x86_64, - XEN_PAGE_SIZE * nr_grefs); + struct blkif_x86_64_sring *sring_x86_64 = + (struct blkif_x86_64_sring *)ring->blk_ring; + + BACK_RING_ATTACH(&ring->blk_rings.x86_64, sring_x86_64, + rsp_prod, XEN_PAGE_SIZE * nr_grefs); + size = __RING_SIZE(sring_x86_64, XEN_PAGE_SIZE * nr_grefs); break; } default: BUG(); } + err = -EIO; + if (req_prod - rsp_prod > size) + goto fail; + err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn, xen_blkif_be_int, 0, "blkif-backend", ring); - if (err < 0) { - xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring); - ring->blk_rings.common.sring = NULL; - return err; - } + if (err < 0) + goto fail; ring->irq = err; return 0; + +fail: + xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring); + ring->blk_rings.common.sring = NULL; + return err; } static int xen_blkif_disconnect(struct xen_blkif *blkif) @@ -1131,7 +1150,8 @@ static struct xenbus_driver xen_blkbk_driver = { .ids = xen_blkbk_ids, .probe = xen_blkbk_probe, .remove = xen_blkbk_remove, - .otherend_changed = frontend_changed + .otherend_changed = frontend_changed, + .allow_rebind = true, }; int xen_blkif_xenbus_init(void) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index a74d03913822..c02be06c5299 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1113,8 +1113,8 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, if (!VDEV_IS_EXTENDED(info->vdevice)) { err = xen_translate_vdev(info->vdevice, &minor, &offset); if (err) - return err; - nr_parts = PARTS_PER_DISK; + return err; + nr_parts = PARTS_PER_DISK; } else { minor = BLKIF_MINOR_EXT(info->vdevice); nr_parts = PARTS_PER_EXT_DISK; diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 56887c6877a7..ccb44fe790a7 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -343,6 +343,12 @@ static int sysc_get_clocks(struct sysc *ddata) return -EINVAL; } + /* Always add a slot for main clocks fck and ick even if unused */ + if (!nr_fck) + ddata->nr_clocks++; + if (!nr_ick) + ddata->nr_clocks++; + ddata->clocks = devm_kcalloc(ddata->dev, ddata->nr_clocks, 
sizeof(*ddata->clocks), GFP_KERNEL); @@ -421,7 +427,7 @@ static int sysc_enable_opt_clocks(struct sysc *ddata) struct clk *clock; int i, error; - if (!ddata->clocks) + if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1) return 0; for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) { @@ -455,7 +461,7 @@ static void sysc_disable_opt_clocks(struct sysc *ddata) struct clk *clock; int i; - if (!ddata->clocks) + if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1) return; for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) { @@ -981,7 +987,8 @@ static int sysc_disable_module(struct device *dev) return ret; } - if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY) + if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_MSTANDBY) || + ddata->cfg.quirks & (SYSC_QUIRK_FORCE_MSTANDBY)) best_mode = SYSC_IDLE_FORCE; reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift); @@ -1583,6 +1590,10 @@ static int sysc_reset(struct sysc *ddata) sysc_val |= sysc_mask; sysc_write(ddata, sysc_offset, sysc_val); + if (ddata->cfg.srst_udelay) + usleep_range(ddata->cfg.srst_udelay, + ddata->cfg.srst_udelay * 2); + if (ddata->clk_enable_quirk) ddata->clk_enable_quirk(ddata); diff --git a/drivers/char/agp/isoch.c b/drivers/char/agp/isoch.c index 31c374b1b91b..7ecf20a6d19c 100644 --- a/drivers/char/agp/isoch.c +++ b/drivers/char/agp/isoch.c @@ -84,7 +84,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge, unsigned int cdev = 0; u32 mnistat, tnistat, tstatus, mcmd; u16 tnicmd, mnicmd; - u8 mcapndx; u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async; u32 step, rem, rem_isoch, rem_async; int ret = 0; @@ -138,8 +137,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge, cur = list_entry(pos, struct agp_3_5_dev, list); dev = cur->dev; - mcapndx = cur->capndx; - pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat); master[cdev].maxbw = (mnistat >> 16) & 0xff; @@ -251,8 +248,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge, cur = master[cdev].dev; dev = cur->dev; - mcapndx = cur->capndx; - master[cdev].rq += (cdev == ndevs - 1) ? (rem_async + rem_isoch) : step; @@ -319,7 +314,7 @@ int agp_3_5_enable(struct agp_bridge_data *bridge) { struct pci_dev *td = bridge->dev, *dev = NULL; u8 mcapndx; - u32 isoch, arqsz; + u32 isoch; u32 tstatus, mstatus, ncapid; u32 mmajor; u16 mpstat; @@ -334,8 +329,6 @@ int agp_3_5_enable(struct agp_bridge_data *bridge) if (isoch == 0) /* isoch xfers not available, bail out. */ return -ENODEV; - arqsz = (tstatus >> 13) & 0x7; - /* * Allocate a head for our AGP 3.5 device list * (multiple AGP v3 devices are allowed behind a single bridge). 
diff --git a/drivers/char/random.c b/drivers/char/random.c index 909e0c3d82ea..cda12933a17d 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -2175,6 +2175,7 @@ const struct file_operations urandom_fops = { .read = urandom_read, .write = random_write, .unlocked_ioctl = random_ioctl, + .compat_ioctl = compat_ptr_ioctl, .fasync = random_fasync, .llseek = noop_llseek, }; diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 2ec47a69a2a6..87f449340202 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -61,6 +61,12 @@ static void tpm_dev_async_work(struct work_struct *work) mutex_lock(&priv->buffer_mutex); priv->command_enqueued = false; + ret = tpm_try_get_ops(priv->chip); + if (ret) { + priv->response_length = ret; + goto out; + } + ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer, sizeof(priv->data_buffer)); tpm_put_ops(priv->chip); @@ -68,6 +74,7 @@ static void tpm_dev_async_work(struct work_struct *work) priv->response_length = ret; mod_timer(&priv->user_read_timer, jiffies + (120 * HZ)); } +out: mutex_unlock(&priv->buffer_mutex); wake_up_interruptible(&priv->async_wait); } @@ -123,7 +130,7 @@ ssize_t tpm_common_read(struct file *file, char __user *buf, priv->response_read = true; ret_size = min_t(ssize_t, size, priv->response_length); - if (!ret_size) { + if (ret_size <= 0) { priv->response_length = 0; goto out; } @@ -204,6 +211,7 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, if (file->f_flags & O_NONBLOCK) { priv->command_enqueued = true; queue_work(tpm_dev_wq, &priv->async_work); + tpm_put_ops(priv->chip); mutex_unlock(&priv->buffer_mutex); return size; } diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h index 1089fc0bb290..f3742bcc73e3 100644 --- a/drivers/char/tpm/tpm-dev.h +++ b/drivers/char/tpm/tpm-dev.h @@ -14,7 +14,7 @@ struct file_priv { struct work_struct timeout_work; struct work_struct async_work; wait_queue_head_t async_wait; - size_t response_length; + ssize_t response_length; bool response_read; bool command_enqueued; diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index b9e1547be6b5..5620747da0cf 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -218,7 +218,6 @@ int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx, int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, struct tpm_digest *digests); int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max); -void tpm2_flush_context(struct tpm_chip *chip, u32 handle); ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value, const char *desc); diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index fdb457704aa7..13696deceae8 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -362,6 +362,7 @@ void tpm2_flush_context(struct tpm_chip *chip, u32 handle) tpm_transmit_cmd(chip, &buf, 0, "flushing context"); tpm_buf_destroy(&buf); } +EXPORT_SYMBOL_GPL(tpm2_flush_context); struct tpm2_get_cap_out { u8 more_data; diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c index 6640a14dbe48..22bf553ccf9d 100644 --- a/drivers/char/tpm/tpm_ftpm_tee.c +++ b/drivers/char/tpm/tpm_ftpm_tee.c @@ -32,7 +32,7 @@ static const uuid_t ftpm_ta_uuid = 0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96); /** - * ftpm_tee_tpm_op_recv - retrieve fTPM response. + * ftpm_tee_tpm_op_recv() - retrieve fTPM response. * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h. 
* @buf: the buffer to store data. * @count: the number of bytes to read. @@ -61,7 +61,7 @@ static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count) } /** - * ftpm_tee_tpm_op_send - send TPM commands through the TEE shared memory. + * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory. * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h * @buf: the buffer to send. * @len: the number of bytes to send. @@ -208,7 +208,7 @@ static int ftpm_tee_match(struct tee_ioctl_version_data *ver, const void *data) } /** - * ftpm_tee_probe - initialize the fTPM + * ftpm_tee_probe() - initialize the fTPM * @pdev: the platform_device description. * * Return: @@ -298,7 +298,7 @@ out_tee_session: } /** - * ftpm_tee_remove - remove the TPM device + * ftpm_tee_remove() - remove the TPM device * @pdev: the platform_device description. * * Return: @@ -328,6 +328,19 @@ static int ftpm_tee_remove(struct platform_device *pdev) return 0; } +/** + * ftpm_tee_shutdown() - shutdown the TPM device + * @pdev: the platform_device description. + */ +static void ftpm_tee_shutdown(struct platform_device *pdev) +{ + struct ftpm_tee_private *pvt_data = dev_get_drvdata(&pdev->dev); + + tee_shm_free(pvt_data->shm); + tee_client_close_session(pvt_data->ctx, pvt_data->session); + tee_client_close_context(pvt_data->ctx); +} + static const struct of_device_id of_ftpm_tee_ids[] = { { .compatible = "microsoft,ftpm" }, { } @@ -341,6 +354,7 @@ static struct platform_driver ftpm_tee_driver = { }, .probe = ftpm_tee_probe, .remove = ftpm_tee_remove, + .shutdown = ftpm_tee_shutdown, }; module_platform_driver(ftpm_tee_driver); diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 8af2cee1a762..27c6ca031e23 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -1059,8 +1059,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, goto out_err; } - tpm_chip_start(chip); - chip->flags |= TPM_CHIP_FLAG_IRQ; if (irq) { tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED, irq); @@ -1070,7 +1068,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, } else { tpm_tis_probe_irq(chip, intmask); } - tpm_chip_stop(chip); } rc = tpm_chip_register(chip); diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c index 0aabe49aed09..a9d4234758d7 100644 --- a/drivers/clk/at91/at91sam9260.c +++ b/drivers/clk/at91/at91sam9260.c @@ -348,7 +348,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np, return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c index 0ac34cdaa106..77fe83a73bf4 100644 --- a/drivers/clk/at91/at91sam9rl.c +++ b/drivers/clk/at91/at91sam9rl.c @@ -83,7 +83,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np) return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c index 0855f3a80cc7..086cf0b4955c 100644 --- a/drivers/clk/at91/at91sam9x5.c +++ b/drivers/clk/at91/at91sam9x5.c @@ -146,7 +146,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = 
device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c index 0b03cfae3a9d..b71515acdec1 100644 --- a/drivers/clk/at91/pmc.c +++ b/drivers/clk/at91/pmc.c @@ -275,7 +275,7 @@ static int __init pmc_register_ops(void) np = of_find_matching_node(NULL, sama5d2_pmc_dt_ids); - pmcreg = syscon_node_to_regmap(np); + pmcreg = device_node_to_regmap(np); if (IS_ERR(pmcreg)) return PTR_ERR(pmcreg); diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c index 0de1108737db..ff7e3f727082 100644 --- a/drivers/clk/at91/sama5d2.c +++ b/drivers/clk/at91/sama5d2.c @@ -162,7 +162,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np) return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c index 25b156d4e645..a6dee4a3b6e4 100644 --- a/drivers/clk/at91/sama5d4.c +++ b/drivers/clk/at91/sama5d4.c @@ -136,7 +136,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np) return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index b68e200829f2..772258de2d1f 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -3249,6 +3249,34 @@ static inline void clk_debug_unregister(struct clk_core *core) } #endif +static void clk_core_reparent_orphans_nolock(void) +{ + struct clk_core *orphan; + struct hlist_node *tmp2; + + /* + * walk the list of orphan clocks and reparent any that newly finds a + * parent. + */ + hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { + struct clk_core *parent = __clk_init_parent(orphan); + + /* + * We need to use __clk_set_parent_before() and _after() to + * to properly migrate any prepare/enable count of the orphan + * clock. This is important for CLK_IS_CRITICAL clocks, which + * are enabled during init but might not have a parent yet. + */ + if (parent) { + /* update the clk tree topology */ + __clk_set_parent_before(orphan, parent); + __clk_set_parent_after(orphan, parent, NULL); + __clk_recalc_accuracies(orphan); + __clk_recalc_rates(orphan, 0); + } + } +} + /** * __clk_core_init - initialize the data structures in a struct clk_core * @core: clk_core being initialized @@ -3259,8 +3287,6 @@ static inline void clk_debug_unregister(struct clk_core *core) static int __clk_core_init(struct clk_core *core) { int ret; - struct clk_core *orphan; - struct hlist_node *tmp2; unsigned long rate; if (!core) @@ -3400,34 +3426,21 @@ static int __clk_core_init(struct clk_core *core) if (core->flags & CLK_IS_CRITICAL) { unsigned long flags; - clk_core_prepare(core); + ret = clk_core_prepare(core); + if (ret) + goto out; flags = clk_enable_lock(); - clk_core_enable(core); + ret = clk_core_enable(core); clk_enable_unlock(flags); + if (ret) { + clk_core_unprepare(core); + goto out; + } } - /* - * walk the list of orphan clocks and reparent any that newly finds a - * parent. - */ - hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { - struct clk_core *parent = __clk_init_parent(orphan); + clk_core_reparent_orphans_nolock(); - /* - * We need to use __clk_set_parent_before() and _after() to - * to properly migrate any prepare/enable count of the orphan - * clock. 
This is important for CLK_IS_CRITICAL clocks, which - * are enabled during init but might not have a parent yet. - */ - if (parent) { - /* update the clk tree topology */ - __clk_set_parent_before(orphan, parent); - __clk_set_parent_after(orphan, parent, NULL); - __clk_recalc_accuracies(orphan); - __clk_recalc_rates(orphan, 0); - } - } kref_init(&core->ref); out: @@ -4179,6 +4192,13 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) EXPORT_SYMBOL_GPL(clk_notifier_unregister); #ifdef CONFIG_OF +static void clk_core_reparent_orphans(void) +{ + clk_prepare_lock(); + clk_core_reparent_orphans_nolock(); + clk_prepare_unlock(); +} + /** * struct of_clk_provider - Clock provider registration structure * @link: Entry in global list of clock providers @@ -4274,6 +4294,8 @@ int of_clk_add_provider(struct device_node *np, mutex_unlock(&of_clk_mutex); pr_debug("Added clock from %pOF\n", np); + clk_core_reparent_orphans(); + ret = of_clk_set_defaults(np, true); if (ret < 0) of_clk_del_provider(np); @@ -4309,6 +4331,8 @@ int of_clk_add_hw_provider(struct device_node *np, mutex_unlock(&of_clk_mutex); pr_debug("Added clk_hw provider from %pOF\n", np); + clk_core_reparent_orphans(); + ret = of_clk_set_defaults(np, true); if (ret < 0) of_clk_del_provider(np); diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c index 388bdb94f841..d3486ee79ab5 100644 --- a/drivers/clk/imx/clk-composite-8m.c +++ b/drivers/clk/imx/clk-composite-8m.c @@ -142,6 +142,7 @@ struct clk *imx8m_clk_composite_flags(const char *name, mux->reg = reg; mux->shift = PCG_PCS_SHIFT; mux->mask = PCG_PCS_MASK; + mux->lock = &imx_ccm_lock; div = kzalloc(sizeof(*div), GFP_KERNEL); if (!div) @@ -161,6 +162,7 @@ struct clk *imx8m_clk_composite_flags(const char *name, gate_hw = &gate->hw; gate->reg = reg; gate->bit_idx = PCG_CGC_SHIFT; + gate->lock = &imx_ccm_lock; hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, mux_hw, &clk_mux_ops, div_hw, diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c index 3fdf3d494f0a..281191b55b3a 100644 --- a/drivers/clk/imx/clk-imx7ulp.c +++ b/drivers/clk/imx/clk-imx7ulp.c @@ -40,6 +40,7 @@ static const struct clk_div_table ulp_div_table[] = { { .val = 5, .div = 16, }, { .val = 6, .div = 32, }, { .val = 7, .div = 64, }, + { /* sentinel */ }, }; static const int pcc2_uart_clk_ids[] __initconst = { diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c index 5c458199060a..3636c8035c7d 100644 --- a/drivers/clk/imx/clk-pll14xx.c +++ b/drivers/clk/imx/clk-pll14xx.c @@ -159,7 +159,7 @@ static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll) { u32 val; - return readl_poll_timeout(pll->base, val, val & LOCK_TIMEOUT_US, 0, + return readl_poll_timeout(pll->base, val, val & LOCK_STATUS, 0, LOCK_TIMEOUT_US); } diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index a60a1be937ad..b4a95cbbda98 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -134,7 +134,7 @@ static DEFINE_SPINLOCK(ssp3_lock); static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"}; static DEFINE_SPINLOCK(timer_lock); -static const char *timer_parent_names[] = {"clk32", "vctcxo_2", "vctcxo_4", "vctcxo"}; +static const char *timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"}; static DEFINE_SPINLOCK(reset_lock); diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c index 38424e63bcae..7f59fb8da033 100644 --- 
a/drivers/clk/qcom/gcc-sc7180.c +++ b/drivers/clk/qcom/gcc-sc7180.c @@ -2186,7 +2186,8 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = { .pd = { .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc", }, - .pwrsts = PWRSTS_OFF_ON | VOTABLE, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = { @@ -2194,7 +2195,8 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = { .pd = { .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc", }, - .pwrsts = PWRSTS_OFF_ON | VOTABLE, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc *gcc_sc7180_gdscs[] = { diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index f7b370f3acef..f6ce888098be 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c @@ -3255,6 +3255,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = { @@ -3263,6 +3264,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = { @@ -3271,6 +3273,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = { @@ -3279,6 +3282,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = { @@ -3287,6 +3291,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = { .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = { @@ -3295,6 +3300,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = { .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = { @@ -3303,6 +3309,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = { .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct clk_regmap *gcc_sdm845_clocks[] = { diff --git a/drivers/clk/qcom/gpucc-msm8998.c b/drivers/clk/qcom/gpucc-msm8998.c index e5e2492b20c5..9b3923af02a1 100644 --- a/drivers/clk/qcom/gpucc-msm8998.c +++ b/drivers/clk/qcom/gpucc-msm8998.c @@ -242,10 +242,12 @@ static struct clk_branch gfx3d_isense_clk = { static struct gdsc gpu_cx_gdsc = { .gdscr = 0x1004, + .gds_hw_ctrl = 0x1008, .pd = { .name = "gpu_cx", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc gpu_gx_gdsc = { diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 3a991ca1ee36..c9e5a1fb6653 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -12,6 +12,7 @@ #include <linux/clk-provider.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/clk.h> #include "clk.h" #include "clk-cpu.h" @@ -1646,6 +1647,13 @@ static void __init exynos5x_clk_init(struct device_node *np, exynos5x_subcmus); } + /* + * Keep top part of G3D clock path enabled permanently to ensure + * that the internal busses get their clock regardless of the + * main G3D clock enablement status. 
+ */ + clk_prepare_enable(__clk_lookup("mout_sw_aclk_g3d")); + samsung_clk_of_add_provider(np, ctx); } diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c index 45a1ed3fe674..50f8d1bc7046 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c @@ -23,9 +23,9 @@ */ static const char * const ar100_r_apb2_parents[] = { "osc24M", "osc32k", - "pll-periph0", "iosc" }; + "iosc", "pll-periph0" }; static const struct ccu_mux_var_prediv ar100_r_apb2_predivs[] = { - { .index = 2, .shift = 0, .width = 5 }, + { .index = 3, .shift = 0, .width = 5 }, }; static struct ccu_div ar100_clk = { @@ -51,17 +51,7 @@ static struct ccu_div ar100_clk = { static CLK_FIXED_FACTOR_HW(r_ahb_clk, "r-ahb", &ar100_clk.common.hw, 1, 1, 0); -static struct ccu_div r_apb1_clk = { - .div = _SUNXI_CCU_DIV(0, 2), - - .common = { - .reg = 0x00c, - .hw.init = CLK_HW_INIT("r-apb1", - "r-ahb", - &ccu_div_ops, - 0), - }, -}; +static SUNXI_CCU_M(r_apb1_clk, "r-apb1", "r-ahb", 0x00c, 0, 2, 0); static struct ccu_div r_apb2_clk = { .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO), diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c index 4646fdc61053..4c8c491b87c2 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c @@ -51,19 +51,7 @@ static struct ccu_div ar100_clk = { static CLK_FIXED_FACTOR_HW(ahb0_clk, "ahb0", &ar100_clk.common.hw, 1, 1, 0); -static struct ccu_div apb0_clk = { - .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO), - - .common = { - .reg = 0x0c, - .hw.init = CLK_HW_INIT_HW("apb0", - &ahb0_clk.hw, - &ccu_div_ops, - 0), - }, -}; - -static SUNXI_CCU_M(a83t_apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0); +static SUNXI_CCU_M(apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0); /* * Define the parent as an array that can be reused to save space @@ -127,7 +115,7 @@ static struct ccu_mp a83t_ir_clk = { static struct ccu_common *sun8i_a83t_r_ccu_clks[] = { &ar100_clk.common, - &a83t_apb0_clk.common, + &apb0_clk.common, &apb0_pio_clk.common, &apb0_ir_clk.common, &apb0_timer_clk.common, @@ -167,7 +155,7 @@ static struct clk_hw_onecell_data sun8i_a83t_r_hw_clks = { .hws = { [CLK_AR100] = &ar100_clk.common.hw, [CLK_AHB0] = &ahb0_clk.hw, - [CLK_APB0] = &a83t_apb0_clk.common.hw, + [CLK_APB0] = &apb0_clk.common.hw, [CLK_APB0_PIO] = &apb0_pio_clk.common.hw, [CLK_APB0_IR] = &apb0_ir_clk.common.hw, [CLK_APB0_TIMER] = &apb0_timer_clk.common.hw, @@ -282,9 +270,6 @@ static void __init sunxi_r_ccu_init(struct device_node *node, static void __init sun8i_a83t_r_ccu_setup(struct device_node *node) { - /* Fix apb0 bus gate parents here */ - apb0_gate_parent[0] = &a83t_apb0_clk.common.hw; - sunxi_r_ccu_init(node, &sun8i_a83t_r_ccu_desc); } CLK_OF_DECLARE(sun8i_a83t_r_ccu, "allwinner,sun8i-a83t-r-ccu", diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c index 897490800102..23bfe1d12f21 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c @@ -761,7 +761,8 @@ static struct ccu_mp outa_clk = { .reg = 0x1f0, .features = CCU_FEATURE_FIXED_PREDIV, .hw.init = CLK_HW_INIT_PARENTS("outa", out_parents, - &ccu_mp_ops, 0), + &ccu_mp_ops, + CLK_SET_RATE_PARENT), } }; @@ -779,7 +780,8 @@ static struct ccu_mp outb_clk = { .reg = 0x1f4, .features = CCU_FEATURE_FIXED_PREDIV, .hw.init = CLK_HW_INIT_PARENTS("outb", out_parents, - &ccu_mp_ops, 0), + &ccu_mp_ops, + CLK_SET_RATE_PARENT), } }; diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c 
b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index 5c779eec454b..0e36ca3bf3d5 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c @@ -618,7 +618,7 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = { [CLK_MBUS] = &mbus_clk.common.hw, [CLK_MIPI_CSI] = &mipi_csi_clk.common.hw, }, - .num = CLK_NUMBER, + .num = CLK_PLL_DDR1 + 1, }; static struct clk_hw_onecell_data sun8i_v3_hw_clks = { @@ -700,7 +700,7 @@ static struct clk_hw_onecell_data sun8i_v3_hw_clks = { [CLK_MBUS] = &mbus_clk.common.hw, [CLK_MIPI_CSI] = &mipi_csi_clk.common.hw, }, - .num = CLK_NUMBER, + .num = CLK_I2S0 + 1, }; static struct ccu_reset_map sun8i_v3s_ccu_resets[] = { diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h index b0160d305a67..108eeeedcbf7 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h @@ -51,6 +51,4 @@ #define CLK_PLL_DDR1 74 -#define CLK_NUMBER (CLK_I2S0 + 1) - #endif /* _CCU_SUN8I_H3_H_ */ diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c index e6bd6d1ea012..f6cdce441cf7 100644 --- a/drivers/clk/tegra/clk.c +++ b/drivers/clk/tegra/clk.c @@ -231,8 +231,10 @@ struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks) periph_banks = banks; clks = kcalloc(num, sizeof(struct clk *), GFP_KERNEL); - if (!clks) + if (!clks) { kfree(periph_clk_enb_refcnt); + return NULL; + } clk_num = num; diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index f65e16c4f3c4..8d4c08b034bd 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -233,7 +233,6 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) cinfo->iobase = of_iomap(node, 0); cinfo->dev = &pdev->dev; pm_runtime_enable(cinfo->dev); - pm_runtime_irq_safe(cinfo->dev); pm_runtime_get_sync(cinfo->dev); atl_write(cinfo, DRA7_ATL_PCLKMUX_REG(0), DRA7_ATL_PCLKMUX); diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c index 4e54856ce2a5..c4f15c4068c0 100644 --- a/drivers/clocksource/timer-riscv.c +++ b/drivers/clocksource/timer-riscv.c @@ -56,7 +56,7 @@ static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs) return get_cycles64(); } -static u64 riscv_sched_clock(void) +static u64 notrace riscv_sched_clock(void) { return get_cycles64(); } diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index f1d170dcf4d3..aba591d57c67 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -121,6 +121,8 @@ static const struct of_device_id blacklist[] __initconst = { { .compatible = "mediatek,mt8176", }, { .compatible = "mediatek,mt8183", }, + { .compatible = "nvidia,tegra20", }, + { .compatible = "nvidia,tegra30", }, { .compatible = "nvidia,tegra124", }, { .compatible = "nvidia,tegra210", }, diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c index 506e3f2bf53a..83c85d3d67e3 100644 --- a/drivers/cpufreq/vexpress-spc-cpufreq.c +++ b/drivers/cpufreq/vexpress-spc-cpufreq.c @@ -434,7 +434,7 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy) if (cur_cluster < MAX_CLUSTERS) { int cpu; - cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); + dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus); for_each_cpu(cpu, policy->cpus) per_cpu(physical_cluster, cpu) = cur_cluster; diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c index de7e706efd46..6deaaf5f05b5 100644 --- 
a/drivers/cpuidle/governors/teo.c +++ b/drivers/cpuidle/governors/teo.c @@ -198,7 +198,7 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) * pattern detection. */ cpu_data->intervals[cpu_data->interval_idx++] = measured_ns; - if (cpu_data->interval_idx > INTERVALS) + if (cpu_data->interval_idx >= INTERVALS) cpu_data->interval_idx = 0; } diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 26754d0570ba..b846d73d9a85 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -40,7 +40,7 @@ struct sec_req { int req_id; /* Status of the SEC request */ - int fake_busy; + atomic_t fake_busy; }; /** @@ -132,8 +132,8 @@ struct sec_debug_file { }; struct sec_dfx { - u64 send_cnt; - u64 recv_cnt; + atomic64_t send_cnt; + atomic64_t recv_cnt; }; struct sec_debug { diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 62b04e19067c..0a5391fff485 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -120,7 +120,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) return; } - __sync_add_and_fetch(&req->ctx->sec->debug.dfx.recv_cnt, 1); + atomic64_inc(&req->ctx->sec->debug.dfx.recv_cnt); req->ctx->req_op->buf_unmap(req->ctx, req); @@ -135,13 +135,13 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) mutex_lock(&qp_ctx->req_lock); ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); mutex_unlock(&qp_ctx->req_lock); - __sync_add_and_fetch(&ctx->sec->debug.dfx.send_cnt, 1); + atomic64_inc(&ctx->sec->debug.dfx.send_cnt); if (ret == -EBUSY) return -ENOBUFS; if (!ret) { - if (req->fake_busy) + if (atomic_read(&req->fake_busy)) ret = -EBUSY; else ret = -EINPROGRESS; @@ -641,7 +641,7 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req) if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt) sec_update_iv(req); - if (__sync_bool_compare_and_swap(&req->fake_busy, 1, 0)) + if (atomic_cmpxchg(&req->fake_busy, 1, 0) != 1) sk_req->base.complete(&sk_req->base, -EINPROGRESS); sk_req->base.complete(&sk_req->base, req->err_type); @@ -672,9 +672,9 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req) } if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs)) - req->fake_busy = 1; + atomic_set(&req->fake_busy, 1); else - req->fake_busy = 0; + atomic_set(&req->fake_busy, 0); ret = ctx->req_op->get_res(ctx, req); if (ret) { diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 74f0654028c9..ab742dfbab99 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -608,6 +608,14 @@ static const struct file_operations sec_dbg_fops = { .write = sec_debug_write, }; +static int debugfs_atomic64_t_get(void *data, u64 *val) +{ + *val = atomic64_read((atomic64_t *)data); + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic64_t_ro, debugfs_atomic64_t_get, NULL, + "%lld\n"); + static int sec_core_debug_init(struct sec_dev *sec) { struct hisi_qm *qm = &sec->qm; @@ -628,9 +636,11 @@ static int sec_core_debug_init(struct sec_dev *sec) debugfs_create_regset32("regs", 0444, tmp_d, regset); - debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt); + debugfs_create_file("send_cnt", 0444, tmp_d, &dfx->send_cnt, + &fops_atomic64_t_ro); - debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt); + debugfs_create_file("recv_cnt", 0444, tmp_d, &dfx->recv_cnt, + 
&fops_atomic64_t_ro); return 0; } diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index defe1d438710..35535833b6f7 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig @@ -83,7 +83,6 @@ config ARM_EXYNOS_BUS_DEVFREQ select DEVFREQ_GOV_PASSIVE select DEVFREQ_EVENT_EXYNOS_PPMU select PM_DEVFREQ_EVENT - select PM_OPP help This adds the common DEVFREQ driver for Exynos Memory bus. Exynos Memory bus has one more group of memory bus (e.g, MIF and INT block). @@ -98,7 +97,7 @@ config ARM_TEGRA_DEVFREQ ARCH_TEGRA_132_SOC || ARCH_TEGRA_124_SOC || \ ARCH_TEGRA_210_SOC || \ COMPILE_TEST - select PM_OPP + depends on COMMON_CLK help This adds the DEVFREQ driver for the Tegra family of SoCs. It reads ACTMON counters of memory controllers and adjusts the @@ -109,7 +108,6 @@ config ARM_TEGRA20_DEVFREQ depends on (TEGRA_MC && TEGRA20_EMC) || COMPILE_TEST depends on COMMON_CLK select DEVFREQ_GOV_SIMPLE_ONDEMAND - select PM_OPP help This adds the DEVFREQ driver for the Tegra20 family of SoCs. It reads Memory Controller counters and adjusts the operating @@ -121,7 +119,6 @@ config ARM_RK3399_DMC_DEVFREQ select DEVFREQ_EVENT_ROCKCHIP_DFI select DEVFREQ_GOV_SIMPLE_ONDEMAND select PM_DEVFREQ_EVENT - select PM_OPP help This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller). It sets the frequency for the memory controller and reads the usage counts diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index fa626acdc9b9..44af435628f8 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -999,7 +999,8 @@ static const struct jz4780_dma_soc_data jz4740_dma_soc_data = { static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = { .nb_channels = 6, .transfer_ord_max = 5, - .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC, + .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC | + JZ_SOC_DATA_BREAK_LINKS, }; static const struct jz4780_dma_soc_data jz4770_dma_soc_data = { diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 1a422a8b43cf..18c011e57592 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -377,10 +377,11 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) descs->virt = dma_alloc_coherent(to_dev(ioat_chan), SZ_2M, &descs->hw, flags); - if (!descs->virt && (i > 0)) { + if (!descs->virt) { int idx; for (idx = 0; idx < i; idx++) { + descs = &ioat_chan->descs[idx]; dma_free_coherent(to_dev(ioat_chan), SZ_2M, descs->virt, descs->hw); descs->virt = NULL; diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index adecea51814f..c5c1aa0dcaed 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -229,9 +229,11 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id) c = p->vchan; if (c && (tc1 & BIT(i))) { spin_lock_irqsave(&c->vc.lock, flags); - vchan_cookie_complete(&p->ds_run->vd); - p->ds_done = p->ds_run; - p->ds_run = NULL; + if (p->ds_run != NULL) { + vchan_cookie_complete(&p->ds_run->vd); + p->ds_done = p->ds_run; + p->ds_run = NULL; + } spin_unlock_irqrestore(&c->vc.lock, flags); } if (c && (tc2 & BIT(i))) { @@ -271,6 +273,10 @@ static int k3_dma_start_txd(struct k3_dma_chan *c) if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d)) return -EAGAIN; + /* Avoid losing track of ds_run if a transaction is in flight */ + if (c->phy->ds_run) + return -EAGAIN; + if (vd) { struct k3_dma_desc_sw *ds = container_of(vd, struct k3_dma_desc_sw, vd); diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index ec4adf4260a0..256fc662c500 100644 --- 
a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c @@ -104,9 +104,8 @@ static void vchan_complete(unsigned long arg) dmaengine_desc_get_callback(&vd->tx, &cb); list_del(&vd->node); - vchan_vdesc_fini(vd); - dmaengine_desc_callback_invoke(&cb, &vd->tx_result); + vchan_vdesc_fini(vd); } } diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 417dad635526..5c8272329a65 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -462,7 +462,7 @@ config EDAC_ALTERA_SDMMC config EDAC_SIFIVE bool "Sifive platform EDAC driver" - depends on EDAC=y && RISCV + depends on EDAC=y && SIFIVE_L2 help Support for error detection and correction on the SiFive SoCs. diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c index 413cdb4a591d..c0cc72a3b2be 100644 --- a/drivers/edac/sifive_edac.c +++ b/drivers/edac/sifive_edac.c @@ -10,7 +10,7 @@ #include <linux/edac.h> #include <linux/platform_device.h> #include "edac_module.h" -#include <asm/sifive_l2_cache.h> +#include <soc/sifive/sifive_l2_cache.h> #define DRVNAME "sifive_edac" diff --git a/drivers/firmware/broadcom/tee_bnxt_fw.c b/drivers/firmware/broadcom/tee_bnxt_fw.c index 5b7ef89eb701..ed10da5313e8 100644 --- a/drivers/firmware/broadcom/tee_bnxt_fw.c +++ b/drivers/firmware/broadcom/tee_bnxt_fw.c @@ -215,7 +215,6 @@ static int tee_bnxt_fw_probe(struct device *dev) fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); if (IS_ERR(fw_shm_pool)) { - tee_client_close_context(pvt_data.ctx); dev_err(pvt_data.dev, "tee_shm_alloc failed\n"); err = PTR_ERR(fw_shm_pool); goto out_sess; diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c index c9a0efca17b0..5d4f84781aa0 100644 --- a/drivers/firmware/efi/earlycon.c +++ b/drivers/firmware/efi/earlycon.c @@ -13,18 +13,58 @@ #include <asm/early_ioremap.h> +static const struct console *earlycon_console __initdata; static const struct font_desc *font; static u32 efi_x, efi_y; static u64 fb_base; -static pgprot_t fb_prot; +static bool fb_wb; +static void *efi_fb; + +/* + * EFI earlycon needs to use early_memremap() to map the framebuffer. + * But early_memremap() is not usable for 'earlycon=efifb keep_bootcon', + * memremap() should be used instead. memremap() will be available after + * paging_init() which is earlier than initcall callbacks. Thus adding this + * early initcall function early_efi_map_fb() to map the whole EFI framebuffer. + */ +static int __init efi_earlycon_remap_fb(void) +{ + /* bail if there is no bootconsole or it has been disabled already */ + if (!earlycon_console || !(earlycon_console->flags & CON_ENABLED)) + return 0; + + efi_fb = memremap(fb_base, screen_info.lfb_size, + fb_wb ? MEMREMAP_WB : MEMREMAP_WC); + + return efi_fb ? 0 : -ENOMEM; +} +early_initcall(efi_earlycon_remap_fb); + +static int __init efi_earlycon_unmap_fb(void) +{ + /* unmap the bootconsole fb unless keep_bootcon has left it enabled */ + if (efi_fb && !(earlycon_console->flags & CON_ENABLED)) + memunmap(efi_fb); + return 0; +} +late_initcall(efi_earlycon_unmap_fb); static __ref void *efi_earlycon_map(unsigned long start, unsigned long len) { + pgprot_t fb_prot; + + if (efi_fb) + return efi_fb + start; + + fb_prot = fb_wb ? 
PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL); return early_memremap_prot(fb_base + start, len, pgprot_val(fb_prot)); } static __ref void efi_earlycon_unmap(void *addr, unsigned long len) { + if (efi_fb) + return; + early_memunmap(addr, len); } @@ -176,10 +216,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device, if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) fb_base |= (u64)screen_info.ext_lfb_base << 32; - if (opt && !strcmp(opt, "ram")) - fb_prot = PAGE_KERNEL; - else - fb_prot = pgprot_writecombine(PAGE_KERNEL); + fb_wb = opt && !strcmp(opt, "ram"); si = &screen_info; xres = si->lfb_width; @@ -201,6 +238,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device, efi_earlycon_scroll_up(); device->con->write = efi_earlycon_write; + earlycon_console = device->con; return 0; } EARLYCON_DECLARE(efifb, efi_earlycon_setup); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 407816da9fcb..2b02cb165f16 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -979,6 +979,24 @@ static int __init efi_memreserve_map_root(void) return 0; } +static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size) +{ + struct resource *res, *parent; + + res = kzalloc(sizeof(struct resource), GFP_ATOMIC); + if (!res) + return -ENOMEM; + + res->name = "reserved"; + res->flags = IORESOURCE_MEM; + res->start = addr; + res->end = addr + size - 1; + + /* we expect a conflict with a 'System RAM' region */ + parent = request_resource_conflict(&iomem_resource, res); + return parent ? request_resource(parent, res) : 0; +} + int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) { struct linux_efi_memreserve *rsv; @@ -1003,7 +1021,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) rsv->entry[index].size = size; memunmap(rsv); - return 0; + return efi_mem_reserve_iomem(addr, size); } memunmap(rsv); } @@ -1013,6 +1031,12 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) if (!rsv) return -ENOMEM; + rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K); + if (rc) { + free_page((unsigned long)rsv); + return rc; + } + /* * The memremap() call above assumes that a linux_efi_memreserve entry * never crosses a page boundary, so let's ensure that this remains true @@ -1029,7 +1053,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) efi_memreserve_root->next = __pa(rsv); spin_unlock(&efi_mem_reserve_persistent_lock); - return 0; + return efi_mem_reserve_iomem(addr, size); } static int __init efi_memreserve_root_init(void) diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c index 0101ca4c13b1..b7bf1e993b8b 100644 --- a/drivers/firmware/efi/libstub/gop.c +++ b/drivers/firmware/efi/libstub/gop.c @@ -84,30 +84,6 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line, } static efi_status_t -__gop_query32(efi_system_table_t *sys_table_arg, - struct efi_graphics_output_protocol_32 *gop32, - struct efi_graphics_output_mode_info **info, - unsigned long *size, u64 *fb_base) -{ - struct efi_graphics_output_protocol_mode_32 *mode; - efi_graphics_output_protocol_query_mode query_mode; - efi_status_t status; - unsigned long m; - - m = gop32->mode; - mode = (struct efi_graphics_output_protocol_mode_32 *)m; - query_mode = (void *)(unsigned long)gop32->query_mode; - - status = __efi_call_early(query_mode, (void *)gop32, mode->mode, size, - info); - if (status != EFI_SUCCESS) - return status; - - *fb_base = mode->frame_buffer_base; - return status; -} - -static 
efi_status_t setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, efi_guid_t *proto, unsigned long size, void **gop_handle) { @@ -119,7 +95,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, u64 fb_base; struct efi_pixel_bitmask pixel_info; int pixel_format; - efi_status_t status = EFI_NOT_FOUND; + efi_status_t status; u32 *handles = (u32 *)(unsigned long)gop_handle; int i; @@ -128,6 +104,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, nr_gops = size / sizeof(u32); for (i = 0; i < nr_gops; i++) { + struct efi_graphics_output_protocol_mode_32 *mode; struct efi_graphics_output_mode_info *info = NULL; efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID; bool conout_found = false; @@ -145,9 +122,11 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, if (status == EFI_SUCCESS) conout_found = true; - status = __gop_query32(sys_table_arg, gop32, &info, &size, - ¤t_fb_base); - if (status == EFI_SUCCESS && (!first_gop || conout_found) && + mode = (void *)(unsigned long)gop32->mode; + info = (void *)(unsigned long)mode->info; + current_fb_base = mode->frame_buffer_base; + + if ((!first_gop || conout_found) && info->pixel_format != PIXEL_BLT_ONLY) { /* * Systems that use the UEFI Console Splitter may @@ -175,7 +154,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, /* Did we find any GOPs? */ if (!first_gop) - goto out; + return EFI_NOT_FOUND; /* EFI framebuffer */ si->orig_video_isVGA = VIDEO_TYPE_EFI; @@ -197,32 +176,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, si->lfb_size = si->lfb_linelength * si->lfb_height; si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS; -out: - return status; -} - -static efi_status_t -__gop_query64(efi_system_table_t *sys_table_arg, - struct efi_graphics_output_protocol_64 *gop64, - struct efi_graphics_output_mode_info **info, - unsigned long *size, u64 *fb_base) -{ - struct efi_graphics_output_protocol_mode_64 *mode; - efi_graphics_output_protocol_query_mode query_mode; - efi_status_t status; - unsigned long m; - - m = gop64->mode; - mode = (struct efi_graphics_output_protocol_mode_64 *)m; - query_mode = (void *)(unsigned long)gop64->query_mode; - - status = __efi_call_early(query_mode, (void *)gop64, mode->mode, size, - info); - if (status != EFI_SUCCESS) - return status; - *fb_base = mode->frame_buffer_base; - return status; + return EFI_SUCCESS; } static efi_status_t @@ -237,7 +192,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, u64 fb_base; struct efi_pixel_bitmask pixel_info; int pixel_format; - efi_status_t status = EFI_NOT_FOUND; + efi_status_t status; u64 *handles = (u64 *)(unsigned long)gop_handle; int i; @@ -246,6 +201,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, nr_gops = size / sizeof(u64); for (i = 0; i < nr_gops; i++) { + struct efi_graphics_output_protocol_mode_64 *mode; struct efi_graphics_output_mode_info *info = NULL; efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID; bool conout_found = false; @@ -263,9 +219,11 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, if (status == EFI_SUCCESS) conout_found = true; - status = __gop_query64(sys_table_arg, gop64, &info, &size, - ¤t_fb_base); - if (status == EFI_SUCCESS && (!first_gop || conout_found) && + mode = (void *)(unsigned long)gop64->mode; + info = (void *)(unsigned long)mode->info; + current_fb_base = mode->frame_buffer_base; + + if ((!first_gop || conout_found) && 
info->pixel_format != PIXEL_BLT_ONLY) { /* * Systems that use the UEFI Console Splitter may @@ -293,7 +251,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, /* Did we find any GOPs? */ if (!first_gop) - goto out; + return EFI_NOT_FOUND; /* EFI framebuffer */ si->orig_video_isVGA = VIDEO_TYPE_EFI; @@ -315,8 +273,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, si->lfb_size = si->lfb_linelength * si->lfb_height; si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS; -out: - return status; + + return EFI_SUCCESS; } /* diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c index 35edd7cfb6a1..97378cf96a2e 100644 --- a/drivers/firmware/efi/libstub/random.c +++ b/drivers/firmware/efi/libstub/random.c @@ -33,7 +33,7 @@ efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg, { efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; efi_status_t status; - struct efi_rng_protocol *rng; + struct efi_rng_protocol *rng = NULL; status = efi_call_early(locate_protocol, &rng_proto, NULL, (void **)&rng); @@ -162,8 +162,8 @@ efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg) efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW; efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID; - struct efi_rng_protocol *rng; - struct linux_efi_random_seed *seed; + struct efi_rng_protocol *rng = NULL; + struct linux_efi_random_seed *seed = NULL; efi_status_t status; status = efi_call_early(locate_protocol, &rng_proto, NULL, diff --git a/drivers/firmware/efi/rci2-table.c b/drivers/firmware/efi/rci2-table.c index 76b0c354a027..de1a9a1f9f14 100644 --- a/drivers/firmware/efi/rci2-table.c +++ b/drivers/firmware/efi/rci2-table.c @@ -81,6 +81,9 @@ static int __init efi_rci2_sysfs_init(void) struct kobject *tables_kobj; int ret = -ENOMEM; + if (rci2_table_phys == EFI_INVALID_TABLE_ADDR) + return 0; + rci2_base = memremap(rci2_table_phys, sizeof(struct rci2_table_global_hdr), MEMREMAP_WB); diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 8adffd42f8cb..4b6d2ef15c39 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -553,8 +553,8 @@ config GPIO_TEGRA config GPIO_TEGRA186 tristate "NVIDIA Tegra186 GPIO support" - default ARCH_TEGRA_186_SOC - depends on ARCH_TEGRA_186_SOC || COMPILE_TEST + default ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC + depends on ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC || COMPILE_TEST depends on OF_GPIO select GPIOLIB_IRQCHIP select IRQ_DOMAIN_HIERARCHY @@ -573,7 +573,6 @@ config GPIO_THUNDERX tristate "Cavium ThunderX/OCTEON-TX GPIO" depends on ARCH_THUNDER || (64BIT && COMPILE_TEST) depends on PCI_MSI - select GPIOLIB_IRQCHIP select IRQ_DOMAIN_HIERARCHY select IRQ_FASTEOI_HIERARCHY_HANDLERS help @@ -1148,6 +1147,7 @@ config GPIO_MADERA config GPIO_MAX77620 tristate "GPIO support for PMIC MAX77620 and MAX20024" depends on MFD_MAX77620 + select GPIOLIB_IRQCHIP help GPIO driver for MAX77620 and MAX20024 PMIC from Maxim Semiconductor. MAX77620 PMIC has 8 pins that can be configured as GPIOs. 
The diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c index 7e99860ca447..8319812593e3 100644 --- a/drivers/gpio/gpio-aspeed-sgpio.c +++ b/drivers/gpio/gpio-aspeed-sgpio.c @@ -107,7 +107,7 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio, return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS; default: /* acturally if code runs to here, it's an error case */ - BUG_ON(1); + BUG(); } } diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 56d647a30e3e..94b8d3ae27bc 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -156,7 +156,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip, mutex_lock(&chip->lock); if (test_bit(FLAG_REQUESTED, &desc->flags) && - !test_bit(FLAG_IS_OUT, &desc->flags)) { + !test_bit(FLAG_IS_OUT, &desc->flags)) { curr = __gpio_mockup_get(chip, offset); if (curr == value) goto out; @@ -165,7 +165,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip, irq_type = irq_get_trigger_type(irq); if ((value == 1 && (irq_type & IRQ_TYPE_EDGE_RISING)) || - (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING))) + (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING))) irq_sim_fire(sim, offset); } @@ -226,7 +226,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset) int direction; mutex_lock(&chip->lock); - direction = !chip->lines[offset].dir; + direction = chip->lines[offset].dir; mutex_unlock(&chip->lock); return direction; @@ -395,7 +395,7 @@ static int gpio_mockup_probe(struct platform_device *pdev) struct gpio_chip *gc; struct device *dev; const char *name; - int rv, base; + int rv, base, i; u16 ngpio; dev = &pdev->dev; @@ -447,6 +447,9 @@ static int gpio_mockup_probe(struct platform_device *pdev) if (!chip->lines) return -ENOMEM; + for (i = 0; i < gc->ngpio; i++) + chip->lines[i].dir = GPIO_LINE_DIRECTION_IN; + if (device_property_read_bool(dev, "named-gpio-lines")) { rv = gpio_mockup_name_lines(dev, chip); if (rv) diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index f1e164cecff8..5ae30de3490a 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c @@ -346,6 +346,7 @@ static int mpc8xxx_probe(struct platform_device *pdev) return -ENOMEM; gc = &mpc8xxx_gc->gc; + gc->parent = &pdev->dev; if (of_property_read_bool(np, "little-endian")) { ret = bgpio_init(gc, &pdev->dev, 4, diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 6652bee01966..9853547e7276 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -568,16 +568,18 @@ static void pca953x_irq_mask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct pca953x_chip *chip = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); - chip->irq_mask[d->hwirq / BANK_SZ] &= ~BIT(d->hwirq % BANK_SZ); + clear_bit(hwirq, chip->irq_mask); } static void pca953x_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct pca953x_chip *chip = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); - chip->irq_mask[d->hwirq / BANK_SZ] |= BIT(d->hwirq % BANK_SZ); + set_bit(hwirq, chip->irq_mask); } static int pca953x_irq_set_wake(struct irq_data *d, unsigned int on) @@ -635,8 +637,7 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct pca953x_chip *chip = gpiochip_get_data(gc); - int bank_nb = d->hwirq / BANK_SZ; - u8 mask = BIT(d->hwirq % BANK_SZ); + 
irq_hw_number_t hwirq = irqd_to_hwirq(d); if (!(type & IRQ_TYPE_EDGE_BOTH)) { dev_err(&chip->client->dev, "irq %d: unsupported type %d\n", @@ -644,15 +645,8 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type) return -EINVAL; } - if (type & IRQ_TYPE_EDGE_FALLING) - chip->irq_trig_fall[bank_nb] |= mask; - else - chip->irq_trig_fall[bank_nb] &= ~mask; - - if (type & IRQ_TYPE_EDGE_RISING) - chip->irq_trig_raise[bank_nb] |= mask; - else - chip->irq_trig_raise[bank_nb] &= ~mask; + assign_bit(hwirq, chip->irq_trig_fall, type & IRQ_TYPE_EDGE_FALLING); + assign_bit(hwirq, chip->irq_trig_raise, type & IRQ_TYPE_EDGE_RISING); return 0; } @@ -661,10 +655,10 @@ static void pca953x_irq_shutdown(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct pca953x_chip *chip = gpiochip_get_data(gc); - u8 mask = BIT(d->hwirq % BANK_SZ); + irq_hw_number_t hwirq = irqd_to_hwirq(d); - chip->irq_trig_raise[d->hwirq / BANK_SZ] &= ~mask; - chip->irq_trig_fall[d->hwirq / BANK_SZ] &= ~mask; + clear_bit(hwirq, chip->irq_trig_raise); + clear_bit(hwirq, chip->irq_trig_fall); } static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pending) diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c index d08d86a22b1f..462770479045 100644 --- a/drivers/gpio/gpio-thunderx.c +++ b/drivers/gpio/gpio-thunderx.c @@ -53,6 +53,7 @@ struct thunderx_line { struct thunderx_gpio { struct gpio_chip chip; u8 __iomem *register_base; + struct irq_domain *irqd; struct msix_entry *msix_entries; /* per line MSI-X */ struct thunderx_line *line_entries; /* per line irq info */ raw_spinlock_t lock; @@ -285,60 +286,54 @@ static void thunderx_gpio_set_multiple(struct gpio_chip *chip, } } -static void thunderx_gpio_irq_ack(struct irq_data *d) +static void thunderx_gpio_irq_ack(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_INTR, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static void thunderx_gpio_irq_mask(struct irq_data *d) +static void thunderx_gpio_irq_mask(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_ENA_W1C, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static void thunderx_gpio_irq_mask_ack(struct irq_data *d) +static void thunderx_gpio_irq_mask_ack(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_ENA_W1C | GPIO_INTR_INTR, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static void thunderx_gpio_irq_unmask(struct irq_data *d) +static void thunderx_gpio_irq_unmask(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_ENA_W1S, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static int 
thunderx_gpio_irq_set_type(struct irq_data *d, +static int thunderx_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); - struct thunderx_line *txline = - &txgpio->line_entries[irqd_to_hwirq(d)]; + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); + struct thunderx_gpio *txgpio = txline->txgpio; u64 bit_cfg; - irqd_set_trigger_type(d, flow_type); + irqd_set_trigger_type(data, flow_type); bit_cfg = txline->fil_bits | GPIO_BIT_CFG_INT_EN; if (flow_type & IRQ_TYPE_EDGE_BOTH) { - irq_set_handler_locked(d, handle_fasteoi_ack_irq); + irq_set_handler_locked(data, handle_fasteoi_ack_irq); bit_cfg |= GPIO_BIT_CFG_INT_TYPE; } else { - irq_set_handler_locked(d, handle_fasteoi_mask_irq); + irq_set_handler_locked(data, handle_fasteoi_mask_irq); } raw_spin_lock(&txgpio->lock); @@ -367,6 +362,33 @@ static void thunderx_gpio_irq_disable(struct irq_data *data) irq_chip_disable_parent(data); } +static int thunderx_gpio_irq_request_resources(struct irq_data *data) +{ + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); + struct thunderx_gpio *txgpio = txline->txgpio; + int r; + + r = gpiochip_lock_as_irq(&txgpio->chip, txline->line); + if (r) + return r; + + r = irq_chip_request_resources_parent(data); + if (r) + gpiochip_unlock_as_irq(&txgpio->chip, txline->line); + + return r; +} + +static void thunderx_gpio_irq_release_resources(struct irq_data *data) +{ + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); + struct thunderx_gpio *txgpio = txline->txgpio; + + irq_chip_release_resources_parent(data); + + gpiochip_unlock_as_irq(&txgpio->chip, txline->line); +} + /* * Interrupts are chained from underlying MSI-X vectors. 
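The request_resources/release_resources callbacks added above follow the usual acquire-then-unwind ordering: mark the GPIO as used by an interrupt first, then ask the parent (MSI-X) irqchip for its resources, and drop the gpiolib lock again if the parent refuses. A condensed sketch of that shape, using hypothetical example_* names rather than the driver's own:

    static int example_irq_request_resources(struct irq_data *data)
    {
            struct example_line *line = irq_data_get_irq_chip_data(data); /* per-line cookie */
            int ret;

            /* Claim the GPIO for IRQ use before touching the parent domain. */
            ret = gpiochip_lock_as_irq(line->chip, line->hwirq);
            if (ret)
                    return ret;

            /* Let the parent irqchip allocate whatever it needs. */
            ret = irq_chip_request_resources_parent(data);
            if (ret)
                    gpiochip_unlock_as_irq(line->chip, line->hwirq); /* roll back */

            return ret;
    }

The release path simply runs the same two steps in the opposite order.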
We have * these irq_chip functions to be able to handle level triggering @@ -383,24 +405,50 @@ static struct irq_chip thunderx_gpio_irq_chip = { .irq_unmask = thunderx_gpio_irq_unmask, .irq_eoi = irq_chip_eoi_parent, .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_request_resources = thunderx_gpio_irq_request_resources, + .irq_release_resources = thunderx_gpio_irq_release_resources, .irq_set_type = thunderx_gpio_irq_set_type, .flags = IRQCHIP_SET_TYPE_MASKED }; -static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc, - unsigned int child, - unsigned int child_type, - unsigned int *parent, - unsigned int *parent_type) +static int thunderx_gpio_irq_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + irq_hw_number_t *hwirq, + unsigned int *type) { - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); - - *parent = txgpio->base_msi + (2 * child); - *parent_type = IRQ_TYPE_LEVEL_HIGH; + struct thunderx_gpio *txgpio = d->host_data; + + if (WARN_ON(fwspec->param_count < 2)) + return -EINVAL; + if (fwspec->param[0] >= txgpio->chip.ngpio) + return -EINVAL; + *hwirq = fwspec->param[0]; + *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; return 0; } +static int thunderx_gpio_irq_alloc(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct thunderx_line *txline = arg; + + return irq_domain_set_hwirq_and_chip(d, virq, txline->line, + &thunderx_gpio_irq_chip, txline); +} + +static const struct irq_domain_ops thunderx_gpio_irqd_ops = { + .alloc = thunderx_gpio_irq_alloc, + .translate = thunderx_gpio_irq_translate +}; + +static int thunderx_gpio_to_irq(struct gpio_chip *chip, unsigned int offset) +{ + struct thunderx_gpio *txgpio = gpiochip_get_data(chip); + + return irq_find_mapping(txgpio->irqd, offset); +} + static int thunderx_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -408,7 +456,6 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, struct device *dev = &pdev->dev; struct thunderx_gpio *txgpio; struct gpio_chip *chip; - struct gpio_irq_chip *girq; int ngpio, i; int err = 0; @@ -453,8 +500,8 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, } txgpio->msix_entries = devm_kcalloc(dev, - ngpio, sizeof(struct msix_entry), - GFP_KERNEL); + ngpio, sizeof(struct msix_entry), + GFP_KERNEL); if (!txgpio->msix_entries) { err = -ENOMEM; goto out; @@ -495,6 +542,27 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, if (err < 0) goto out; + /* + * Push GPIO specific irqdomain on hierarchy created as a side + * effect of the pci_enable_msix() + */ + txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain, + 0, 0, of_node_to_fwnode(dev->of_node), + &thunderx_gpio_irqd_ops, txgpio); + if (!txgpio->irqd) { + err = -ENOMEM; + goto out; + } + + /* Push on irq_data and the domain for each line. 
*/ + for (i = 0; i < ngpio; i++) { + err = irq_domain_push_irq(txgpio->irqd, + txgpio->msix_entries[i].vector, + &txgpio->line_entries[i]); + if (err < 0) + dev_err(dev, "irq_domain_push_irq: %d\n", err); + } + chip->label = KBUILD_MODNAME; chip->parent = dev; chip->owner = THIS_MODULE; @@ -509,28 +577,11 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, chip->set = thunderx_gpio_set; chip->set_multiple = thunderx_gpio_set_multiple; chip->set_config = thunderx_gpio_set_config; - girq = &chip->irq; - girq->chip = &thunderx_gpio_irq_chip; - girq->fwnode = of_node_to_fwnode(dev->of_node); - girq->parent_domain = - irq_get_irq_data(txgpio->msix_entries[0].vector)->domain; - girq->child_to_parent_hwirq = thunderx_gpio_child_to_parent_hwirq; - girq->handler = handle_bad_irq; - girq->default_type = IRQ_TYPE_NONE; - + chip->to_irq = thunderx_gpio_to_irq; err = devm_gpiochip_add_data(dev, chip, txgpio); if (err) goto out; - /* Push on irq_data and the domain for each line. */ - for (i = 0; i < ngpio; i++) { - err = irq_domain_push_irq(chip->irq.domain, - txgpio->msix_entries[i].vector, - chip); - if (err < 0) - dev_err(dev, "irq_domain_push_irq: %d\n", err); - } - dev_info(dev, "ThunderX GPIO: %d lines with base %d.\n", ngpio, chip->base); return 0; @@ -545,10 +596,10 @@ static void thunderx_gpio_remove(struct pci_dev *pdev) struct thunderx_gpio *txgpio = pci_get_drvdata(pdev); for (i = 0; i < txgpio->chip.ngpio; i++) - irq_domain_pop_irq(txgpio->chip.irq.domain, + irq_domain_pop_irq(txgpio->irqd, txgpio->msix_entries[i].vector); - irq_domain_remove(txgpio->chip.irq.domain); + irq_domain_remove(txgpio->irqd); pci_set_drvdata(pdev, NULL); } diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c index 773e5c24309e..b21c2e436b61 100644 --- a/drivers/gpio/gpio-xgs-iproc.c +++ b/drivers/gpio/gpio-xgs-iproc.c @@ -280,7 +280,7 @@ static int iproc_gpio_probe(struct platform_device *pdev) return 0; } -static int __exit iproc_gpio_remove(struct platform_device *pdev) +static int iproc_gpio_remove(struct platform_device *pdev) { struct iproc_gpio_chip *chip; diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c index 08d7c3b32038..c8af34a6368f 100644 --- a/drivers/gpio/gpio-xtensa.c +++ b/drivers/gpio/gpio-xtensa.c @@ -44,15 +44,14 @@ static inline unsigned long enable_cp(unsigned long *cpenable) unsigned long flags; local_irq_save(flags); - RSR_CPENABLE(*cpenable); - WSR_CPENABLE(*cpenable | BIT(XCHAL_CP_ID_XTIOP)); - + *cpenable = xtensa_get_sr(cpenable); + xtensa_set_sr(*cpenable | BIT(XCHAL_CP_ID_XTIOP), cpenable); return flags; } static inline void disable_cp(unsigned long flags, unsigned long cpenable) { - WSR_CPENABLE(cpenable); + xtensa_set_sr(cpenable, cpenable); local_irq_restore(flags); } diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index 4c3f6370eab4..05ba16fffdad 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -684,6 +684,8 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio) unsigned int bank_num; for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) { + writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr + + ZYNQ_GPIO_INTDIS_OFFSET(bank_num)); writel_relaxed(gpio->context.datalsw[bank_num], gpio->base_addr + ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num)); @@ -693,9 +695,6 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio) writel_relaxed(gpio->context.dirm[bank_num], gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num)); - writel_relaxed(gpio->context.int_en[bank_num], - 
gpio->base_addr + - ZYNQ_GPIO_INTEN_OFFSET(bank_num)); writel_relaxed(gpio->context.int_type[bank_num], gpio->base_addr + ZYNQ_GPIO_INTTYPE_OFFSET(bank_num)); @@ -705,6 +704,9 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio) writel_relaxed(gpio->context.int_any[bank_num], gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num)); + writel_relaxed(~(gpio->context.int_en[bank_num]), + gpio->base_addr + + ZYNQ_GPIO_INTEN_OFFSET(bank_num)); } } diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index d30e57dc755c..31fee5e918b7 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -21,11 +21,19 @@ #include "gpiolib.h" #include "gpiolib-acpi.h" +#define QUIRK_NO_EDGE_EVENTS_ON_BOOT 0x01l +#define QUIRK_NO_WAKEUP 0x02l + static int run_edge_events_on_boot = -1; module_param(run_edge_events_on_boot, int, 0444); MODULE_PARM_DESC(run_edge_events_on_boot, "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto"); +static int honor_wakeup = -1; +module_param(honor_wakeup, int, 0444); +MODULE_PARM_DESC(honor_wakeup, + "Honor the ACPI wake-capable flag: 0=no, 1=yes, -1=auto"); + /** * struct acpi_gpio_event - ACPI GPIO event handler data * @@ -281,7 +289,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, event->handle = evt_handle; event->handler = handler; event->irq = irq; - event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE; + event->irq_is_wake = honor_wakeup && agpio->wake_capable == ACPI_WAKE_CAPABLE; event->pin = pin; event->desc = desc; @@ -1309,7 +1317,7 @@ static int acpi_gpio_handle_deferred_request_irqs(void) /* We must use _sync so that this runs after the first deferred_probe run */ late_initcall_sync(acpi_gpio_handle_deferred_request_irqs); -static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = { +static const struct dmi_system_id gpiolib_acpi_quirks[] = { { /* * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for @@ -1319,7 +1327,8 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MINIX"), DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"), - } + }, + .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT, }, { /* @@ -1331,20 +1340,52 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"), DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"), - } + }, + .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT, + }, + { + /* + * Various HP X2 10 Cherry Trail models use an external + * embedded-controller connected via I2C + an ACPI GPIO + * event handler. The embedded controller generates various + * spurious wakeup events when suspended. So disable wakeup + * for its handler (it uses the only ACPI GPIO event handler). + * This breaks wakeup when opening the lid, the user needs + * to press the power-button to wakeup the system. The + * alternative is suspend simply not working, which is worse. 
+ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"), + }, + .driver_data = (void *)QUIRK_NO_WAKEUP, }, {} /* Terminating entry */ }; static int acpi_gpio_setup_params(void) { + const struct dmi_system_id *id; + long quirks = 0; + + id = dmi_first_match(gpiolib_acpi_quirks); + if (id) + quirks = (long)id->driver_data; + if (run_edge_events_on_boot < 0) { - if (dmi_check_system(run_edge_events_on_boot_blacklist)) + if (quirks & QUIRK_NO_EDGE_EVENTS_ON_BOOT) run_edge_events_on_boot = 0; else run_edge_events_on_boot = 1; } + if (honor_wakeup < 0) { + if (quirks & QUIRK_NO_WAKEUP) + honor_wakeup = 0; + else + honor_wakeup = 1; + } + return 0; } diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index dc27b1a88e93..b696e4598a24 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -23,6 +23,29 @@ #include "gpiolib.h" #include "gpiolib-of.h" +/** + * of_gpio_spi_cs_get_count() - special GPIO counting for SPI + * Some elder GPIO controllers need special quirks. Currently we handle + * the Freescale GPIO controller with bindings that doesn't use the + * established "cs-gpios" for chip selects but instead rely on + * "gpios" for the chip select lines. If we detect this, we redirect + * the counting of "cs-gpios" to count "gpios" transparent to the + * driver. + */ +static int of_gpio_spi_cs_get_count(struct device *dev, const char *con_id) +{ + struct device_node *np = dev->of_node; + + if (!IS_ENABLED(CONFIG_SPI_MASTER)) + return 0; + if (!con_id || strcmp(con_id, "cs")) + return 0; + if (!of_device_is_compatible(np, "fsl,spi") && + !of_device_is_compatible(np, "aeroflexgaisler,spictrl")) + return 0; + return of_gpio_named_count(np, "gpios"); +} + /* * This is used by external users of of_gpio_count() from <linux/of_gpio.h> * @@ -35,6 +58,10 @@ int of_gpio_get_count(struct device *dev, const char *con_id) char propname[32]; unsigned int i; + ret = of_gpio_spi_cs_get_count(dev, con_id); + if (ret > 0) + return ret; + for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) { if (con_id) snprintf(propname, sizeof(propname), "%s-%s", diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 9913886ede90..78a16e42f222 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -220,6 +220,14 @@ int gpiod_get_direction(struct gpio_desc *desc) chip = gpiod_to_chip(desc); offset = gpio_chip_hwgpio(desc); + /* + * Open drain emulation using input mode may incorrectly report + * input here, fix that up. 
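The fix-up above exists because gpiolib can emulate an open-drain output on controllers that have no native open-drain mode: the line is actively driven only for the low state and switched back to input (high impedance) for the high state, so the controller's direction register reads "input" even while the descriptor is logically an output. A minimal sketch of that emulation, assuming a plain gpio_chip with direction callbacks (simplified, not the gpiolib code itself):

    /* Emulated open-drain set: drive low, float high. */
    static void od_emulated_set(struct gpio_chip *gc, unsigned int offset, int value)
    {
            if (value)
                    gc->direction_input(gc, offset);     /* release the line; the pull-up provides the 1 */
            else
                    gc->direction_output(gc, offset, 0); /* actively drive the 0 */
    }

With FLAG_OPEN_DRAIN and FLAG_IS_OUT both set, gpiod_get_direction() therefore reports "output" without consulting the hardware.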
+ */ + if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) && + test_bit(FLAG_IS_OUT, &desc->flags)) + return 0; + if (!chip->get_direction) return -ENOTSUPP; @@ -4472,8 +4480,9 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id, if (chip->ngpio <= p->chip_hwnum) { dev_err(dev, - "requested GPIO %d is out of range [0..%d] for chip %s\n", - idx, chip->ngpio, chip->label); + "requested GPIO %u (%u) is out of range [0..%u] for chip %s\n", + idx, p->chip_hwnum, chip->ngpio - 1, + chip->label); return ERR_PTR(-EINVAL); } diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c index 875a3a9eabfa..7d0e7b031e44 100644 --- a/drivers/gpu/drm/arm/malidp_mw.c +++ b/drivers/gpu/drm/arm/malidp_mw.c @@ -56,7 +56,7 @@ malidp_mw_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = { +static const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = { .get_modes = malidp_mw_connector_get_modes, .mode_valid = malidp_mw_connector_mode_valid, }; diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 021c5a98db09..38bf111e5f9b 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -398,7 +398,7 @@ drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req, memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes); idx += req->u.i2c_read.transactions[i].num_bytes; - buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5; + buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4; buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf); idx++; } @@ -1209,6 +1209,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb, txmsg->state == DRM_DP_SIDEBAND_TX_SENT) { mstb->tx_slots[txmsg->seqno] = NULL; } + mgr->is_waiting_for_dwn_reply = false; + } out: if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) { @@ -1218,6 +1220,7 @@ out: } mutex_unlock(&mgr->qlock); + drm_dp_mst_kick_tx(mgr); return ret; } @@ -2342,7 +2345,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, { struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; struct drm_dp_mst_port *port; - int old_ddps, ret; + int old_ddps, old_input, ret, i; u8 new_pdt; bool dowork = false, create_connector = false; @@ -2373,6 +2376,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, } old_ddps = port->ddps; + old_input = port->input; port->input = conn_stat->input_port; port->mcs = conn_stat->message_capability_status; port->ldps = conn_stat->legacy_device_plug_status; @@ -2397,6 +2401,28 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, dowork = false; } + if (!old_input && old_ddps != port->ddps && !port->ddps) { + for (i = 0; i < mgr->max_payloads; i++) { + struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i]; + struct drm_dp_mst_port *port_validated; + + if (!vcpi) + continue; + + port_validated = + container_of(vcpi, struct drm_dp_mst_port, vcpi); + port_validated = + drm_dp_mst_topology_get_port_validated(mgr, port_validated); + if (!port_validated) { + mutex_lock(&mgr->payload_lock); + vcpi->num_slots = 0; + mutex_unlock(&mgr->payload_lock); + } else { + drm_dp_mst_topology_put_port(port_validated); + } + } + } + if (port->connector) drm_modeset_unlock(&mgr->base.lock); else if (create_connector) @@ -2759,9 +2785,11 @@ static void process_single_down_tx_qlock(struct 
drm_dp_mst_topology_mgr *mgr) ret = process_single_tx_qlock(mgr, txmsg, false); if (ret == 1) { /* txmsg is sent it should be in the slots now */ + mgr->is_waiting_for_dwn_reply = true; list_del(&txmsg->next); } else if (ret) { DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); + mgr->is_waiting_for_dwn_reply = false; list_del(&txmsg->next); if (txmsg->seqno != -1) txmsg->dst->tx_slots[txmsg->seqno] = NULL; @@ -2801,7 +2829,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); } - if (list_is_singular(&mgr->tx_msg_downq)) + if (list_is_singular(&mgr->tx_msg_downq) && + !mgr->is_waiting_for_dwn_reply) process_single_down_tx_qlock(mgr); mutex_unlock(&mgr->qlock); } @@ -3756,6 +3785,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) mutex_lock(&mgr->qlock); txmsg->state = DRM_DP_SIDEBAND_TX_RX; mstb->tx_slots[slot] = NULL; + mgr->is_waiting_for_dwn_reply = false; mutex_unlock(&mgr->qlock); wake_up_all(&mgr->tx_waitq); @@ -3765,6 +3795,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) no_msg: drm_dp_mst_topology_put_mstb(mstb); clear_down_rep_recv: + mutex_lock(&mgr->qlock); + mgr->is_waiting_for_dwn_reply = false; + mutex_unlock(&mgr->qlock); memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); return 0; @@ -4597,7 +4630,7 @@ static void drm_dp_tx_work(struct work_struct *work) struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); mutex_lock(&mgr->qlock); - if (!list_empty(&mgr->tx_msg_downq)) + if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply) process_single_down_tx_qlock(mgr); mutex_unlock(&mgr->qlock); } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 57f510687b85..4c7cbce7bae7 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1267,7 +1267,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, * Changes struct fb_var_screeninfo are currently not pushed back * to KMS, hence fail if different settings are requested. */ - if (var->bits_per_pixel != fb->format->cpp[0] * 8 || + if (var->bits_per_pixel > fb->format->cpp[0] * 8 || var->xres > fb->width || var->yres > fb->height || var->xres_virtual > fb->width || var->yres_virtual > fb->height) { drm_dbg_kms(dev, "fb requested width/height/bpp can't fit in current fb " @@ -1293,6 +1293,11 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, } /* + * Likewise, bits_per_pixel should be rounded up to a supported value. + */ + var->bits_per_pixel = fb->format->cpp[0] * 8; + + /* * drm fbdev emulation doesn't support changing the pixel format at all, * so reject all pixel format changing requests. 
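With the relaxed check above, a var->bits_per_pixel smaller than the framebuffer's native depth is no longer rejected; it is accepted and then rounded back up to format->cpp[0] * 8. For example, a legacy application requesting 16 bpp against an XRGB8888 framebuffer (cpp = 4) now succeeds and sees bits_per_pixel rewritten to 32, while asking for 64 bpp is still refused. An illustrative sketch of the rule (not the helper's exact code):

    /* Accept anything up to the native depth, then round up to it. */
    static int check_bpp(unsigned int requested_bpp, unsigned int native_cpp)
    {
            unsigned int native_bpp = native_cpp * 8;

            if (requested_bpp > native_bpp)
                    return -EINVAL;   /* deeper than the backing framebuffer */

            return native_bpp;        /* shallower or equal: silently rounded up */
    }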
*/ diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 7ae087b0504d..88b6fcaa20be 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1313,6 +1313,7 @@ static int gsc_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; + component_del(dev, &gsc_component_ops); pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 9e430590fb3a..0cf0f6fae675 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2836,6 +2836,14 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ batch = gen8_emit_flush_coherentl3_wa(engine, batch); + /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */ + batch = gen8_emit_pipe_control(batch, + PIPE_CONTROL_FLUSH_L3 | + PIPE_CONTROL_STORE_DATA_INDEX | + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_QW_WRITE, + LRC_PPHWSP_SCRATCH_ADDR); + batch = emit_lri(batch, lri, ARRAY_SIZE(lri)); /* WaMediaPoolStateCmdInWABB:bxt,glk */ diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index e451298d11c3..2477a1e5a166 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -36,13 +36,32 @@ #define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12)) +static int vgpu_pin_dma_address(struct intel_vgpu *vgpu, + unsigned long size, + dma_addr_t dma_addr) +{ + int ret = 0; + + if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr)) + ret = -EINVAL; + + return ret; +} + +static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu, + dma_addr_t dma_addr) +{ + intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr); +} + static int vgpu_gem_get_pages( struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + struct intel_vgpu *vgpu; struct sg_table *st; struct scatterlist *sg; - int i, ret; + int i, j, ret; gen8_pte_t __iomem *gtt_entries; struct intel_vgpu_fb_info *fb_info; u32 page_num; @@ -51,6 +70,10 @@ static int vgpu_gem_get_pages( if (WARN_ON(!fb_info)) return -ENODEV; + vgpu = fb_info->obj->vgpu; + if (WARN_ON(!vgpu)) + return -ENODEV; + st = kmalloc(sizeof(*st), GFP_KERNEL); if (unlikely(!st)) return -ENOMEM; @@ -64,21 +87,53 @@ static int vgpu_gem_get_pages( gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + (fb_info->start >> PAGE_SHIFT); for_each_sg(st->sgl, sg, page_num, i) { + dma_addr_t dma_addr = + GEN8_DECODE_PTE(readq(>t_entries[i])); + if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) { + ret = -EINVAL; + goto out; + } + sg->offset = 0; sg->length = PAGE_SIZE; - sg_dma_address(sg) = - GEN8_DECODE_PTE(readq(>t_entries[i])); sg_dma_len(sg) = PAGE_SIZE; + sg_dma_address(sg) = dma_addr; } __i915_gem_object_set_pages(obj, st, PAGE_SIZE); +out: + if (ret) { + dma_addr_t dma_addr; + + for_each_sg(st->sgl, sg, i, j) { + dma_addr = sg_dma_address(sg); + if (dma_addr) + vgpu_unpin_dma_address(vgpu, dma_addr); + } + sg_free_table(st); + kfree(st); + } + + return ret; - return 0; } static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { + struct scatterlist *sg; + + if (obj->base.dma_buf) { + struct intel_vgpu_fb_info *fb_info = obj->gvt_info; + struct intel_vgpu_dmabuf_obj *obj = fb_info->obj; + struct intel_vgpu *vgpu = obj->vgpu; + int i; + + for_each_sg(pages->sgl, sg, fb_info->size, i) + 
vgpu_unpin_dma_address(vgpu, + sg_dma_address(sg)); + } + sg_free_table(pages); kfree(pages); } @@ -163,6 +218,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, drm_gem_private_object_init(dev, &obj->base, roundup(info->size, PAGE_SIZE)); i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class); + i915_gem_object_set_readonly(obj); obj->read_domains = I915_GEM_DOMAIN_GTT; obj->write_domain = 0; diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 21af822a79e0..6d28d72e6c7e 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -341,6 +341,10 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); engine_mask |= BIT(VCS1); } + if (data & GEN9_GRDOM_GUC) { + gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id); + vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET; + } engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask; } @@ -1636,6 +1640,16 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu, return 0; } +static int guc_status_read(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, + unsigned int bytes) +{ + /* keep MIA_IN_RESET before clearing */ + read_vreg(vgpu, offset, p_data, bytes); + vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET; + return 0; +} + static int mmio_read_from_hw(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -2672,6 +2686,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write); MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write); + MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL); + return 0; } diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 9599c0a762b2..b17c4a1599cd 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -66,6 +66,8 @@ struct intel_gvt_mpt { unsigned long size, dma_addr_t *dma_addr); void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr); + int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr); + int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn, unsigned long mfn, unsigned int nr, bool map); int (*set_trap_area)(unsigned long handle, u64 start, u64 end, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 04a5a0d90823..3259a1fa69e1 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1916,6 +1916,28 @@ err_unlock: return ret; } +static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr) +{ + struct kvmgt_guest_info *info; + struct gvt_dma *entry; + int ret = 0; + + if (!handle_valid(handle)) + return -ENODEV; + + info = (struct kvmgt_guest_info *)handle; + + mutex_lock(&info->vgpu->vdev.cache_lock); + entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr); + if (entry) + kref_get(&entry->ref); + else + ret = -ENOMEM; + mutex_unlock(&info->vgpu->vdev.cache_lock); + + return ret; +} + static void __gvt_dma_release(struct kref *ref) { struct gvt_dma *entry = container_of(ref, typeof(*entry), ref); @@ -2027,6 +2049,7 @@ static struct intel_gvt_mpt kvmgt_mpt = { .gfn_to_mfn = kvmgt_gfn_to_pfn, .dma_map_guest_page = kvmgt_dma_map_guest_page, .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, + .dma_pin_guest_page = kvmgt_dma_pin_guest_page, .set_opregion = kvmgt_set_opregion, .set_edid = kvmgt_set_edid, 
.get_vfio_device = kvmgt_get_vfio_device, diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 0f9440128123..9ad224df9c68 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -255,6 +255,21 @@ static inline void intel_gvt_hypervisor_dma_unmap_guest_page( } /** + * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf + * @vgpu: a vGPU + * @dma_addr: guest dma addr + * + * Returns: + * 0 on success, negative error code if failed. + */ +static inline int +intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu, + dma_addr_t dma_addr) +{ + return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr); +} + +/** * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN * @vgpu: a vGPU * @gfn: guest PFN diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index d5a6e4e3d0fd..85bd9bf4f6ee 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -212,9 +212,9 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) */ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu) { - mutex_lock(&vgpu->gvt->lock); + mutex_lock(&vgpu->vgpu_lock); vgpu->active = true; - mutex_unlock(&vgpu->gvt->lock); + mutex_unlock(&vgpu->vgpu_lock); } /** diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 3b5e016d16c4..5fa1073cf26b 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -230,28 +230,25 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data) static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi) { u32 timcon0, timcon1, timcon2, timcon3; - u32 ui, cycle_time; + u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, 1000000); struct mtk_phy_timing *timing = &dsi->phy_timing; - ui = DIV_ROUND_UP(1000000000, dsi->data_rate); - cycle_time = div_u64(8000000000ULL, dsi->data_rate); + timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1; + timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000; + timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 - + timing->da_hs_prepare; + timing->da_hs_trail = timing->da_hs_prepare + 1; - timing->lpx = NS_TO_CYCLE(60, cycle_time); - timing->da_hs_prepare = NS_TO_CYCLE(50 + 5 * ui, cycle_time); - timing->da_hs_zero = NS_TO_CYCLE(110 + 6 * ui, cycle_time); - timing->da_hs_trail = NS_TO_CYCLE(77 + 4 * ui, cycle_time); + timing->ta_go = 4 * timing->lpx - 2; + timing->ta_sure = timing->lpx + 2; + timing->ta_get = 4 * timing->lpx; + timing->da_hs_exit = 2 * timing->lpx + 1; - timing->ta_go = 4 * timing->lpx; - timing->ta_sure = 3 * timing->lpx / 2; - timing->ta_get = 5 * timing->lpx; - timing->da_hs_exit = 2 * timing->lpx; - - timing->clk_hs_zero = NS_TO_CYCLE(336, cycle_time); - timing->clk_hs_trail = NS_TO_CYCLE(100, cycle_time) + 10; - - timing->clk_hs_prepare = NS_TO_CYCLE(64, cycle_time); - timing->clk_hs_post = NS_TO_CYCLE(80 + 52 * ui, cycle_time); - timing->clk_hs_exit = 2 * timing->lpx; + timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000); + timing->clk_hs_post = timing->clk_hs_prepare + 8; + timing->clk_hs_trail = timing->clk_hs_prepare; + timing->clk_hs_zero = timing->clk_hs_trail * 4; + timing->clk_hs_exit = 2 * timing->clk_hs_trail; timcon0 = timing->lpx | timing->da_hs_prepare << 8 | timing->da_hs_zero << 16 | timing->da_hs_trail << 24; @@ -482,27 +479,39 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) dsi_tmp_buf_bpp - 10); data_phy_cycles = timing->lpx + timing->da_hs_prepare + - 
timing->da_hs_zero + timing->da_hs_exit + 2; + timing->da_hs_zero + timing->da_hs_exit + 3; if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) { - if (vm->hfront_porch * dsi_tmp_buf_bpp > + if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp > data_phy_cycles * dsi->lanes + 18) { - horizontal_frontporch_byte = vm->hfront_porch * - dsi_tmp_buf_bpp - - data_phy_cycles * - dsi->lanes - 18; + horizontal_frontporch_byte = + vm->hfront_porch * dsi_tmp_buf_bpp - + (data_phy_cycles * dsi->lanes + 18) * + vm->hfront_porch / + (vm->hfront_porch + vm->hback_porch); + + horizontal_backporch_byte = + horizontal_backporch_byte - + (data_phy_cycles * dsi->lanes + 18) * + vm->hback_porch / + (vm->hfront_porch + vm->hback_porch); } else { DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n"); horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp; } } else { - if (vm->hfront_porch * dsi_tmp_buf_bpp > + if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp > data_phy_cycles * dsi->lanes + 12) { - horizontal_frontporch_byte = vm->hfront_porch * - dsi_tmp_buf_bpp - - data_phy_cycles * - dsi->lanes - 12; + horizontal_frontporch_byte = + vm->hfront_porch * dsi_tmp_buf_bpp - + (data_phy_cycles * dsi->lanes + 12) * + vm->hfront_porch / + (vm->hfront_porch + vm->hback_porch); + horizontal_backporch_byte = horizontal_backporch_byte - + (data_phy_cycles * dsi->lanes + 12) * + vm->hback_porch / + (vm->hfront_porch + vm->hback_porch); } else { DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n"); horizontal_frontporch_byte = vm->hfront_porch * diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h index 83c4586665b4..81ac9b658a70 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.h +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h @@ -95,7 +95,7 @@ struct cdn_dp_device { struct cdn_dp_port *port[MAX_PHY]; u8 ports; u8 max_lanes; - u8 max_rate; + unsigned int max_rate; u8 lanes; int active_port; diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index a7c4654445c7..68d4644ac2dc 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -685,8 +685,6 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master, struct sun4i_hdmi *hdmi = dev_get_drvdata(dev); cec_unregister_adapter(hdmi->cec_adap); - drm_connector_cleanup(&hdmi->connector); - drm_encoder_cleanup(&hdmi->encoder); i2c_del_adapter(hdmi->i2c); i2c_put_adapter(hdmi->ddc_i2c); clk_disable_unprepare(hdmi->mod_clk); diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 42651d737c55..c81cdce6ed55 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -489,7 +489,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, WARN_ON(!tcon->quirks->has_channel_0); - tcon->dclk_min_div = 1; + tcon->dclk_min_div = tcon->quirks->dclk_min_div; tcon->dclk_max_div = 127; sun4i_tcon0_mode_set_common(tcon, mode); @@ -1426,12 +1426,14 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon, static const struct sun4i_tcon_quirks sun4i_a10_quirks = { .has_channel_0 = true, .has_channel_1 = true, + .dclk_min_div = 4, .set_mux = sun4i_a10_tcon_set_mux, }; static const struct sun4i_tcon_quirks sun5i_a13_quirks = { .has_channel_0 = true, .has_channel_1 = true, + .dclk_min_div = 4, .set_mux = sun5i_a13_tcon_set_mux, }; @@ -1440,6 +1442,7 @@ static const struct sun4i_tcon_quirks sun6i_a31_quirks = { .has_channel_1 = true, .has_lvds_alt = true, 
.needs_de_be_mux = true, + .dclk_min_div = 1, .set_mux = sun6i_tcon_set_mux, }; @@ -1447,11 +1450,13 @@ static const struct sun4i_tcon_quirks sun6i_a31s_quirks = { .has_channel_0 = true, .has_channel_1 = true, .needs_de_be_mux = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun7i_a20_quirks = { .has_channel_0 = true, .has_channel_1 = true, + .dclk_min_div = 4, /* Same display pipeline structure as A10 */ .set_mux = sun4i_a10_tcon_set_mux, }; @@ -1459,11 +1464,13 @@ static const struct sun4i_tcon_quirks sun7i_a20_quirks = { static const struct sun4i_tcon_quirks sun8i_a33_quirks = { .has_channel_0 = true, .has_lvds_alt = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = { .supports_lvds = true, .has_channel_0 = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = { @@ -1477,11 +1484,13 @@ static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = { static const struct sun4i_tcon_quirks sun8i_v3s_quirks = { .has_channel_0 = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun9i_a80_tcon_lcd_quirks = { - .has_channel_0 = true, - .needs_edp_reset = true, + .has_channel_0 = true, + .needs_edp_reset = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun9i_a80_tcon_tv_quirks = { diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h index f9f1fe80b206..a62ec826ae71 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.h +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h @@ -224,6 +224,7 @@ struct sun4i_tcon_quirks { bool needs_de_be_mux; /* sun6i needs mux to select backend */ bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */ bool supports_lvds; /* Does the TCON support an LVDS output? */ + u8 dclk_min_div; /* minimum divider for TCON0 DCLK */ /* callback to handle tcon muxing options */ int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *); diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index ac42c84d2d7f..d1c3f5fbfee4 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -260,6 +260,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, if (!objs) return; virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]); + virtio_gpu_array_lock_resv(objs); virtio_gpu_cmd_transfer_to_host_2d (vgdev, 0, plane->state->crtc_w, diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index 8063b1d567b1..e6e4c841fb06 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -261,7 +261,8 @@ static int asus_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { if ((usage->hid & HID_USAGE_PAGE) == 0xff310000 && - (usage->hid & HID_USAGE) != 0x00 && !usage->type) { + (usage->hid & HID_USAGE) != 0x00 && + (usage->hid & HID_USAGE) != 0xff && !usage->type) { hid_warn(hdev, "Unmapped Asus vendor usagepage code 0x%02x\n", usage->hid & HID_USAGE); } diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index e0b241bd3070..851fe54ea59e 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -288,6 +288,12 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign offset = report->size; report->size += parser->global.report_size * parser->global.report_count; + /* Total size check: Allow for possible report index byte */ + if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) { + hid_err(parser->device, "report is too long\n"); + return -1; + } + if 
(!parser->local.usage_index) /* Ignore padding fields */ return 0; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 7e1689ef35f5..3a400ce603c4 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -631,6 +631,7 @@ #define USB_VENDOR_ID_ITE 0x048d #define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386 #define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350 +#define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a #define USB_DEVICE_ID_ITE_LENOVO_YOGA900 0x8396 #define USB_DEVICE_ID_ITE8595 0x8595 @@ -730,6 +731,7 @@ #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 #define USB_DEVICE_ID_LG_MELFAS_MT 0x6007 #define I2C_DEVICE_ID_LG_8001 0x8001 +#define I2C_DEVICE_ID_LG_7010 0x7010 #define USB_VENDOR_ID_LOGITECH 0x046d #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e @@ -1102,6 +1104,7 @@ #define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10 #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 +#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968 #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 63855f275a38..dea9cc65bf80 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -1132,9 +1132,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel } mapped: - if (device->driver->input_mapped && device->driver->input_mapped(device, - hidinput, field, usage, &bit, &max) < 0) - goto ignore; + if (device->driver->input_mapped && + device->driver->input_mapped(device, hidinput, field, usage, + &bit, &max) < 0) { + /* + * The driver indicated that no further generic handling + * of the usage is desired. + */ + return; + } set_bit(usage->type, input->evbit); @@ -1215,9 +1221,11 @@ mapped: set_bit(MSC_SCAN, input->mscbit); } -ignore: return; +ignore: + usage->type = 0; + usage->code = 0; } static void hidinput_handle_scroll(struct hid_usage *usage, diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c index a45f2352618d..c436e12feb23 100644 --- a/drivers/hid/hid-ite.c +++ b/drivers/hid/hid-ite.c @@ -40,6 +40,9 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field, static const struct hid_device_id ite_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) }, { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) }, + /* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. 
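The size check added to hid_add_field() above works in bits: report->size accumulates report_size * report_count for every field, and the sum is compared against (HID_MAX_BUFFER_SIZE - 1) << 3, i.e. the byte buffer minus one byte (kept free for a possible report ID prefix) converted to bits. A descriptor whose fields add up to more than that is rejected while it is still being parsed, before any transfer buffer could be overrun. Illustrative arithmetic only (HID_MAX_BUFFER_SIZE is defined in bytes elsewhere in hid.h):

    max_report_bits = (HID_MAX_BUFFER_SIZE - 1) * 8;   /* reserve one byte for the report ID */
    report->size   += report_size * report_count;      /* both taken from GLOBAL items */
    if (report->size > max_report_bits)
            return -1;                                  /* "report is too long" */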
*/ + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, + USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) }, { } }; MODULE_DEVICE_TABLE(hid, ite_devices); diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 3cfeb1629f79..362805ddf377 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1019,7 +1019,7 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input, tool = MT_TOOL_DIAL; else if (unlikely(!confidence_state)) { tool = MT_TOOL_PALM; - if (!active && + if (!active && mt && input_mt_is_active(&mt->slots[slotnum])) { /* * The non-confidence was reported for @@ -1985,6 +1985,9 @@ static const struct hid_device_id mt_devices[] = { { .driver_data = MT_CLS_LG, HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) }, + { .driver_data = MT_CLS_LG, + HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC, + USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) }, /* MosArt panels */ { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE, diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index d1b39c29e353..0e7b2d998395 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -174,6 +174,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT }, { 0 } }; diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index 8dae0f9b819e..6286204d4c56 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -768,8 +768,12 @@ static int steam_probe(struct hid_device *hdev, if (steam->quirks & STEAM_QUIRK_WIRELESS) { hid_info(hdev, "Steam wireless receiver connected"); + /* If using a wireless adaptor ask for connection status */ + steam->connected = false; steam_request_conn_status(steam); } else { + /* A wired connection is always present */ + steam->connected = true; ret = steam_register(steam); if (ret) { hid_err(hdev, diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index c3fc0ceb8096..7a75aff78388 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -249,13 +249,14 @@ out: static __poll_t hidraw_poll(struct file *file, poll_table *wait) { struct hidraw_list *list = file->private_data; + __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* hidraw is always writable */ poll_wait(file, &list->hidraw->wait, wait); if (list->head != list->tail) - return EPOLLIN | EPOLLRDNORM | EPOLLOUT; + mask |= EPOLLIN | EPOLLRDNORM; if (!list->hidraw->exist) - return EPOLLERR | EPOLLHUP; - return 0; + mask |= EPOLLERR | EPOLLHUP; + return mask; } static int hidraw_open(struct inode *inode, struct file *file) diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index a358e61fbc82..009000c5d55c 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -49,6 +49,8 @@ #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) #define I2C_HID_QUIRK_BOGUS_IRQ BIT(4) #define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5) +#define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(6) + /* flags */ #define I2C_HID_STARTED 0 @@ -175,6 +177,8 @@ static const struct i2c_hid_quirks { I2C_HID_QUIRK_BOGUS_IRQ }, { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID, I2C_HID_QUIRK_RESET_ON_RESUME 
}, + { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720, + I2C_HID_QUIRK_BAD_INPUT_SIZE }, { 0, 0 } }; @@ -496,9 +500,15 @@ static void i2c_hid_get_input(struct i2c_hid *ihid) } if ((ret_size > size) || (ret_size < 2)) { - dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", - __func__, size, ret_size); - return; + if (ihid->quirks & I2C_HID_QUIRK_BAD_INPUT_SIZE) { + ihid->inbuf[0] = size & 0xff; + ihid->inbuf[1] = size >> 8; + ret_size = size; + } else { + dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", + __func__, size, ret_size); + return; + } } i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf); diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h index 6c1e6110867f..1fb294ca463e 100644 --- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h +++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h @@ -24,7 +24,9 @@ #define ICL_MOBILE_DEVICE_ID 0x34FC #define SPT_H_DEVICE_ID 0xA135 #define CML_LP_DEVICE_ID 0x02FC +#define CMP_H_DEVICE_ID 0x06FC #define EHL_Ax_DEVICE_ID 0x4BB3 +#define TGL_LP_DEVICE_ID 0xA0FC #define REVISION_ID_CHT_A0 0x6 #define REVISION_ID_CHT_Ax_SI 0x0 diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 784dcc8c7022..f491d8b4e24c 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -34,7 +34,9 @@ static const struct pci_device_id ish_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CMP_H_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)}, {0, } }; MODULE_DEVICE_TABLE(pci, ish_pci_tbl); diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c index fa0cc0899827..8fe3efcb8327 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c @@ -766,13 +766,14 @@ unlock: static __poll_t uhid_char_poll(struct file *file, poll_table *wait) { struct uhid_device *uhid = file->private_data; + __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uhid is always writable */ poll_wait(file, &uhid->waitq, wait); if (uhid->head != uhid->tail) - return EPOLLIN | EPOLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; - return 0; + return mask; } static const struct file_operations uhid_fops = { diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index e421cdf2d1a4..a970b809d778 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -241,12 +241,51 @@ static int hiddev_release(struct inode * inode, struct file * file) return 0; } +static int __hiddev_open(struct hiddev *hiddev, struct file *file) +{ + struct hiddev_list *list; + int error; + + lockdep_assert_held(&hiddev->existancelock); + + list = vzalloc(sizeof(*list)); + if (!list) + return -ENOMEM; + + mutex_init(&list->thread_lock); + list->hiddev = hiddev; + + if (!hiddev->open++) { + error = hid_hw_power(hiddev->hid, PM_HINT_FULLON); + if (error < 0) + goto err_drop_count; + + error = hid_hw_open(hiddev->hid); + if (error < 0) + goto err_normal_power; + } + + spin_lock_irq(&hiddev->list_lock); + list_add_tail(&list->node, &hiddev->list); + spin_unlock_irq(&hiddev->list_lock); + + file->private_data = list; + + return 0; + +err_normal_power: + hid_hw_power(hiddev->hid, PM_HINT_NORMAL); +err_drop_count: + hiddev->open--; + vfree(list); + return error; +} + /* * open file op */ static int hiddev_open(struct inode *inode, struct file *file) { 
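The hidraw and uhid poll() hunks above (and the uinput one further down) converge on the same idiom: a character device that can always accept writes reports EPOLLOUT | EPOLLWRNORM unconditionally and ORs the readable/error bits on top, instead of returning early with a partial mask. A sketch of that shape with hypothetical names:

	struct sample_dev { wait_queue_head_t waitq; unsigned int head, tail; bool exist; };

	static __poll_t sample_poll(struct file *file, poll_table *wait)
	{
		struct sample_dev *dev = file->private_data;
		__poll_t mask = EPOLLOUT | EPOLLWRNORM;	/* always writable */

		poll_wait(file, &dev->waitq, wait);

		if (dev->head != dev->tail)		/* queued events to read */
			mask |= EPOLLIN | EPOLLRDNORM;
		if (!dev->exist)			/* device went away */
			mask |= EPOLLERR | EPOLLHUP;

		return mask;
	}
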
- struct hiddev_list *list; struct usb_interface *intf; struct hid_device *hid; struct hiddev *hiddev; @@ -255,66 +294,14 @@ static int hiddev_open(struct inode *inode, struct file *file) intf = usbhid_find_interface(iminor(inode)); if (!intf) return -ENODEV; + hid = usb_get_intfdata(intf); hiddev = hid->hiddev; - if (!(list = vzalloc(sizeof(struct hiddev_list)))) - return -ENOMEM; - mutex_init(&list->thread_lock); - list->hiddev = hiddev; - file->private_data = list; - - /* - * no need for locking because the USB major number - * is shared which usbcore guards against disconnect - */ - if (list->hiddev->exist) { - if (!list->hiddev->open++) { - res = hid_hw_open(hiddev->hid); - if (res < 0) - goto bail; - } - } else { - res = -ENODEV; - goto bail; - } - - spin_lock_irq(&list->hiddev->list_lock); - list_add_tail(&list->node, &hiddev->list); - spin_unlock_irq(&list->hiddev->list_lock); - mutex_lock(&hiddev->existancelock); - /* - * recheck exist with existance lock held to - * avoid opening a disconnected device - */ - if (!list->hiddev->exist) { - res = -ENODEV; - goto bail_unlock; - } - if (!list->hiddev->open++) - if (list->hiddev->exist) { - struct hid_device *hid = hiddev->hid; - res = hid_hw_power(hid, PM_HINT_FULLON); - if (res < 0) - goto bail_unlock; - res = hid_hw_open(hid); - if (res < 0) - goto bail_normal_power; - } - mutex_unlock(&hiddev->existancelock); - return 0; -bail_normal_power: - hid_hw_power(hid, PM_HINT_NORMAL); -bail_unlock: + res = hiddev->exist ? __hiddev_open(hiddev, file) : -ENODEV; mutex_unlock(&hiddev->existancelock); - spin_lock_irq(&list->hiddev->list_lock); - list_del(&list->node); - spin_unlock_irq(&list->hiddev->list_lock); -bail: - file->private_data = NULL; - vfree(list); return res; } diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index ccb74529bc78..d99a9d407671 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2096,14 +2096,16 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field (hdev->product == 0x34d || hdev->product == 0x34e || /* MobileStudio Pro */ hdev->product == 0x357 || hdev->product == 0x358 || /* Intuos Pro 2 */ hdev->product == 0x392 || /* Intuos Pro 2 */ - hdev->product == 0x398 || hdev->product == 0x399)) { /* MobileStudio Pro */ + hdev->product == 0x398 || hdev->product == 0x399 || /* MobileStudio Pro */ + hdev->product == 0x3AA)) { /* MobileStudio Pro */ value = (field->logical_maximum - value); if (hdev->product == 0x357 || hdev->product == 0x358 || hdev->product == 0x392) value = wacom_offset_rotation(input, usage, value, 3, 16); else if (hdev->product == 0x34d || hdev->product == 0x34e || - hdev->product == 0x398 || hdev->product == 0x399) + hdev->product == 0x398 || hdev->product == 0x399 || + hdev->product == 0x3AA) value = wacom_offset_rotation(input, usage, value, 1, 2); } else { diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index dc3f507e7562..a90d757f7043 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -1132,7 +1132,6 @@ static void etm4_init_trace_id(struct etmv4_drvdata *drvdata) drvdata->trcid = coresight_get_trace_id(drvdata->cpu); } -#ifdef CONFIG_CPU_PM static int etm4_cpu_save(struct etmv4_drvdata *drvdata) { int i, ret = 0; @@ -1402,17 +1401,17 @@ static struct notifier_block etm4_cpu_pm_nb = { static int etm4_cpu_pm_register(void) { - return cpu_pm_register_notifier(&etm4_cpu_pm_nb); + if (IS_ENABLED(CONFIG_CPU_PM)) + return 
cpu_pm_register_notifier(&etm4_cpu_pm_nb); + + return 0; } static void etm4_cpu_pm_unregister(void) { - cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); + if (IS_ENABLED(CONFIG_CPU_PM)) + cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); } -#else -static int etm4_cpu_pm_register(void) { return 0; } -static void etm4_cpu_pm_unregister(void) { } -#endif static int etm4_probe(struct amba_device *adev, const struct amba_id *id) { diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index 0dfd97bbde9e..ca232ec565e8 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -834,9 +834,6 @@ static irqreturn_t intel_th_irq(int irq, void *data) ret |= d->irq(th->thdev[i]); } - if (ret == IRQ_NONE) - pr_warn_ratelimited("nobody cared for irq\n"); - return ret; } @@ -887,6 +884,7 @@ intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata, if (th->irq == -1) th->irq = devres[r].start; + th->num_irqs++; break; default: dev_warn(dev, "Unknown resource type %lx\n", @@ -940,6 +938,9 @@ void intel_th_free(struct intel_th *th) th->num_thdevs = 0; + for (i = 0; i < th->num_irqs; i++) + devm_free_irq(th->dev, th->irq + i, th); + pm_runtime_get_sync(th->dev); pm_runtime_forbid(th->dev); diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h index 0df480072b6c..6f4f5486fe6d 100644 --- a/drivers/hwtracing/intel_th/intel_th.h +++ b/drivers/hwtracing/intel_th/intel_th.h @@ -261,6 +261,7 @@ enum th_mmio_idx { * @num_thdevs: number of devices in the @thdev array * @num_resources: number of resources in the @resource array * @irq: irq number + * @num_irqs: number of IRQs is use * @id: this Intel TH controller's device ID in the system * @major: device node major for output devices */ @@ -277,6 +278,7 @@ struct intel_th { unsigned int num_thdevs; unsigned int num_resources; int irq; + int num_irqs; int id; int major; diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index 6d240dfae9d9..8e48c7458aa3 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -1676,10 +1676,13 @@ static int intel_th_msc_init(struct msc *msc) return 0; } -static void msc_win_switch(struct msc *msc) +static int msc_win_switch(struct msc *msc) { struct msc_window *first; + if (list_empty(&msc->win_list)) + return -EINVAL; + first = list_first_entry(&msc->win_list, struct msc_window, entry); if (msc_is_last_win(msc->cur_win)) @@ -1691,6 +1694,8 @@ static void msc_win_switch(struct msc *msc) msc->base_addr = msc_win_base_dma(msc->cur_win); intel_th_trace_switch(msc->thdev); + + return 0; } /** @@ -2025,16 +2030,15 @@ win_switch_store(struct device *dev, struct device_attribute *attr, if (val != 1) return -EINVAL; + ret = -EINVAL; mutex_lock(&msc->buf_mutex); /* * Window switch can only happen in the "multi" mode. * If a external buffer is engaged, they have the full * control over window switching. */ - if (msc->mode != MSC_MODE_MULTI || msc->mbuf) - ret = -ENOTSUPP; - else - msc_win_switch(msc); + if (msc->mode == MSC_MODE_MULTI && !msc->mbuf) + ret = msc_win_switch(msc); mutex_unlock(&msc->buf_mutex); return ret ? 
ret : size; diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index ebf3e30e989a..e9d90b53bbc4 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -205,6 +205,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { .driver_data = (kernel_ulong_t)&intel_th_2x, }, { + /* Comet Lake PCH-V */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa3a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { /* Ice Lake NNPI */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5), .driver_data = (kernel_ulong_t)&intel_th_2x, @@ -229,6 +234,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Elkhart Lake */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { 0 }, }; diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c index e13af4874976..5137e6297022 100644 --- a/drivers/i2c/busses/i2c-at91-core.c +++ b/drivers/i2c/busses/i2c-at91-core.c @@ -174,7 +174,7 @@ static struct at91_twi_pdata sama5d2_config = { static struct at91_twi_pdata sam9x60_config = { .clk_max_div = 7, - .clk_offset = 4, + .clk_offset = 3, .has_unre_flag = true, .has_alt_cmd = true, .has_hold_field = true, diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index e01b2b57e724..5ab901ad615d 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -58,6 +58,7 @@ struct bcm2835_i2c_dev { struct i2c_adapter adapter; struct completion completion; struct i2c_msg *curr_msg; + struct clk *bus_clk; int num_msgs; u32 msg_err; u8 *msg_buf; @@ -404,7 +405,6 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) struct resource *mem, *irq; int ret; struct i2c_adapter *adap; - struct clk *bus_clk; struct clk *mclk; u32 bus_clk_rate; @@ -427,11 +427,11 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) return PTR_ERR(mclk); } - bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev); + i2c_dev->bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev); - if (IS_ERR(bus_clk)) { + if (IS_ERR(i2c_dev->bus_clk)) { dev_err(&pdev->dev, "Could not register clock\n"); - return PTR_ERR(bus_clk); + return PTR_ERR(i2c_dev->bus_clk); } ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", @@ -442,13 +442,13 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) bus_clk_rate = 100000; } - ret = clk_set_rate_exclusive(bus_clk, bus_clk_rate); + ret = clk_set_rate_exclusive(i2c_dev->bus_clk, bus_clk_rate); if (ret < 0) { dev_err(&pdev->dev, "Could not set clock frequency\n"); return ret; } - ret = clk_prepare_enable(bus_clk); + ret = clk_prepare_enable(i2c_dev->bus_clk); if (ret) { dev_err(&pdev->dev, "Couldn't prepare clock"); return ret; @@ -491,10 +491,9 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) static int bcm2835_i2c_remove(struct platform_device *pdev) { struct bcm2835_i2c_dev *i2c_dev = platform_get_drvdata(pdev); - struct clk *bus_clk = devm_clk_get(i2c_dev->dev, "div"); - clk_rate_exclusive_put(bus_clk); - clk_disable_unprepare(bus_clk); + clk_rate_exclusive_put(i2c_dev->bus_clk); + clk_disable_unprepare(i2c_dev->bus_clk); free_irq(i2c_dev->irq, i2c_dev); i2c_del_adapter(&i2c_dev->adapter); diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c index 38556381f4ca..2f8b8050a223 100644 --- a/drivers/i2c/busses/i2c-iop3xx.c +++ b/drivers/i2c/busses/i2c-iop3xx.c @@ -433,13 
+433,17 @@ iop3xx_i2c_probe(struct platform_device *pdev) adapter_data->gpio_scl = devm_gpiod_get_optional(&pdev->dev, "scl", GPIOD_ASIS); - if (IS_ERR(adapter_data->gpio_scl)) - return PTR_ERR(adapter_data->gpio_scl); + if (IS_ERR(adapter_data->gpio_scl)) { + ret = PTR_ERR(adapter_data->gpio_scl); + goto free_both; + } adapter_data->gpio_sda = devm_gpiod_get_optional(&pdev->dev, "sda", GPIOD_ASIS); - if (IS_ERR(adapter_data->gpio_sda)) - return PTR_ERR(adapter_data->gpio_sda); + if (IS_ERR(adapter_data->gpio_sda)) { + ret = PTR_ERR(adapter_data->gpio_sda); + goto free_both; + } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index a98bf31d0e5c..61339c665ebd 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -1608,14 +1608,18 @@ static int tegra_i2c_probe(struct platform_device *pdev) } pm_runtime_enable(&pdev->dev); - if (!pm_runtime_enabled(&pdev->dev)) + if (!pm_runtime_enabled(&pdev->dev)) { ret = tegra_i2c_runtime_resume(&pdev->dev); - else + if (ret < 0) { + dev_err(&pdev->dev, "runtime resume failed\n"); + goto unprepare_div_clk; + } + } else { ret = pm_runtime_get_sync(i2c_dev->dev); - - if (ret < 0) { - dev_err(&pdev->dev, "runtime resume failed\n"); - goto unprepare_div_clk; + if (ret < 0) { + dev_err(&pdev->dev, "runtime resume failed\n"); + goto disable_rpm; + } } if (i2c_dev->is_multimaster_mode) { @@ -1623,7 +1627,7 @@ static int tegra_i2c_probe(struct platform_device *pdev) if (ret < 0) { dev_err(i2c_dev->dev, "div_clk enable failed %d\n", ret); - goto disable_rpm; + goto put_rpm; } } @@ -1671,11 +1675,16 @@ disable_div_clk: if (i2c_dev->is_multimaster_mode) clk_disable(i2c_dev->div_clk); -disable_rpm: - pm_runtime_disable(&pdev->dev); - if (!pm_runtime_status_suspended(&pdev->dev)) +put_rpm: + if (pm_runtime_enabled(&pdev->dev)) + pm_runtime_put_sync(&pdev->dev); + else tegra_i2c_runtime_suspend(&pdev->dev); +disable_rpm: + if (pm_runtime_enabled(&pdev->dev)) + pm_runtime_disable(&pdev->dev); + unprepare_div_clk: clk_unprepare(i2c_dev->div_clk); @@ -1710,9 +1719,14 @@ static int tegra_i2c_remove(struct platform_device *pdev) static int __maybe_unused tegra_i2c_suspend(struct device *dev) { struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); + int err; i2c_mark_adapter_suspended(&i2c_dev->adapter); + err = pm_runtime_force_suspend(dev); + if (err < 0) + return err; + return 0; } @@ -1733,6 +1747,10 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev) if (err) return err; + err = pm_runtime_force_resume(dev); + if (err < 0) + return err; + i2c_mark_adapter_resumed(&i2c_dev->adapter); return 0; diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 9f8dcd3f8385..35b209797d7b 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -186,10 +186,11 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) * If we can set SDA, we will always create a STOP to ensure additional * pulses will do no harm. This is achieved by letting SDA follow SCL * half a cycle later. Check the 'incomplete_write_byte' fault injector - * for details. + * for details. Note that we must honour tsu:sto, 4us, but lets use 5us + * here for simplicity. 
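The i2c-core-base.c change at this point stretches the delay after raising SCL to a full RECOVERY_NDELAY so that the STOP setup time (tsu:sto, 4 us minimum per the comment above, 5 us used) is honoured before SDA is released. A rough bit-banged sketch of generating a STOP with that setup time; the helpers and constants are illustrative, not the core's actual API, and it assumes SCL and SDA are both driven low beforehand:

	#define T_SU_STO_NS	5000	/* >= 4 us: SCL high until SDA may rise  */
	#define T_HALF_NS	5000	/* half of a 100 kHz clock period        */

	static void gen_stop_sketch(void (*set_scl)(int val), void (*set_sda)(int val))
	{
		set_scl(1);			/* release SCL first                  */
		ndelay(T_SU_STO_NS);		/* honour tsu:sto                     */
		set_sda(1);			/* SDA low->high while SCL high: STOP */
		ndelay(T_HALF_NS);
	}
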
*/ bri->set_scl(adap, scl); - ndelay(RECOVERY_NDELAY / 2); + ndelay(RECOVERY_NDELAY); if (bri->set_sda) bri->set_sda(adap, scl); ndelay(RECOVERY_NDELAY / 2); @@ -211,7 +212,13 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) scl = !scl; bri->set_scl(adap, scl); /* Creating STOP again, see above */ - ndelay(RECOVERY_NDELAY / 2); + if (scl) { + /* Honour minimum tsu:sto */ + ndelay(RECOVERY_NDELAY); + } else { + /* Honour minimum tf and thd:dat */ + ndelay(RECOVERY_NDELAY / 2); + } if (bri->set_sda) bri->set_sda(adap, scl); ndelay(RECOVERY_NDELAY / 2); diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index 3f03abf100b5..306bf15023a7 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -494,13 +494,11 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev, st->channel_config[channel].buf_negative = of_property_read_bool(child, "adi,buffered-negative"); - *chan = ad7124_channel_template; - chan->address = channel; - chan->scan_index = channel; - chan->channel = ain[0]; - chan->channel2 = ain[1]; - - chan++; + chan[channel] = ad7124_channel_template; + chan[channel].address = channel; + chan[channel].scan_index = channel; + chan[channel].channel = ain[0]; + chan[channel].channel2 = ain[1]; } return 0; diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig index fa4586037bb8..0b91de4df8f4 100644 --- a/drivers/iio/chemical/Kconfig +++ b/drivers/iio/chemical/Kconfig @@ -65,6 +65,7 @@ config IAQCORE config PMS7003 tristate "Plantower PMS7003 particulate matter sensor" depends on SERIAL_DEV_BUS + select IIO_BUFFER select IIO_TRIGGERED_BUFFER help Say Y here to build support for the Plantower PMS7003 particulate diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index a7d40c02ce6b..b921dd9e108f 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -1301,7 +1301,8 @@ static int st_lsm6dsx_check_whoami(struct st_lsm6dsx_hw *hw, int id, for (i = 0; i < ARRAY_SIZE(st_lsm6dsx_sensor_settings); i++) { for (j = 0; j < ST_LSM6DSX_MAX_ID; j++) { - if (id == st_lsm6dsx_sensor_settings[i].id[j].hw_id) + if (st_lsm6dsx_sensor_settings[i].id[j].name && + id == st_lsm6dsx_sensor_settings[i].id[j].hw_id) break; } if (j < ST_LSM6DSX_MAX_ID) diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index c193d64e5217..112225c0e486 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -566,7 +566,7 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const unsigned long *mask, bool timestamp) { unsigned bytes = 0; - int length, i; + int length, i, largest = 0; /* How much space will the demuxed element take? 
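The iio_compute_scan_bytes() hunk here tracks the largest element in the scan and rounds the total up to it, so that when scans repeat back-to-back in the buffer every element stays naturally aligned. A small worked restatement of the computation for a hypothetical two-element scan:

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* a is a power of two */

	static unsigned int scan_bytes_sketch(unsigned int len0, unsigned int len1)
	{
		unsigned int bytes = 0, largest = 0;

		bytes = ALIGN_UP(bytes, len0) + len0;
		largest = len0 > largest ? len0 : largest;

		bytes = ALIGN_UP(bytes, len1) + len1;
		largest = len1 > largest ? len1 : largest;

		return ALIGN_UP(bytes, largest);	/* keep every repeated scan aligned */
	}

	/*
	 * scan_bytes_sketch(8, 2) == 16: without the final ALIGN it would be 10,
	 * and the 8-byte element of the next scan in the buffer would start at a
	 * misaligned offset.
	 */
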
*/ for_each_set_bit(i, mask, @@ -574,13 +574,17 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, length = iio_storage_bytes_for_si(indio_dev, i); bytes = ALIGN(bytes, length); bytes += length; + largest = max(largest, length); } if (timestamp) { length = iio_storage_bytes_for_timestamp(indio_dev); bytes = ALIGN(bytes, length); bytes += length; + largest = max(largest, length); } + + bytes = ALIGN(bytes, largest); return bytes; } diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c index 16dacea9eadf..b0e241aaefb4 100644 --- a/drivers/iio/light/vcnl4000.c +++ b/drivers/iio/light/vcnl4000.c @@ -163,7 +163,6 @@ static int vcnl4200_init(struct vcnl4000_data *data) if (ret < 0) return ret; - data->al_scale = 24000; data->vcnl4200_al.reg = VCNL4200_AL_DATA; data->vcnl4200_ps.reg = VCNL4200_PS_DATA; switch (id) { @@ -172,11 +171,13 @@ static int vcnl4200_init(struct vcnl4000_data *data) /* show 54ms in total. */ data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000); data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000); + data->al_scale = 24000; break; case VCNL4040_PROD_ID: /* Integration time is 80ms, add 10ms. */ data->vcnl4200_al.sampling_rate = ktime_set(0, 100000 * 1000); data->vcnl4200_ps.sampling_rate = ktime_set(0, 100000 * 1000); + data->al_scale = 120000; break; } data->vcnl4200_al.last_measurement = ktime_set(0, 0); diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 9b6ca15a183c..ad5112a2325f 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -3305,8 +3305,10 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) int rc; rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); - if (rc) + if (rc) { dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc); + return rc; + } if (mr->pages) { rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 958c1ff9c515..4d07d22bfa7b 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -2283,13 +2283,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, /* Add qp to flush list of the CQ */ bnxt_qplib_add_flush_qp(qp); } else { + /* Before we complete, do WA 9060 */ + if (do_wa9060(qp, cq, cq_cons, sw_sq_cons, + cqe_sq_cons)) { + *lib_qp = qp; + goto out; + } if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { - /* Before we complete, do WA 9060 */ - if (do_wa9060(qp, cq, cq_cons, sw_sq_cons, - cqe_sq_cons)) { - *lib_qp = qp; - goto out; - } cqe->status = CQ_REQ_STATUS_OK; cqe++; (*budget)--; diff --git a/drivers/infiniband/hw/hfi1/iowait.c b/drivers/infiniband/hw/hfi1/iowait.c index adb4a1ba921b..5836fe7b2817 100644 --- a/drivers/infiniband/hw/hfi1/iowait.c +++ b/drivers/infiniband/hw/hfi1/iowait.c @@ -81,7 +81,9 @@ void iowait_init(struct iowait *wait, u32 tx_limit, void iowait_cancel_work(struct iowait *w) { cancel_work_sync(&iowait_get_ib_work(w)->iowork); - cancel_work_sync(&iowait_get_tid_work(w)->iowork); + /* Make sure that the iowork for TID RDMA is used */ + if (iowait_get_tid_work(w)->iowork.func) + cancel_work_sync(&iowait_get_tid_work(w)->iowork); } /** diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index e53f542b60af..8a2e0d9351e9 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -4633,6 +4633,15 @@ void 
hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet) */ fpsn = full_flow_psn(flow, flow->flow_state.spsn); req->r_ack_psn = psn; + /* + * If resync_psn points to the last flow PSN for a + * segment and the new segment (likely from a new + * request) starts with a new generation number, we + * need to adjust resync_psn accordingly. + */ + if (flow->flow_state.generation != + (resync_psn >> HFI1_KDETH_BTH_SEQ_SHIFT)) + resync_psn = mask_psn(fpsn - 1); flow->resync_npkts += delta_psn(mask_psn(resync_psn + 1), fpsn); /* diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 86375947bc67..dbd96d029d8b 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -169,8 +169,7 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context) static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) { struct i40iw_ucontext *ucontext; - u64 db_addr_offset; - u64 push_offset; + u64 db_addr_offset, push_offset, pfn; ucontext = to_ucontext(context); if (ucontext->iwdev->sc_dev.is_pf) { @@ -189,7 +188,6 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) { vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - vma->vm_private_data = ucontext; } else { if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); @@ -197,12 +195,12 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); } - if (io_remap_pfn_range(vma, vma->vm_start, - vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT), - PAGE_SIZE, vma->vm_page_prot)) - return -EAGAIN; + pfn = vma->vm_pgoff + + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> + PAGE_SHIFT); - return 0; + return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE, + vma->vm_page_prot, NULL); } /** diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index d7dd6fcf2db0..f918fca9ada3 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -224,13 +224,13 @@ static void __pass_event(struct evdev_client *client, */ client->tail = (client->head - 2) & (client->bufsize - 1); - client->buffer[client->tail].input_event_sec = - event->input_event_sec; - client->buffer[client->tail].input_event_usec = - event->input_event_usec; - client->buffer[client->tail].type = EV_SYN; - client->buffer[client->tail].code = SYN_DROPPED; - client->buffer[client->tail].value = 0; + client->buffer[client->tail] = (struct input_event) { + .input_event_sec = event->input_event_sec, + .input_event_usec = event->input_event_usec, + .type = EV_SYN, + .code = SYN_DROPPED, + .value = 0, + }; client->packet_head = client->tail; } diff --git a/drivers/input/input.c b/drivers/input/input.c index 55086279d044..ee6c3234df36 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -878,16 +878,18 @@ static int input_default_setkeycode(struct input_dev *dev, } } - __clear_bit(*old_keycode, dev->keybit); - __set_bit(ke->keycode, dev->keybit); - - for (i = 0; i < dev->keycodemax; i++) { - if (input_fetch_keycode(dev, i) == *old_keycode) { - __set_bit(*old_keycode, dev->keybit); - break; /* Setting the bit twice is useless, so break */ + if (*old_keycode <= KEY_MAX) { + __clear_bit(*old_keycode, dev->keybit); + for (i = 0; i < dev->keycodemax; i++) { + if (input_fetch_keycode(dev, i) == *old_keycode) { + 
__set_bit(*old_keycode, dev->keybit); + /* Setting the bit twice is useless, so break */ + break; + } } } + __set_bit(ke->keycode, dev->keybit); return 0; } @@ -943,9 +945,13 @@ int input_set_keycode(struct input_dev *dev, * Simulate keyup event if keycode is not present * in the keymap anymore */ - if (test_bit(EV_KEY, dev->evbit) && - !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && - __test_and_clear_bit(old_keycode, dev->key)) { + if (old_keycode > KEY_MAX) { + dev_warn(dev->dev.parent ?: &dev->dev, + "%s: got too big old keycode %#x\n", + __func__, old_keycode); + } else if (test_bit(EV_KEY, dev->evbit) && + !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && + __test_and_clear_bit(old_keycode, dev->key)) { struct input_value vals[] = { { EV_KEY, old_keycode, 0 }, input_value_sync diff --git a/drivers/input/keyboard/imx_sc_key.c b/drivers/input/keyboard/imx_sc_key.c index 53799527dc75..9f809aeb785c 100644 --- a/drivers/input/keyboard/imx_sc_key.c +++ b/drivers/input/keyboard/imx_sc_key.c @@ -78,7 +78,13 @@ static void imx_sc_check_for_events(struct work_struct *work) return; } - state = (bool)msg.state; + /* + * The response data from SCU firmware is 4 bytes, + * but ONLY the first byte is the key state, other + * 3 bytes could be some dirty data, so we should + * ONLY take the first byte as key state. + */ + state = (bool)(msg.state & 0xff); if (state ^ priv->keystate) { priv->keystate = state; diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index fd253781be71..f2593133e524 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -74,12 +74,16 @@ static int uinput_dev_event(struct input_dev *dev, struct uinput_device *udev = input_get_drvdata(dev); struct timespec64 ts; - udev->buff[udev->head].type = type; - udev->buff[udev->head].code = code; - udev->buff[udev->head].value = value; ktime_get_ts64(&ts); - udev->buff[udev->head].input_event_sec = ts.tv_sec; - udev->buff[udev->head].input_event_usec = ts.tv_nsec / NSEC_PER_USEC; + + udev->buff[udev->head] = (struct input_event) { + .input_event_sec = ts.tv_sec, + .input_event_usec = ts.tv_nsec / NSEC_PER_USEC, + .type = type, + .code = code, + .value = value, + }; + udev->head = (udev->head + 1) % UINPUT_BUFFER_SIZE; wake_up_interruptible(&udev->waitq); @@ -689,13 +693,14 @@ static ssize_t uinput_read(struct file *file, char __user *buffer, static __poll_t uinput_poll(struct file *file, poll_table *wait) { struct uinput_device *udev = file->private_data; + __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uinput is always writable */ poll_wait(file, &udev->waitq, wait); if (udev->head != udev->tail) - return EPOLLIN | EPOLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; - return EPOLLOUT | EPOLLWRNORM; + return mask; } static int uinput_release(struct inode *inode, struct file *file) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 0cc702a70a96..a2e96a5fd9a7 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -19,6 +19,7 @@ #include <linux/iova.h> #include <linux/irq.h> #include <linux/mm.h> +#include <linux/mutex.h> #include <linux/pci.h> #include <linux/scatterlist.h> #include <linux/vmalloc.h> @@ -44,7 +45,6 @@ struct iommu_dma_cookie { dma_addr_t msi_iova; }; struct list_head msi_page_list; - spinlock_t msi_lock; /* Domain for flush queue callback; NULL if flush queue not in use */ struct iommu_domain *fq_domain; @@ -63,7 +63,6 @@ static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type) cookie = kzalloc(sizeof(*cookie), 
GFP_KERNEL); if (cookie) { - spin_lock_init(&cookie->msi_lock); INIT_LIST_HEAD(&cookie->msi_page_list); cookie->type = type; } @@ -399,7 +398,7 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent, } static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, - size_t size, dma_addr_t dma_limit, struct device *dev) + size_t size, u64 dma_limit, struct device *dev) { struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iova_domain *iovad = &cookie->iovad; @@ -424,7 +423,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit); if (domain->geometry.force_aperture) - dma_limit = min(dma_limit, domain->geometry.aperture_end); + dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end); /* Try to get PCI devices a SAC address */ if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev)) @@ -477,7 +476,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr, } static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, - size_t size, int prot, dma_addr_t dma_mask) + size_t size, int prot, u64 dma_mask) { struct iommu_domain *domain = iommu_get_dma_domain(dev); struct iommu_dma_cookie *cookie = domain->iova_cookie; @@ -1176,7 +1175,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, if (msi_page->phys == msi_addr) return msi_page; - msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC); + msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL); if (!msi_page) return NULL; @@ -1204,25 +1203,22 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) { struct device *dev = msi_desc_to_dev(desc); struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct iommu_dma_cookie *cookie; struct iommu_dma_msi_page *msi_page; - unsigned long flags; + static DEFINE_MUTEX(msi_prepare_lock); /* see below */ if (!domain || !domain->iova_cookie) { desc->iommu_cookie = NULL; return 0; } - cookie = domain->iova_cookie; - /* - * We disable IRQs to rule out a possible inversion against - * irq_desc_lock if, say, someone tries to retarget the affinity - * of an MSI from within an IPI handler. + * In fact the whole prepare operation should already be serialised by + * irq_domain_mutex further up the callchain, but that's pretty subtle + * on its own, so consider this locking as failsafe documentation... */ - spin_lock_irqsave(&cookie->msi_lock, flags); + mutex_lock(&msi_prepare_lock); msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain); - spin_unlock_irqrestore(&cookie->msi_lock, flags); + mutex_unlock(&msi_prepare_lock); msi_desc_set_iommu_cookie(desc, msi_page); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 0c8d81f56a30..1801f0aaf013 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -5478,9 +5478,6 @@ static int intel_iommu_map(struct iommu_domain *domain, int prot = 0; int ret; - if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN) - return -EINVAL; - if (iommu_prot & IOMMU_READ) prot |= DMA_PTE_READ; if (iommu_prot & IOMMU_WRITE) @@ -5523,8 +5520,6 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain, /* Cope with horrid API which requires us to unmap more than the size argument if it happens to be a large-page mapping. 
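The dma-iommu.c hunk above drops the per-cookie spinlock in favour of a static mutex around MSI page preparation, which in turn lets the GFP_ATOMIC allocation become GFP_KERNEL; the patch's own comment treats the mutex mainly as failsafe documentation, since irq_domain_mutex already serialises the path. The general shape of that pattern, with hypothetical names, is roughly:

	static void *prepare_slowpath_sketch(size_t size)
	{
		static DEFINE_MUTEX(prepare_lock);	/* sleepable context only  */
		void *obj;

		mutex_lock(&prepare_lock);
		obj = kzalloc(size, GFP_KERNEL);	/* may sleep under a mutex */
		mutex_unlock(&prepare_lock);

		return obj;
	}
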
*/ BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level)); - if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN) - return 0; if (size < VTD_PAGE_SIZE << level_to_offset_bits(level)) size = VTD_PAGE_SIZE << level_to_offset_bits(level); @@ -5556,9 +5551,6 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, int level = 0; u64 phys = 0; - if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN) - return 0; - pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level); if (pte) phys = dma_pte_addr(pte); @@ -5632,8 +5624,10 @@ static int intel_iommu_add_device(struct device *dev) group = iommu_group_get_for_dev(dev); - if (IS_ERR(group)) - return PTR_ERR(group); + if (IS_ERR(group)) { + ret = PTR_ERR(group); + goto unlink; + } iommu_group_put(group); @@ -5659,7 +5653,8 @@ static int intel_iommu_add_device(struct device *dev) if (!get_private_domain_for_dev(dev)) { dev_warn(dev, "Failed to get a private domain.\n"); - return -ENOMEM; + ret = -ENOMEM; + goto unlink; } dev_info(dev, @@ -5674,6 +5669,10 @@ static int intel_iommu_add_device(struct device *dev) } return 0; + +unlink: + iommu_device_unlink(&iommu->iommu, dev); + return ret; } static void intel_iommu_remove_device(struct device *dev) @@ -5736,8 +5735,8 @@ static void intel_iommu_get_resv_regions(struct device *device, struct pci_dev *pdev = to_pci_dev(device); if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) { - reg = iommu_alloc_resv_region(0, 1UL << 24, 0, - IOMMU_RESV_DIRECT); + reg = iommu_alloc_resv_region(0, 1UL << 24, prot, + IOMMU_RESV_DIRECT_RELAXABLE); if (reg) list_add_tail(®->list, head); } @@ -5825,6 +5824,13 @@ static void intel_iommu_apply_resv_region(struct device *dev, WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end)); } +static struct iommu_group *intel_iommu_device_group(struct device *dev) +{ + if (dev_is_pci(dev)) + return pci_device_group(dev); + return generic_device_group(dev); +} + #ifdef CONFIG_INTEL_IOMMU_SVM struct intel_iommu *intel_svm_device_to_iommu(struct device *dev) { @@ -5997,7 +6003,7 @@ const struct iommu_ops intel_iommu_ops = { .get_resv_regions = intel_iommu_get_resv_regions, .put_resv_regions = intel_iommu_put_resv_regions, .apply_resv_region = intel_iommu_apply_resv_region, - .device_group = pci_device_group, + .device_group = intel_iommu_device_group, .dev_has_feat = intel_iommu_dev_has_feat, .dev_feat_enabled = intel_iommu_dev_feat_enabled, .dev_enable_feat = intel_iommu_dev_enable_feat, diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 9b159132405d..dca88f9fdf29 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -104,11 +104,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d { struct qi_desc desc; - /* - * Do PASID granu IOTLB invalidation if page selective capability is - * not available. 
- */ - if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) { + if (pages == -1) { desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) | QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index db7bfd4f2d20..3ead597e1c57 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -312,8 +312,8 @@ int iommu_insert_resv_region(struct iommu_resv_region *new, list_for_each_entry_safe(iter, tmp, regions, list) { phys_addr_t top_end, iter_end = iter->start + iter->length - 1; - /* no merge needed on elements of different types than @nr */ - if (iter->type != nr->type) { + /* no merge needed on elements of different types than @new */ + if (iter->type != new->type) { list_move_tail(&iter->list, &stack); continue; } @@ -751,6 +751,7 @@ err_put_group: mutex_unlock(&group->mutex); dev->iommu_group = NULL; kobject_put(group->devices_kobj); + sysfs_remove_link(group->devices_kobj, device->name); err_free_name: kfree(device->name); err_remove_link: @@ -2282,13 +2283,13 @@ request_default_domain_for_dev(struct device *dev, unsigned long type) goto out; } - iommu_group_create_direct_mappings(group, dev); - /* Make the domain the default for this group */ if (group->default_domain) iommu_domain_free(group->default_domain); group->default_domain = domain; + iommu_group_create_direct_mappings(group, dev); + dev_info(dev, "Using iommu %s mapping\n", type == IOMMU_DOMAIN_DMA ? "dma" : "direct"); diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 41c605b0058f..c7a914b9bbbc 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -233,7 +233,7 @@ static DEFINE_MUTEX(iova_cache_mutex); struct iova *alloc_iova_mem(void) { - return kmem_cache_alloc(iova_cache, GFP_ATOMIC); + return kmem_cache_zalloc(iova_cache, GFP_ATOMIC); } EXPORT_SYMBOL(alloc_iova_mem); diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c index 01d18b39069e..c5589ee0dfb3 100644 --- a/drivers/irqchip/irq-ingenic.c +++ b/drivers/irqchip/irq-ingenic.c @@ -17,7 +17,6 @@ #include <linux/delay.h> #include <asm/io.h> -#include <asm/mach-jz4740/irq.h> struct ingenic_intc_data { void __iomem *base; @@ -50,7 +49,7 @@ static irqreturn_t intc_cascade(int irq, void *data) while (pending) { int bit = __fls(pending); - irq = irq_find_mapping(domain, bit + (i * 32)); + irq = irq_linear_revmap(domain, bit + (i * 32)); generic_handle_irq(irq); pending &= ~BIT(bit); } @@ -97,8 +96,7 @@ static int __init ingenic_intc_of_init(struct device_node *node, goto out_unmap_irq; } - domain = irq_domain_add_legacy(node, num_chips * 32, - JZ4740_IRQ_BASE, 0, + domain = irq_domain_add_linear(node, num_chips * 32, &irq_generic_chip_ops, NULL); if (!domain) { err = -ENOMEM; diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 8df547d2d935..0aca5807a119 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -256,7 +256,7 @@ static int __init plic_init(struct device_node *node, * Skip contexts other than external interrupts for our * privilege level. 
*/ - if (parent.args[0] != IRQ_EXT) + if (parent.args[0] != RV_IRQ_EXT) continue; hartid = plic_find_hart_id(parent.np); diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 3c50c4e4da8f..963d3774c93e 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -17,7 +17,7 @@ #include <linux/dm-bufio.h> #define DM_MSG_PREFIX "persistent snapshot" -#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32 /* 16KB */ +#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U /* 16KB */ #define DM_PREFETCH_CHUNKS 12 diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index b7c20979bd19..322386ff5d22 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -87,7 +87,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) char b[BDEVNAME_SIZE]; char b2[BDEVNAME_SIZE]; struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL); - unsigned short blksize = 512; + unsigned blksize = 512; *private_conf = ERR_PTR(-ENOMEM); if (!conf) diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index 9340435a94a0..6c95dc471d4c 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c @@ -380,7 +380,8 @@ static void cec_data_cancel(struct cec_data *data, u8 tx_status) } else { list_del_init(&data->list); if (!(data->msg.tx_status & CEC_TX_STATUS_OK)) - data->adap->transmit_queue_sz--; + if (!WARN_ON(!data->adap->transmit_queue_sz)) + data->adap->transmit_queue_sz--; } if (data->msg.tx_status & CEC_TX_STATUS_OK) { @@ -432,6 +433,14 @@ static void cec_flush(struct cec_adapter *adap) * need to do anything special in that case. */ } + /* + * If something went wrong and this counter isn't what it should + * be, then this will reset it back to 0. Warn if it is not 0, + * since it indicates a bug, either in this framework or in a + * CEC driver. + */ + if (WARN_ON(adap->transmit_queue_sz)) + adap->transmit_queue_sz = 0; } /* @@ -456,7 +465,7 @@ int cec_thread_func(void *_adap) bool timeout = false; u8 attempts; - if (adap->transmitting) { + if (adap->transmit_in_progress) { int err; /* @@ -491,7 +500,7 @@ int cec_thread_func(void *_adap) goto unlock; } - if (adap->transmitting && timeout) { + if (adap->transmit_in_progress && timeout) { /* * If we timeout, then log that. Normally this does * not happen and it is an indication of a faulty CEC @@ -500,14 +509,18 @@ int cec_thread_func(void *_adap) * so much traffic on the bus that the adapter was * unable to transmit for CEC_XFER_TIMEOUT_MS (2.1s). */ - pr_warn("cec-%s: message %*ph timed out\n", adap->name, - adap->transmitting->msg.len, - adap->transmitting->msg.msg); + if (adap->transmitting) { + pr_warn("cec-%s: message %*ph timed out\n", adap->name, + adap->transmitting->msg.len, + adap->transmitting->msg.msg); + /* Just give up on this. */ + cec_data_cancel(adap->transmitting, + CEC_TX_STATUS_TIMEOUT); + } else { + pr_warn("cec-%s: transmit timed out\n", adap->name); + } adap->transmit_in_progress = false; adap->tx_timeouts++; - /* Just give up on this. 
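The cec-adap.c changes around this point guard the transmit_queue_sz decrements with WARN_ON and force the counter back to zero in cec_flush(), so an accounting bug trips a warning instead of letting the counter wrap. The defensive-decrement idiom, pulled out as a hypothetical helper:

	static void queue_sz_dec_sketch(unsigned int *sz)
	{
		if (!WARN_ON(!*sz))	/* warn instead of wrapping to UINT_MAX */
			(*sz)--;
	}
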
*/ - cec_data_cancel(adap->transmitting, - CEC_TX_STATUS_TIMEOUT); goto unlock; } @@ -522,7 +535,8 @@ int cec_thread_func(void *_adap) data = list_first_entry(&adap->transmit_queue, struct cec_data, list); list_del_init(&data->list); - adap->transmit_queue_sz--; + if (!WARN_ON(!data->adap->transmit_queue_sz)) + adap->transmit_queue_sz--; /* Make this the current transmitting message */ adap->transmitting = data; @@ -1085,11 +1099,11 @@ void cec_received_msg_ts(struct cec_adapter *adap, valid_la = false; else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED)) valid_la = false; - else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4)) + else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST)) valid_la = false; else if (cec_msg_is_broadcast(msg) && - adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 && - !(dir_fl & BCAST2_0)) + adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0 && + !(dir_fl & BCAST1_4)) valid_la = false; } if (valid_la && min_len) { diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c index ac88ade94cda..59609556d969 100644 --- a/drivers/media/usb/pulse8-cec/pulse8-cec.c +++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c @@ -116,6 +116,7 @@ struct pulse8 { unsigned int vers; struct completion cmd_done; struct work_struct work; + u8 work_result; struct delayed_work ping_eeprom_work; struct cec_msg rx_msg; u8 data[DATA_SIZE]; @@ -137,8 +138,10 @@ static void pulse8_irq_work_handler(struct work_struct *work) { struct pulse8 *pulse8 = container_of(work, struct pulse8, work); + u8 result = pulse8->work_result; - switch (pulse8->data[0] & 0x3f) { + pulse8->work_result = 0; + switch (result & 0x3f) { case MSGCODE_FRAME_DATA: cec_received_msg(pulse8->adap, &pulse8->rx_msg); break; @@ -172,12 +175,12 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data, pulse8->escape = false; } else if (data == MSGEND) { struct cec_msg *msg = &pulse8->rx_msg; + u8 msgcode = pulse8->buf[0]; if (debug) dev_info(pulse8->dev, "received: %*ph\n", pulse8->idx, pulse8->buf); - pulse8->data[0] = pulse8->buf[0]; - switch (pulse8->buf[0] & 0x3f) { + switch (msgcode & 0x3f) { case MSGCODE_FRAME_START: msg->len = 1; msg->msg[0] = pulse8->buf[1]; @@ -186,14 +189,20 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data, if (msg->len == CEC_MAX_MSG_SIZE) break; msg->msg[msg->len++] = pulse8->buf[1]; - if (pulse8->buf[0] & MSGCODE_FRAME_EOM) + if (msgcode & MSGCODE_FRAME_EOM) { + WARN_ON(pulse8->work_result); + pulse8->work_result = msgcode; schedule_work(&pulse8->work); + break; + } break; case MSGCODE_TRANSMIT_SUCCEEDED: case MSGCODE_TRANSMIT_FAILED_LINE: case MSGCODE_TRANSMIT_FAILED_ACK: case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA: case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE: + WARN_ON(pulse8->work_result); + pulse8->work_result = msgcode; schedule_work(&pulse8->work); break; case MSGCODE_HIGH_ERROR: diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index f9ac22413000..1074b882c57c 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -100,19 +100,19 @@ struct buflist { * Function prototypes. Called from OS entry point mptctl_ioctl. * arg contents specific to function. 
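The mptctl.c refactor that follows threads a MPT_ADAPTER *iocp argument through every ioctl sub-handler, so the adapter is looked up and validated once in the entry point instead of each handler repeating the mpt_verify_adapter()/iocnum lookup. A compressed sketch of the resulting call shape; the dispatcher below is illustrative, not the full __mptctl_ioctl():

	static long ioctl_dispatch_sketch(int iocnum, unsigned int cmd, unsigned long arg)
	{
		MPT_ADAPTER *iocp = NULL;

		if (mpt_verify_adapter(iocnum, &iocp) < 0 || iocp == NULL)
			return -ENODEV;			/* resolved once, up front */

		switch (cmd) {
		case MPTTEST:
			return mptctl_readtest(iocp, arg);	/* handlers now take iocp */
		case MPTEVENTQUERY:
			return mptctl_eventquery(iocp, arg);
		default:
			return -EINVAL;
		}
	}
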
*/ -static int mptctl_fw_download(unsigned long arg); -static int mptctl_getiocinfo(unsigned long arg, unsigned int cmd); -static int mptctl_gettargetinfo(unsigned long arg); -static int mptctl_readtest(unsigned long arg); -static int mptctl_mpt_command(unsigned long arg); -static int mptctl_eventquery(unsigned long arg); -static int mptctl_eventenable(unsigned long arg); -static int mptctl_eventreport(unsigned long arg); -static int mptctl_replace_fw(unsigned long arg); - -static int mptctl_do_reset(unsigned long arg); -static int mptctl_hp_hostinfo(unsigned long arg, unsigned int cmd); -static int mptctl_hp_targetinfo(unsigned long arg); +static int mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_getiocinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd); +static int mptctl_gettargetinfo(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_readtest(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_mpt_command(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_eventquery(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_eventenable(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_eventreport(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_replace_fw(MPT_ADAPTER *iocp, unsigned long arg); + +static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_hp_hostinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd); +static int mptctl_hp_targetinfo(MPT_ADAPTER *iocp, unsigned long arg); static int mptctl_probe(struct pci_dev *, const struct pci_device_id *); static void mptctl_remove(struct pci_dev *); @@ -123,8 +123,8 @@ static long compat_mpctl_ioctl(struct file *f, unsigned cmd, unsigned long arg); /* * Private function calls. */ -static int mptctl_do_mpt_command(struct mpt_ioctl_command karg, void __user *mfPtr); -static int mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen); +static int mptctl_do_mpt_command(MPT_ADAPTER *iocp, struct mpt_ioctl_command karg, void __user *mfPtr); +static int mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen); static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags, struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc); static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, @@ -656,19 +656,19 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) * by TM and FW reloads. 
*/ if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) { - return mptctl_getiocinfo(arg, _IOC_SIZE(cmd)); + return mptctl_getiocinfo(iocp, arg, _IOC_SIZE(cmd)); } else if (cmd == MPTTARGETINFO) { - return mptctl_gettargetinfo(arg); + return mptctl_gettargetinfo(iocp, arg); } else if (cmd == MPTTEST) { - return mptctl_readtest(arg); + return mptctl_readtest(iocp, arg); } else if (cmd == MPTEVENTQUERY) { - return mptctl_eventquery(arg); + return mptctl_eventquery(iocp, arg); } else if (cmd == MPTEVENTENABLE) { - return mptctl_eventenable(arg); + return mptctl_eventenable(iocp, arg); } else if (cmd == MPTEVENTREPORT) { - return mptctl_eventreport(arg); + return mptctl_eventreport(iocp, arg); } else if (cmd == MPTFWREPLACE) { - return mptctl_replace_fw(arg); + return mptctl_replace_fw(iocp, arg); } /* All of these commands require an interrupt or @@ -678,15 +678,15 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ret; if (cmd == MPTFWDOWNLOAD) - ret = mptctl_fw_download(arg); + ret = mptctl_fw_download(iocp, arg); else if (cmd == MPTCOMMAND) - ret = mptctl_mpt_command(arg); + ret = mptctl_mpt_command(iocp, arg); else if (cmd == MPTHARDRESET) - ret = mptctl_do_reset(arg); + ret = mptctl_do_reset(iocp, arg); else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK)) - ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd)); + ret = mptctl_hp_hostinfo(iocp, arg, _IOC_SIZE(cmd)); else if (cmd == HP_GETTARGETINFO) - ret = mptctl_hp_targetinfo(arg); + ret = mptctl_hp_targetinfo(iocp, arg); else ret = -EINVAL; @@ -705,11 +705,10 @@ mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ret; } -static int mptctl_do_reset(unsigned long arg) +static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg) { struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg; struct mpt_ioctl_diag_reset krinfo; - MPT_ADAPTER *iocp; if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - " @@ -718,12 +717,6 @@ static int mptctl_do_reset(unsigned long arg) return -EFAULT; } - if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) { - printk(KERN_DEBUG MYNAM "%s@%d::mptctl_do_reset - ioc%d not found!\n", - __FILE__, __LINE__, krinfo.hdr.iocnum); - return -ENODEV; /* (-6) No such device or address */ - } - dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n", iocp->name)); @@ -754,7 +747,7 @@ static int mptctl_do_reset(unsigned long arg) * -ENOMSG if FW upload returned bad status */ static int -mptctl_fw_download(unsigned long arg) +mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg) { struct mpt_fw_xfer __user *ufwdl = (void __user *) arg; struct mpt_fw_xfer kfwdl; @@ -766,7 +759,7 @@ mptctl_fw_download(unsigned long arg) return -EFAULT; } - return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen); + return mptctl_do_fw_download(iocp, kfwdl.bufp, kfwdl.fwlen); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -784,11 +777,10 @@ mptctl_fw_download(unsigned long arg) * -ENOMSG if FW upload returned bad status */ static int -mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) +mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen) { FWDownload_t *dlmsg; MPT_FRAME_HDR *mf; - MPT_ADAPTER *iocp; FWDownloadTCSGE_t *ptsge; MptSge_t *sgl, *sgIn; char *sgOut; @@ -808,17 +800,10 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) pFWDownloadReply_t ReplyMsg = NULL; unsigned long 
timeleft; - if (mpt_verify_adapter(ioc, &iocp) < 0) { - printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n", - ioc); - return -ENODEV; /* (-6) No such device or address */ - } else { - - /* Valid device. Get a message frame and construct the FW download message. - */ - if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) - return -EAGAIN; - } + /* Valid device. Get a message frame and construct the FW download message. + */ + if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) + return -EAGAIN; dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id)); @@ -826,8 +811,6 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) iocp->name, ufwbuf)); dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n", iocp->name, (int)fwlen)); - dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.ioc = %04xh\n", - iocp->name, ioc)); dlmsg = (FWDownload_t*) mf; ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL; @@ -1238,13 +1221,11 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE * -ENODEV if no such device/adapter */ static int -mptctl_getiocinfo (unsigned long arg, unsigned int data_size) +mptctl_getiocinfo (MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size) { struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg; struct mpt_ioctl_iocinfo *karg; - MPT_ADAPTER *ioc; struct pci_dev *pdev; - int iocnum; unsigned int port; int cim_rev; struct scsi_device *sdev; @@ -1272,14 +1253,6 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size) return PTR_ERR(karg); } - if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - kfree(karg); - return -ENODEV; - } - /* Verify the data transfer size is correct. 
*/ if (karg->hdr.maxDataSize != data_size) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - " @@ -1385,15 +1358,13 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size) * -ENODEV if no such device/adapter */ static int -mptctl_gettargetinfo (unsigned long arg) +mptctl_gettargetinfo (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg; struct mpt_ioctl_targetinfo karg; - MPT_ADAPTER *ioc; VirtDevice *vdevice; char *pmem; int *pdata; - int iocnum; int numDevices = 0; int lun; int maxWordsLeft; @@ -1408,13 +1379,6 @@ mptctl_gettargetinfo (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n", ioc->name)); /* Get the port number and set the maximum number of bytes @@ -1510,12 +1474,10 @@ mptctl_gettargetinfo (unsigned long arg) * -ENODEV if no such device/adapter */ static int -mptctl_readtest (unsigned long arg) +mptctl_readtest (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_test __user *uarg = (void __user *) arg; struct mpt_ioctl_test karg; - MPT_ADAPTER *ioc; - int iocnum; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_readtest - " @@ -1524,13 +1486,6 @@ mptctl_readtest (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_readtest() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_readtest called.\n", ioc->name)); /* Fill in the data and return the structure to the calling @@ -1571,12 +1526,10 @@ mptctl_readtest (unsigned long arg) * -ENODEV if no such device/adapter */ static int -mptctl_eventquery (unsigned long arg) +mptctl_eventquery (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg; struct mpt_ioctl_eventquery karg; - MPT_ADAPTER *ioc; - int iocnum; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - " @@ -1585,13 +1538,6 @@ mptctl_eventquery (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_eventquery() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n", ioc->name)); karg.eventEntries = MPTCTL_EVENT_LOG_SIZE; @@ -1610,12 +1556,10 @@ mptctl_eventquery (unsigned long arg) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int -mptctl_eventenable (unsigned long arg) +mptctl_eventenable (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg; struct mpt_ioctl_eventenable karg; - MPT_ADAPTER *ioc; - int iocnum; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - " @@ -1624,13 +1568,6 @@ mptctl_eventenable (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_eventenable() @%d - ioc%d not found!\n", - 
__FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n", ioc->name)); if (ioc->events == NULL) { @@ -1658,12 +1595,10 @@ mptctl_eventenable (unsigned long arg) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int -mptctl_eventreport (unsigned long arg) +mptctl_eventreport (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg; struct mpt_ioctl_eventreport karg; - MPT_ADAPTER *ioc; - int iocnum; int numBytes, maxEvents, max; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) { @@ -1673,12 +1608,6 @@ mptctl_eventreport (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_eventreport() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n", ioc->name)); @@ -1712,12 +1641,10 @@ mptctl_eventreport (unsigned long arg) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int -mptctl_replace_fw (unsigned long arg) +mptctl_replace_fw (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg; struct mpt_ioctl_replace_fw karg; - MPT_ADAPTER *ioc; - int iocnum; int newFwSize; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) { @@ -1727,13 +1654,6 @@ mptctl_replace_fw (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_replace_fw() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_replace_fw called.\n", ioc->name)); /* If caching FW, Free the old FW image @@ -1780,12 +1700,10 @@ mptctl_replace_fw (unsigned long arg) * -ENOMEM if memory allocation error */ static int -mptctl_mpt_command (unsigned long arg) +mptctl_mpt_command (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_command __user *uarg = (void __user *) arg; struct mpt_ioctl_command karg; - MPT_ADAPTER *ioc; - int iocnum; int rc; @@ -1796,14 +1714,7 @@ mptctl_mpt_command (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_mpt_command() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - - rc = mptctl_do_mpt_command (karg, &uarg->MF); + rc = mptctl_do_mpt_command (ioc, karg, &uarg->MF); return rc; } @@ -1821,9 +1732,8 @@ mptctl_mpt_command (unsigned long arg) * -EPERM if SCSI I/O and target is untagged */ static int -mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) +mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __user *mfPtr) { - MPT_ADAPTER *ioc; MPT_FRAME_HDR *mf = NULL; MPIHeader_t *hdr; char *psge; @@ -1832,7 +1742,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) dma_addr_t dma_addr_in; dma_addr_t dma_addr_out; int sgSize = 0; /* Num SG elements */ - int iocnum, flagsLength; + int flagsLength; int sz, rc = 0; int msgContext; u16 req_idx; @@ -1847,13 +1757,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) bufIn.kptr = bufOut.kptr = NULL; bufIn.len = bufOut.len = 0; - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, 
&ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - spin_lock_irqsave(&ioc->taskmgmt_lock, flags); if (ioc->ioc_reset_in_progress) { spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); @@ -2418,17 +2321,15 @@ done_free_mem: * -ENOMEM if memory allocation error */ static int -mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) +mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size) { hp_host_info_t __user *uarg = (void __user *) arg; - MPT_ADAPTER *ioc; struct pci_dev *pdev; char *pbuf=NULL; dma_addr_t buf_dma; hp_host_info_t karg; CONFIGPARMS cfg; ConfigPageHeader_t hdr; - int iocnum; int rc, cim_rev; ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; MPT_FRAME_HDR *mf = NULL; @@ -2452,12 +2353,6 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n", ioc->name)); @@ -2659,15 +2554,13 @@ retry_wait: * -ENOMEM if memory allocation error */ static int -mptctl_hp_targetinfo(unsigned long arg) +mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg) { hp_target_info_t __user *uarg = (void __user *) arg; SCSIDevicePage0_t *pg0_alloc; SCSIDevicePage3_t *pg3_alloc; - MPT_ADAPTER *ioc; MPT_SCSI_HOST *hd = NULL; hp_target_info_t karg; - int iocnum; int data_sz; dma_addr_t page_dma; CONFIGPARMS cfg; @@ -2681,12 +2574,6 @@ mptctl_hp_targetinfo(unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_hp_targetinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } if (karg.hdr.id >= MPT_MAX_FC_DEVICES) return -EINVAL; dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n", @@ -2854,7 +2741,7 @@ compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd, kfw.fwlen = kfw32.fwlen; kfw.bufp = compat_ptr(kfw32.bufp); - ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen); + ret = mptctl_do_fw_download(iocp, kfw.bufp, kfw.fwlen); mutex_unlock(&iocp->ioctl_cmds.mutex); @@ -2908,7 +2795,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd, /* Pass new structure to do_mpt_command */ - ret = mptctl_do_mpt_command (karg, &uarg->MF); + ret = mptctl_do_mpt_command (iocp, karg, &uarg->MF); mutex_unlock(&iocp->ioctl_cmds.mutex); diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index 6d27ccfe0680..3c2d405bc79b 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -406,10 +406,9 @@ int enclosure_remove_device(struct enclosure_device *edev, struct device *dev) cdev = &edev->component[i]; if (cdev->dev == dev) { enclosure_remove_links(cdev); - device_del(&cdev->cdev); put_device(dev); cdev->dev = NULL; - return device_add(&cdev->cdev); + return 0; } } return -ENODEV; diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c index 8850f475a413..0bf08678431b 100644 --- a/drivers/misc/habanalabs/command_submission.c +++ b/drivers/misc/habanalabs/command_submission.c @@ -824,8 +824,9 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) memset(args, 0, sizeof(*args)); if (rc < 0) { - dev_err(hdev->dev, 
"Error %ld on waiting for CS handle %llu\n", - rc, seq); + dev_err_ratelimited(hdev->dev, + "Error %ld on waiting for CS handle %llu\n", + rc, seq); if (rc == -ERESTARTSYS) { args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED; rc = -EINTR; diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c index 17db7b3dfb4c..2df6fb87e7ff 100644 --- a/drivers/misc/habanalabs/context.c +++ b/drivers/misc/habanalabs/context.c @@ -176,7 +176,7 @@ struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq) spin_lock(&ctx->cs_lock); if (seq >= ctx->cs_sequence) { - dev_notice(hdev->dev, + dev_notice_ratelimited(hdev->dev, "Can't wait on seq %llu because current CS is at seq %llu\n", seq, ctx->cs_sequence); spin_unlock(&ctx->cs_lock); diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index c8d16aa4382c..7344e8a222ae 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -2192,7 +2192,7 @@ static int goya_push_linux_to_device(struct hl_device *hdev) static int goya_pldm_init_cpu(struct hl_device *hdev) { - u32 val, unit_rst_val; + u32 unit_rst_val; int rc; /* Must initialize SRAM scrambler before pushing u-boot to SRAM */ @@ -2200,14 +2200,14 @@ static int goya_pldm_init_cpu(struct hl_device *hdev) /* Put ARM cores into reset */ WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT); - val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL); + RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL); /* Reset the CA53 MACRO */ unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET); - val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); + RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val); - val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); + RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); rc = goya_push_uboot_to_device(hdev); if (rc) @@ -2228,7 +2228,7 @@ static int goya_pldm_init_cpu(struct hl_device *hdev) /* Release ARM core 0 from reset */ WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_CORE0_DEASSERT); - val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL); + RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL); return 0; } @@ -2502,13 +2502,12 @@ err: static int goya_hw_init(struct hl_device *hdev) { struct asic_fixed_properties *prop = &hdev->asic_prop; - u32 val; int rc; dev_info(hdev->dev, "Starting initialization of H/W\n"); /* Perform read from the device to make sure device is up */ - val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG); + RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG); /* * Let's mark in the H/W that we have reached this point. 
We check @@ -2560,7 +2559,7 @@ static int goya_hw_init(struct hl_device *hdev) goto disable_queues; /* Perform read from the device to flush all MSI-X configuration */ - val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG); + RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG); return 0; diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index a4fdad04809a..de87693cf557 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -278,7 +278,7 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void) void lkdtm_UNSET_SMEP(void) { -#ifdef CONFIG_X86_64 +#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML) #define MOV_CR4_DEPTH 64 void (*direct_write_cr4)(unsigned long val); unsigned char *insn; @@ -338,13 +338,13 @@ void lkdtm_UNSET_SMEP(void) native_write_cr4(cr4); } #else - pr_err("FAIL: this test is x86_64-only\n"); + pr_err("XFAIL: this test is x86_64-only\n"); #endif } -#ifdef CONFIG_X86_32 void lkdtm_DOUBLE_FAULT(void) { +#ifdef CONFIG_X86_32 /* * Trigger #DF by setting the stack limit to zero. This clobbers * a GDT TLS slot, which is okay because the current task will die @@ -373,6 +373,8 @@ void lkdtm_DOUBLE_FAULT(void) asm volatile ("movw %0, %%ss; addl $0, (%%esp)" :: "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3))); - panic("tried to double fault but didn't die\n"); -} + pr_err("FAIL: tried to double fault but didn't die\n"); +#else + pr_err("XFAIL: this test is ia32-only\n"); #endif +} diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c index 994563a078eb..de8a66b9d76b 100644 --- a/drivers/misc/ocxl/context.c +++ b/drivers/misc/ocxl/context.c @@ -10,18 +10,17 @@ int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu, int pasid; struct ocxl_context *ctx; - *context = kzalloc(sizeof(struct ocxl_context), GFP_KERNEL); - if (!*context) + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) return -ENOMEM; - ctx = *context; - ctx->afu = afu; mutex_lock(&afu->contexts_lock); pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base, afu->pasid_base + afu->pasid_max, GFP_KERNEL); if (pasid < 0) { mutex_unlock(&afu->contexts_lock); + kfree(ctx); return pasid; } afu->pasid_count++; @@ -43,6 +42,7 @@ int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu, * duration of the life of the context */ ocxl_afu_get(afu); + *context = ctx; return 0; } EXPORT_SYMBOL_GPL(ocxl_context_alloc); diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c index 2870c25da166..4d1b44de1492 100644 --- a/drivers/misc/ocxl/file.c +++ b/drivers/misc/ocxl/file.c @@ -18,18 +18,15 @@ static struct class *ocxl_class; static struct mutex minors_idr_lock; static struct idr minors_idr; -static struct ocxl_file_info *find_file_info(dev_t devno) +static struct ocxl_file_info *find_and_get_file_info(dev_t devno) { struct ocxl_file_info *info; - /* - * We don't declare an RCU critical section here, as our AFU - * is protected by a reference counter on the device. By the time the - * info reference is removed from the idr, the ref count of - * the device is already at 0, so no user API will access that AFU and - * this function can't return it. 
- */ + mutex_lock(&minors_idr_lock); info = idr_find(&minors_idr, MINOR(devno)); + if (info) + get_device(&info->dev); + mutex_unlock(&minors_idr_lock); return info; } @@ -58,14 +55,16 @@ static int afu_open(struct inode *inode, struct file *file) pr_debug("%s for device %x\n", __func__, inode->i_rdev); - info = find_file_info(inode->i_rdev); + info = find_and_get_file_info(inode->i_rdev); if (!info) return -ENODEV; rc = ocxl_context_alloc(&ctx, info->afu, inode->i_mapping); - if (rc) + if (rc) { + put_device(&info->dev); return rc; - + } + put_device(&info->dev); file->private_data = ctx; return 0; } @@ -487,7 +486,6 @@ static void info_release(struct device *dev) { struct ocxl_file_info *info = container_of(dev, struct ocxl_file_info, dev); - free_minor(info); ocxl_afu_put(info->afu); kfree(info); } @@ -577,6 +575,7 @@ void ocxl_file_unregister_afu(struct ocxl_afu *afu) ocxl_file_make_invisible(info); ocxl_sysfs_unregister_afu(info); + free_minor(info); device_unregister(&info->dev); } diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 189e42674d85..010fe29a4888 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -228,6 +228,7 @@ #define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */ #define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */ +#define MSDC_PATCH_BIT1_CMDTA (0x7 << 3) /* RW */ #define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */ #define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */ @@ -1881,6 +1882,7 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode) /* select EMMC50 PAD CMD tune */ sdr_set_bits(host->base + PAD_CMD_TUNE, BIT(0)); + sdr_set_field(host->base + MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMDTA, 2); if (mmc->ios.timing == MMC_TIMING_MMC_HS200 || mmc->ios.timing == MMC_TIMING_UHS_SDR104) diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index b75c82d8d6c1..3d0bb5e2e09b 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -99,7 +99,7 @@ #define CORE_PWRSAVE_DLL BIT(3) -#define DDR_CONFIG_POR_VAL 0x80040853 +#define DDR_CONFIG_POR_VAL 0x80040873 #define INVALID_TUNING_PHASE -1 @@ -148,8 +148,9 @@ struct sdhci_msm_offset { u32 core_ddr_200_cfg; u32 core_vendor_spec3; u32 core_dll_config_2; + u32 core_dll_config_3; + u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */ u32 core_ddr_config; - u32 core_ddr_config_2; }; static const struct sdhci_msm_offset sdhci_msm_v5_offset = { @@ -177,8 +178,8 @@ static const struct sdhci_msm_offset sdhci_msm_v5_offset = { .core_ddr_200_cfg = 0x224, .core_vendor_spec3 = 0x250, .core_dll_config_2 = 0x254, - .core_ddr_config = 0x258, - .core_ddr_config_2 = 0x25c, + .core_dll_config_3 = 0x258, + .core_ddr_config = 0x25c, }; static const struct sdhci_msm_offset sdhci_msm_mci_offset = { @@ -207,8 +208,8 @@ static const struct sdhci_msm_offset sdhci_msm_mci_offset = { .core_ddr_200_cfg = 0x184, .core_vendor_spec3 = 0x1b0, .core_dll_config_2 = 0x1b4, - .core_ddr_config = 0x1b8, - .core_ddr_config_2 = 0x1bc, + .core_ddr_config_old = 0x1b8, + .core_ddr_config = 0x1bc, }; struct sdhci_msm_variant_ops { @@ -253,6 +254,7 @@ struct sdhci_msm_host { const struct sdhci_msm_offset *offset; bool use_cdr; u32 transfer_mode; + bool updated_ddr_cfg; }; static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host) @@ -924,8 +926,10 @@ out: static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host) { struct mmc_host *mmc = host->mmc; - u32 dll_status, config; + u32 dll_status, config, ddr_cfg_offset; int ret; + 
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host); @@ -938,8 +942,11 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host) * bootloaders. In the future, if this changes, then the desired * values will need to be programmed appropriately. */ - writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + - msm_offset->core_ddr_config); + if (msm_host->updated_ddr_cfg) + ddr_cfg_offset = msm_offset->core_ddr_config; + else + ddr_cfg_offset = msm_offset->core_ddr_config_old; + writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + ddr_cfg_offset); if (mmc->ios.enhanced_strobe) { config = readl_relaxed(host->ioaddr + @@ -1899,6 +1906,9 @@ static int sdhci_msm_probe(struct platform_device *pdev) msm_offset->core_vendor_spec_capabilities0); } + if (core_major == 1 && core_minor >= 0x49) + msm_host->updated_ddr_cfg = true; + /* * Power on reset state may trigger power irq if previous status of * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 5cca3fa4610b..500f70a6ee42 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -80,6 +80,7 @@ struct sdhci_esdhc { bool quirk_tuning_erratum_type1; bool quirk_tuning_erratum_type2; bool quirk_ignore_data_inhibit; + bool quirk_delay_before_data_reset; bool in_sw_tuning; unsigned int peripheral_clock; const struct esdhc_clk_fixup *clk_fixup; @@ -759,14 +760,16 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask) struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); u32 val; + if (esdhc->quirk_delay_before_data_reset && + (mask & SDHCI_RESET_DATA) && + (host->flags & SDHCI_REQ_USE_DMA)) + mdelay(5); + sdhci_reset(host, mask); sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); - if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) - mdelay(5); - if (mask & SDHCI_RESET_ALL) { val = sdhci_readl(host, ESDHC_TBCTL); val &= ~ESDHC_TB_EN; @@ -1221,6 +1224,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) if (match) esdhc->clk_fixup = match->data; np = pdev->dev.of_node; + + if (of_device_is_compatible(np, "fsl,p2020-esdhc")) + esdhc->quirk_delay_before_data_reset = true; + clk = of_clk_get(np, 0); if (!IS_ERR(clk)) { /* @@ -1303,8 +1310,8 @@ static int sdhci_esdhc_probe(struct platform_device *pdev) host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) { - host->quirks2 |= SDHCI_QUIRK_RESET_AFTER_REQUEST; - host->quirks2 |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST; + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; } if (of_device_is_compatible(np, "fsl,p5040-esdhc") || diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index acefb76b4e15..5091e2c1c0e5 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -27,6 +27,7 @@ #include <linux/mmc/slot-gpio.h> #include <linux/mmc/sdhci-pci-data.h> #include <linux/acpi.h> +#include <linux/dmi.h> #ifdef CONFIG_X86 #include <asm/iosf_mbi.h> @@ -783,11 +784,18 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) return 0; } +static bool glk_broken_cqhci(struct sdhci_pci_slot *slot) +{ + return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && + dmi_match(DMI_BIOS_VENDOR, 
"LENOVO"); +} + static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) { int ret = byt_emmc_probe_slot(slot); - slot->host->mmc->caps2 |= MMC_CAP2_CQE; + if (!glk_broken_cqhci(slot)) + slot->host->mmc->caps2 |= MMC_CAP2_CQE; if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) { slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES, diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 3140fe2e5dba..1b1c26da3fe0 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1882,9 +1882,7 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) ctrl_2 |= SDHCI_CTRL_UHS_SDR104; else if (timing == MMC_TIMING_UHS_SDR12) ctrl_2 |= SDHCI_CTRL_UHS_SDR12; - else if (timing == MMC_TIMING_SD_HS || - timing == MMC_TIMING_MMC_HS || - timing == MMC_TIMING_UHS_SDR25) + else if (timing == MMC_TIMING_UHS_SDR25) ctrl_2 |= SDHCI_CTRL_UHS_SDR25; else if (timing == MMC_TIMING_UHS_SDR50) ctrl_2 |= SDHCI_CTRL_UHS_SDR50; @@ -2419,8 +2417,8 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) sdhci_send_tuning(host, opcode); if (!host->tuning_done) { - pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n", - mmc_hostname(host->mmc)); + pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n", + mmc_hostname(host->mmc)); sdhci_abort_tuning(host, opcode); return -ETIMEDOUT; } @@ -3769,6 +3767,9 @@ int sdhci_setup_host(struct sdhci_host *host) mmc_hostname(mmc), host->version); } + if (host->quirks & SDHCI_QUIRK_BROKEN_CQE) + mmc->caps2 &= ~MMC_CAP2_CQE; + if (host->quirks & SDHCI_QUIRK_FORCE_DMA) host->flags |= SDHCI_USE_SDMA; else if (!(host->caps & SDHCI_CAN_DO_SDMA)) diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 0ed3e0eaef5f..fe83ece6965b 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -409,6 +409,8 @@ struct sdhci_host { #define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15) /* Controller reports inverted write-protect state */ #define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16) +/* Controller has unusable command queue engine */ +#define SDHCI_QUIRK_BROKEN_CQE (1<<17) /* Controller does not like fast PIO transfers */ #define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18) /* Controller does not have a LED */ diff --git a/drivers/mtd/nand/onenand/omap2.c b/drivers/mtd/nand/onenand/omap2.c index edf94ee54ec7..aa9368bf7a0c 100644 --- a/drivers/mtd/nand/onenand/omap2.c +++ b/drivers/mtd/nand/onenand/omap2.c @@ -148,13 +148,13 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state) unsigned long timeout; u32 syscfg; - if (state == FL_RESETING || state == FL_PREPARING_ERASE || + if (state == FL_RESETTING || state == FL_PREPARING_ERASE || state == FL_VERIFYING_ERASE) { int i = 21; unsigned int intr_flags = ONENAND_INT_MASTER; switch (state) { - case FL_RESETING: + case FL_RESETTING: intr_flags |= ONENAND_INT_RESET; break; case FL_PREPARING_ERASE: @@ -328,7 +328,8 @@ static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c, struct dma_async_tx_descriptor *tx; dma_cookie_t cookie; - tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0); + tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); if (!tx) { dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n"); return -EIO; @@ -375,7 +376,7 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area, * context fallback to PIO mode. 
*/ if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 || - count < 384 || in_interrupt() || oops_in_progress ) + count < 384 || in_interrupt() || oops_in_progress) goto out_copy; xtra = count & 3; @@ -422,7 +423,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area, * context fallback to PIO mode. */ if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 || - count < 384 || in_interrupt() || oops_in_progress ) + count < 384 || in_interrupt() || oops_in_progress) goto out_copy; dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE); @@ -528,7 +529,8 @@ static int omap2_onenand_probe(struct platform_device *pdev) c->gpmc_cs, c->phys_base, c->onenand.base, c->dma_chan ? "DMA" : "PIO"); - if ((r = onenand_scan(&c->mtd, 1)) < 0) + r = onenand_scan(&c->mtd, 1); + if (r < 0) goto err_release_dma; freq = omap2_onenand_get_freq(c->onenand.version_id); diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c index 77bd32a683e1..85640ee11c86 100644 --- a/drivers/mtd/nand/onenand/onenand_base.c +++ b/drivers/mtd/nand/onenand/onenand_base.c @@ -2853,7 +2853,7 @@ static int onenand_otp_write_oob_nolock(struct mtd_info *mtd, loff_t to, /* Exit OTP access mode */ this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS); status &= 0x60; @@ -2924,7 +2924,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len, /* Exit OTP access mode */ this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); return ret; } @@ -2968,7 +2968,7 @@ static int do_otp_write(struct mtd_info *mtd, loff_t to, size_t len, /* Exit OTP access mode */ this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); return ret; } @@ -3008,7 +3008,7 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len, /* Exit OTP access mode */ this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); } else { ops.mode = MTD_OPS_PLACE_OOB; ops.ooblen = len; @@ -3413,7 +3413,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd) this->boundary[die] = bdry & FLEXONENAND_PI_MASK; this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); printk(KERN_INFO "Die %d boundary: %d%s\n", die, this->boundary[die], locked ? 
"(Locked)" : "(Unlocked)"); @@ -3635,7 +3635,7 @@ static int flexonenand_set_boundary(struct mtd_info *mtd, int die, ret = this->wait(mtd, FL_WRITING); out: this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); if (!ret) /* Recalculate device size on boundary change*/ flexonenand_get_size(mtd); @@ -3671,7 +3671,7 @@ static int onenand_chip_probe(struct mtd_info *mtd) /* Reset OneNAND to read default register values */ this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_BOOTRAM); /* Wait reset */ - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); /* Restore system configuration 1 */ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1); diff --git a/drivers/mtd/nand/onenand/samsung_mtd.c b/drivers/mtd/nand/onenand/samsung_mtd.c index 55e5536a5850..beb7987e4c2b 100644 --- a/drivers/mtd/nand/onenand/samsung_mtd.c +++ b/drivers/mtd/nand/onenand/samsung_mtd.c @@ -675,12 +675,12 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area, normal: if (count != mtd->writesize) { /* Copy the bufferram to memory to prevent unaligned access */ - memcpy(this->page_buf, p, mtd->writesize); - p = this->page_buf + offset; + memcpy_fromio(this->page_buf, p, mtd->writesize); + memcpy(buffer, this->page_buf + offset, count); + } else { + memcpy_fromio(buffer, p, count); } - memcpy(buffer, p, count); - return 0; } diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index 3a36285a8d8a..f6c7102a1e32 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -914,8 +914,8 @@ static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl) /* Prepare CDMA descriptor. 
*/ static void cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl, - char nf_mem, u32 flash_ptr, char *mem_ptr, - char *ctrl_data_ptr, u16 ctype) + char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr, + dma_addr_t ctrl_data_ptr, u16 ctype) { struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc; @@ -931,13 +931,13 @@ cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl, cdma_desc->command_flags |= CDMA_CF_DMA_MASTER; cdma_desc->command_flags |= CDMA_CF_INT; - cdma_desc->memory_pointer = (uintptr_t)mem_ptr; + cdma_desc->memory_pointer = mem_ptr; cdma_desc->status = 0; cdma_desc->sync_flag_pointer = 0; cdma_desc->sync_arguments = 0; cdma_desc->command_type = ctype; - cdma_desc->ctrl_data_ptr = (uintptr_t)ctrl_data_ptr; + cdma_desc->ctrl_data_ptr = ctrl_data_ptr; } static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl, @@ -1280,8 +1280,7 @@ cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr, } cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page, - (void *)dma_buf, (void *)dma_ctrl_dat, - ctype); + dma_buf, dma_ctrl_dat, ctype); status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr); @@ -1360,7 +1359,7 @@ static int cadence_nand_erase(struct nand_chip *chip, u32 page) cadence_nand_cdma_desc_prepare(cdns_ctrl, cdns_chip->cs[chip->cur_cs], - page, NULL, NULL, + page, 0, 0, CDMA_CT_ERASE); status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr); if (status) { diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 334fe3130285..b9d5d55a5edb 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -148,6 +148,10 @@ static int gpmi_init(struct gpmi_nand_data *this) struct resources *r = &this->resources; int ret; + ret = pm_runtime_get_sync(this->dev); + if (ret < 0) + return ret; + ret = gpmi_reset_block(r->gpmi_regs, false); if (ret) goto err_out; @@ -179,8 +183,9 @@ static int gpmi_init(struct gpmi_nand_data *this) */ writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET); - return 0; err_out: + pm_runtime_mark_last_busy(this->dev); + pm_runtime_put_autosuspend(this->dev); return ret; } @@ -2722,6 +2727,10 @@ static int gpmi_pm_resume(struct device *dev) return ret; } + /* Set flag to get timing setup restored for next exec_op */ + if (this->hw.clk_rate) + this->hw.must_apply_timings = true; + /* re-init the BCH registers */ ret = bch_set_geometry(this); if (ret) { diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 9e63800f768a..3ba73f18841f 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -37,6 +37,7 @@ /* Max ECC buffer length */ #define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG) +#define FMC2_TIMEOUT_US 1000 #define FMC2_TIMEOUT_MS 1000 /* Timings */ @@ -53,6 +54,8 @@ #define FMC2_PMEM 0x88 #define FMC2_PATT 0x8c #define FMC2_HECCR 0x94 +#define FMC2_ISR 0x184 +#define FMC2_ICR 0x188 #define FMC2_CSQCR 0x200 #define FMC2_CSQCFGR1 0x204 #define FMC2_CSQCFGR2 0x208 @@ -118,6 +121,12 @@ #define FMC2_PATT_ATTHIZ(x) (((x) & 0xff) << 24) #define FMC2_PATT_DEFAULT 0x0a0a0a0a +/* Register: FMC2_ISR */ +#define FMC2_ISR_IHLF BIT(1) + +/* Register: FMC2_ICR */ +#define FMC2_ICR_CIHLF BIT(1) + /* Register: FMC2_CSQCR */ #define FMC2_CSQCR_CSQSTART BIT(0) @@ -1322,6 +1331,31 @@ static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf, stm32_fmc2_set_buswidth_16(fmc2, true); } 
+static int stm32_fmc2_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + const struct nand_sdr_timings *timings; + u32 isr, sr; + + /* Check if there is no pending requests to the NAND flash */ + if (readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_SR, sr, + sr & FMC2_SR_NWRF, 1, + FMC2_TIMEOUT_US)) + dev_warn(fmc2->dev, "Waitrdy timeout\n"); + + /* Wait tWB before R/B# signal is low */ + timings = nand_get_sdr_timings(&chip->data_interface); + ndelay(PSEC_TO_NSEC(timings->tWB_max)); + + /* R/B# signal is low, clear high level flag */ + writel_relaxed(FMC2_ICR_CIHLF, fmc2->io_base + FMC2_ICR); + + /* Wait R/B# signal is high */ + return readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_ISR, + isr, isr & FMC2_ISR_IHLF, + 5, 1000 * timeout_ms); +} + static int stm32_fmc2_exec_op(struct nand_chip *chip, const struct nand_operation *op, bool check_only) @@ -1366,8 +1400,8 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip, break; case NAND_OP_WAITRDY_INSTR: - ret = nand_soft_waitrdy(chip, - instr->ctx.waitrdy.timeout_ms); + ret = stm32_fmc2_waitrdy(chip, + instr->ctx.waitrdy.timeout_ms); break; } } diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index 4744bf94ad9a..b9f272408c4d 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c @@ -247,7 +247,8 @@ static int sm_read_sector(struct sm_ftl *ftl, /* FTL can contain -1 entries that are by default filled with bits */ if (block == -1) { - memset(buffer, 0xFF, SM_SECTOR_SIZE); + if (buffer) + memset(buffer, 0xFF, SM_SECTOR_SIZE); return 0; } diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index f4afe123e9dc..b0cd443dd758 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -2124,6 +2124,8 @@ static int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor) if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1) return 0; + nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1; + return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]); } @@ -4596,6 +4598,7 @@ static void sst_set_default_init(struct spi_nor *nor) static void st_micron_set_default_init(struct spi_nor *nor) { nor->flags |= SNOR_F_HAS_LOCK; + nor->flags &= ~SNOR_F_HAS_16BIT_SR; nor->params.quad_enable = NULL; nor->params.set_4byte = st_micron_set_4byte; } @@ -4768,9 +4771,7 @@ static void spi_nor_info_init_params(struct spi_nor *nor) static void spansion_post_sfdp_fixups(struct spi_nor *nor) { - struct mtd_info *mtd = &nor->mtd; - - if (mtd->size <= SZ_16M) + if (nor->params.size <= SZ_16M) return; nor->flags |= SNOR_F_4B_OPCODES; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index fcb7c2f7f001..48d5ec770b94 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2272,9 +2272,6 @@ static void bond_miimon_commit(struct bonding *bond) } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { /* make it immediately active */ bond_set_active_slave(slave); - } else if (slave != primary) { - /* prevent it from being the active one */ - bond_set_backup_slave(slave); } slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n", @@ -3702,32 +3699,35 @@ static int bond_neigh_init(struct neighbour *n) const struct net_device_ops *slave_ops; struct neigh_parms parms; struct slave *slave; - int ret; + int ret = 0; - slave = bond_first_slave(bond); + rcu_read_lock(); + slave = bond_first_slave_rcu(bond); if (!slave) - return 0; + goto out; slave_ops = slave->dev->netdev_ops; if 
(!slave_ops->ndo_neigh_setup) - return 0; - - parms.neigh_setup = NULL; - parms.neigh_cleanup = NULL; - ret = slave_ops->ndo_neigh_setup(slave->dev, &parms); - if (ret) - return ret; + goto out; - /* Assign slave's neigh_cleanup to neighbour in case cleanup is called - * after the last slave has been detached. Assumes that all slaves - * utilize the same neigh_cleanup (true at this writing as only user - * is ipoib). + /* TODO: find another way [1] to implement this. + * Passing a zeroed structure is fragile, + * but at least we do not pass garbage. + * + * [1] One way would be that ndo_neigh_setup() never touch + * struct neigh_parms, but propagate the new neigh_setup() + * back to ___neigh_create() / neigh_parms_alloc() */ - n->parms->neigh_cleanup = parms.neigh_cleanup; + memset(&parms, 0, sizeof(parms)); + ret = slave_ops->ndo_neigh_setup(slave->dev, &parms); - if (!parms.neigh_setup) - return 0; + if (ret) + goto out; - return parms.neigh_setup(n); + if (parms.neigh_setup) + ret = parms.neigh_setup(n); +out: + rcu_read_unlock(); + return ret; } /* The bonding ndo_neigh_setup is called at init time beofre any diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index a929cdda9ab2..94d10ec954a0 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -389,6 +389,34 @@ static struct flexcan_mb __iomem *flexcan_get_mb(const struct flexcan_priv *priv (&priv->regs->mb[bank][priv->mb_size * mb_index]); } +static int flexcan_low_power_enter_ack(struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->regs; + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; + + while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) + udelay(10); + + if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) + return -ETIMEDOUT; + + return 0; +} + +static int flexcan_low_power_exit_ack(struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->regs; + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; + + while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) + udelay(10); + + if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK) + return -ETIMEDOUT; + + return 0; +} + static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable) { struct flexcan_regs __iomem *regs = priv->regs; @@ -407,7 +435,6 @@ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable) static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int ackval; u32 reg_mcr; reg_mcr = priv->read(&regs->mcr); @@ -418,36 +445,24 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv) regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, 1 << priv->stm.req_bit, 1 << priv->stm.req_bit); - /* get stop acknowledgment */ - if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr, - ackval, ackval & (1 << priv->stm.ack_bit), - 0, FLEXCAN_TIMEOUT_US)) - return -ETIMEDOUT; - - return 0; + return flexcan_low_power_enter_ack(priv); } static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int ackval; u32 reg_mcr; /* remove stop request */ regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, 1 << priv->stm.req_bit, 0); - /* get stop acknowledgment */ - if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr, - ackval, !(ackval & (1 << priv->stm.ack_bit)), - 0, FLEXCAN_TIMEOUT_US)) - return -ETIMEDOUT; reg_mcr = priv->read(&regs->mcr); reg_mcr &= ~FLEXCAN_MCR_SLF_WAK; priv->write(reg_mcr, &regs->mcr); -
return 0; + return flexcan_low_power_exit_ack(priv); } static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv) @@ -506,39 +521,25 @@ static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv) static int flexcan_chip_enable(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; u32 reg; reg = priv->read(&regs->mcr); reg &= ~FLEXCAN_MCR_MDIS; priv->write(reg, &regs->mcr); - while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) - udelay(10); - - if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK) - return -ETIMEDOUT; - - return 0; + return flexcan_low_power_exit_ack(priv); } static int flexcan_chip_disable(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; u32 reg; reg = priv->read(&regs->mcr); reg |= FLEXCAN_MCR_MDIS; priv->write(reg, &regs->mcr); - while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) - udelay(10); - - if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) - return -ETIMEDOUT; - - return 0; + return flexcan_low_power_enter_ack(priv); } static int flexcan_chip_freeze(struct flexcan_priv *priv) @@ -1722,6 +1723,9 @@ static int __maybe_unused flexcan_resume(struct device *device) netif_start_queue(dev); if (device_may_wakeup(device)) { disable_irq_wake(dev->irq); + err = flexcan_exit_stop_mode(priv); + if (err) + return err; } else { err = pm_runtime_force_resume(device); if (err) @@ -1767,14 +1771,9 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); - int err; - if (netif_running(dev) && device_may_wakeup(device)) { + if (netif_running(dev) && device_may_wakeup(device)) flexcan_enable_wakeup_irq(priv, false); - err = flexcan_exit_stop_mode(priv); - if (err) - return err; - } return 0; } diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c index 3db619209fe1..eacd428e07e9 100644 --- a/drivers/net/can/m_can/tcan4x5x.c +++ b/drivers/net/can/m_can/tcan4x5x.c @@ -101,6 +101,9 @@ #define TCAN4X5X_MODE_STANDBY BIT(6) #define TCAN4X5X_MODE_NORMAL BIT(7) +#define TCAN4X5X_DISABLE_WAKE_MSK (BIT(31) | BIT(30)) +#define TCAN4X5X_DISABLE_INH_MSK BIT(9) + #define TCAN4X5X_SW_RESET BIT(2) #define TCAN4X5X_MCAN_CONFIGURED BIT(5) @@ -164,6 +167,28 @@ static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv) } } +static int tcan4x5x_reset(struct tcan4x5x_priv *priv) +{ + int ret = 0; + + if (priv->reset_gpio) { + gpiod_set_value(priv->reset_gpio, 1); + + /* tpulse_width minimum 30us */ + usleep_range(30, 100); + gpiod_set_value(priv->reset_gpio, 0); + } else { + ret = regmap_write(priv->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_SW_RESET); + if (ret) + return ret; + } + + usleep_range(700, 1000); + + return ret; +} + static int regmap_spi_gather_write(void *context, const void *reg, size_t reg_len, const void *val, size_t val_len) @@ -338,15 +363,34 @@ static int tcan4x5x_init(struct m_can_classdev *cdev) return ret; } +static int tcan4x5x_disable_wake(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = cdev->device_data; + + return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_DISABLE_WAKE_MSK, 0x00); +} + +static int tcan4x5x_disable_state(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = cdev->device_data; + + return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_DISABLE_INH_MSK, 0x01); +} + 
static int tcan4x5x_parse_config(struct m_can_classdev *cdev) { struct tcan4x5x_priv *tcan4x5x = cdev->device_data; + int ret; tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake", GPIOD_OUT_HIGH); if (IS_ERR(tcan4x5x->device_wake_gpio)) { - dev_err(cdev->dev, "device-wake gpio not defined\n"); - return -EINVAL; + if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + tcan4x5x_disable_wake(cdev); } tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset", @@ -354,16 +398,17 @@ static int tcan4x5x_parse_config(struct m_can_classdev *cdev) if (IS_ERR(tcan4x5x->reset_gpio)) tcan4x5x->reset_gpio = NULL; + ret = tcan4x5x_reset(tcan4x5x); + if (ret) + return ret; + tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev, "device-state", GPIOD_IN); - if (IS_ERR(tcan4x5x->device_state_gpio)) + if (IS_ERR(tcan4x5x->device_state_gpio)) { tcan4x5x->device_state_gpio = NULL; - - tcan4x5x->power = devm_regulator_get_optional(cdev->dev, - "vsup"); - if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER) - return -EPROBE_DEFER; + tcan4x5x_disable_state(cdev); + } return 0; } @@ -398,6 +443,12 @@ static int tcan4x5x_can_probe(struct spi_device *spi) if (!priv) return -ENOMEM; + priv->power = devm_regulator_get_optional(&spi->dev, "vsup"); + if (PTR_ERR(priv->power) == -EPROBE_DEFER) + return -EPROBE_DEFER; + else + priv->power = NULL; + mcan_class->device_data = priv; m_can_class_get_clocks(mcan_class); @@ -428,10 +479,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi) spi_set_drvdata(spi, priv); - ret = tcan4x5x_parse_config(mcan_class); - if (ret) - goto out_clk; - /* Configure the SPI bus */ spi->bits_per_word = 32; ret = spi_setup(spi); @@ -441,7 +488,17 @@ static int tcan4x5x_can_probe(struct spi_device *spi) priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus, &spi->dev, &tcan4x5x_regmap); - tcan4x5x_power_enable(priv->power, 1); + ret = tcan4x5x_power_enable(priv->power, 1); + if (ret) + goto out_clk; + + ret = tcan4x5x_parse_config(mcan_class); + if (ret) + goto out_power; + + ret = tcan4x5x_init(mcan_class); + if (ret) + goto out_power; ret = m_can_class_register(mcan_class); if (ret) diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index 8caf7af0dee2..99101d7027a8 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -381,13 +381,12 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota) struct net_device *dev = napi->dev; struct mscan_regs __iomem *regs = priv->reg_base; struct net_device_stats *stats = &dev->stats; - int npackets = 0; - int ret = 1; + int work_done = 0; struct sk_buff *skb; struct can_frame *frame; u8 canrflg; - while (npackets < quota) { + while (work_done < quota) { canrflg = in_8(&regs->canrflg); if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF))) break; @@ -408,18 +407,18 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota) stats->rx_packets++; stats->rx_bytes += frame->can_dlc; - npackets++; + work_done++; netif_receive_skb(skb); } - if (!(in_8(&regs->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) { - napi_complete(&priv->napi); - clear_bit(F_RX_PROGRESS, &priv->flags); - if (priv->can.state < CAN_STATE_BUS_OFF) - out_8(&regs->canrier, priv->shadow_canrier); - ret = 0; + if (work_done < quota) { + if (likely(napi_complete_done(&priv->napi, work_done))) { + clear_bit(F_RX_PROGRESS, &priv->flags); + if (priv->can.state < CAN_STATE_BUS_OFF) + out_8(&regs->canrier, priv->shadow_canrier); + } } - return ret; + return work_done; } static irqreturn_t 
mscan_isr(int irq, void *dev_id) diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 2f74f6704c12..a4b4b742c80c 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -918,7 +918,7 @@ static int gs_usb_probe(struct usb_interface *intf, GS_USB_BREQ_HOST_FORMAT, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 1, - intf->altsetting[0].desc.bInterfaceNumber, + intf->cur_altsetting->desc.bInterfaceNumber, hconf, sizeof(*hconf), 1000); @@ -941,7 +941,7 @@ static int gs_usb_probe(struct usb_interface *intf, GS_USB_BREQ_DEVICE_CONFIG, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 1, - intf->altsetting[0].desc.bInterfaceNumber, + intf->cur_altsetting->desc.bInterfaceNumber, dconf, sizeof(*dconf), 1000); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index 5fc0be564274..7ab87a758754 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -1590,7 +1590,7 @@ static int kvaser_usb_hydra_setup_endpoints(struct kvaser_usb *dev) struct usb_endpoint_descriptor *ep; int i; - iface_desc = &dev->intf->altsetting[0]; + iface_desc = dev->intf->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { ep = &iface_desc->endpoint[i].desc; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index 07d2f3aa2c02..1b9957f12459 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -608,7 +608,7 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv, struct kvaser_cmd *cmd; int err; - cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); if (!cmd) return -ENOMEM; @@ -1140,7 +1140,7 @@ static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv) struct kvaser_cmd *cmd; int rc; - cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; @@ -1206,7 +1206,7 @@ static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv) struct kvaser_cmd *cmd; int rc; - cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; @@ -1310,7 +1310,7 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev) struct usb_endpoint_descriptor *endpoint; int i; - iface_desc = &dev->intf->altsetting[0]; + iface_desc = dev->intf->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 464af939cd8a..c1dbab8c896d 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -60,6 +60,8 @@ enum xcan_reg { XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */ XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */ XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX Message Space */ + XCAN_AFR_2_MASK_OFFSET = 0x0A00, /* Acceptance Filter MASK */ + XCAN_AFR_2_ID_OFFSET = 0x0A04, /* Acceptance Filter ID */ }; #define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00) @@ -1809,6 +1811,11 @@ static int xcan_probe(struct platform_device *pdev) pm_runtime_put(&pdev->dev); + if (priv->devtype.flags & XCAN_FLAG_CANFD_2) { + priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000); + priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000); + } + netdev_dbg(ndev, "reg_base=0x%p irq=%d 
clock=%d, tx buffers: actual %d, using %d\n", priv->reg_base, ndev->irq, priv->can.clock.freq, hw_tx_max, priv->tx_max); diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 36828f210030..edacacfc9365 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -347,7 +347,7 @@ static void b53_set_forwarding(struct b53_device *dev, int enable) * frames should be flooded or not. */ b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); - mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN; + mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN; b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); } @@ -526,6 +526,8 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) cpu_port = dsa_to_port(ds, port)->cpu_dp->index; + b53_br_egress_floods(ds, port, true, true); + if (dev->ops->irq_enable) ret = dev->ops->irq_enable(dev, port); if (ret) @@ -641,6 +643,8 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port) b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl); b53_brcm_hdr_setup(dev->ds, port); + + b53_br_egress_floods(dev->ds, port, true, true); } static void b53_enable_mib(struct b53_device *dev) @@ -1821,19 +1825,26 @@ int b53_br_egress_floods(struct dsa_switch *ds, int port, struct b53_device *dev = ds->priv; u16 uc, mc; - b53_read16(dev, B53_CTRL_PAGE, B53_UC_FWD_EN, &uc); + b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc); if (unicast) uc |= BIT(port); else uc &= ~BIT(port); - b53_write16(dev, B53_CTRL_PAGE, B53_UC_FWD_EN, uc); + b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc); + + b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc); + if (multicast) + mc |= BIT(port); + else + mc &= ~BIT(port); + b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc); - b53_read16(dev, B53_CTRL_PAGE, B53_MC_FWD_EN, &mc); + b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc); if (multicast) mc |= BIT(port); else mc &= ~BIT(port); - b53_write16(dev, B53_CTRL_PAGE, B53_MC_FWD_EN, mc); + b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc); return 0; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index e43040c9f9ee..3e8635311d0d 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -68,7 +68,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) /* Force link status for IMP port */ reg = core_readl(priv, offset); - reg |= (MII_SW_OR | LINK_STS); + reg |= (MII_SW_OR | LINK_STS | GMII_SPEED_UP_2G); core_writel(priv, reg, offset); /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index f3f0c3f07391..1962c8330daa 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -358,7 +358,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, return -EINVAL; } - ip_frag = be32_to_cpu(fs->m_ext.data[0]); + ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1); /* Locate the first rule available */ if (fs->location == RX_CLS_LOC_ANY) @@ -569,7 +569,7 @@ static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port, if (rule->fs.flow_type != fs->flow_type || rule->fs.ring_cookie != fs->ring_cookie || - rule->fs.m_ext.data[0] != fs->m_ext.data[0]) + rule->fs.h_ext.data[0] != fs->h_ext.data[0]) continue; switch (fs->flow_type & ~FLOW_EXT) { @@ -621,7 +621,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, return -EINVAL; } - ip_frag = be32_to_cpu(fs->m_ext.data[0]); + 
ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1); layout = &udf_tcpip6_layout; slice_num = bcm_sf2_get_slice_number(layout, 0); diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 120a65d3e3ef..b016cc205f81 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -360,6 +360,11 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port) { u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST; + /* Use the default high priority for management frames sent to + * the CPU. + */ + port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI; + return mv88e6390_g1_monitor_write(chip, ptr, port); } diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index bc5a6b2bb1e4..5324c6f4ae90 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -211,6 +211,7 @@ #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000 +#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0 #define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff /* Offset 0x1C: Global Control 2 */ diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 7fe256c5739d..0b43c650e100 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -393,7 +393,7 @@ phy_interface_t mv88e6390x_port_max_speed_mode(int port) } static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, - phy_interface_t mode) + phy_interface_t mode, bool force) { u8 lane; u16 cmode; @@ -427,8 +427,8 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, cmode = 0; } - /* cmode doesn't change, nothing to do for us */ - if (cmode == chip->ports[port].cmode) + /* cmode doesn't change, nothing to do for us unless forced */ + if (cmode == chip->ports[port].cmode && !force) return 0; lane = mv88e6xxx_serdes_get_lane(chip, port); @@ -484,7 +484,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, if (port != 9 && port != 10) return -EOPNOTSUPP; - return mv88e6xxx_port_set_cmode(chip, port, mode); + return mv88e6xxx_port_set_cmode(chip, port, mode, false); } int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, @@ -504,7 +504,7 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, break; } - return mv88e6xxx_port_set_cmode(chip, port, mode); + return mv88e6xxx_port_set_cmode(chip, port, mode, false); } static int mv88e6341_port_set_cmode_writable(struct mv88e6xxx_chip *chip, @@ -555,7 +555,7 @@ int mv88e6341_port_set_cmode(struct mv88e6xxx_chip *chip, int port, if (err) return err; - return mv88e6xxx_port_set_cmode(chip, port, mode); + return mv88e6xxx_port_set_cmode(chip, port, mode, true); } int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode) diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig index 0031ca814346..6f9804093150 100644 --- a/drivers/net/dsa/ocelot/Kconfig +++ b/drivers/net/dsa/ocelot/Kconfig @@ -2,6 +2,7 @@ config NET_DSA_MSCC_FELIX tristate "Ocelot / Felix Ethernet switch support" depends on NET_DSA && PCI + depends on NET_VENDOR_MICROSEMI select MSCC_OCELOT_SWITCH select NET_DSA_TAG_OCELOT help diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index a51ac088c0bc..bb91f3d17cf2 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ 
b/drivers/net/dsa/sja1105/sja1105_main.c @@ -582,7 +582,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, struct device *dev = &priv->spidev->dev; struct device_node *child; - for_each_child_of_node(ports_node, child) { + for_each_available_child_of_node(ports_node, child) { struct device_node *phy_node; phy_interface_t phy_mode; u32 index; @@ -1569,8 +1569,8 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled) if (enabled) { /* Enable VLAN filtering. */ - tpid = ETH_P_8021AD; - tpid2 = ETH_P_8021Q; + tpid = ETH_P_8021Q; + tpid2 = ETH_P_8021AD; } else { /* Disable VLAN filtering. */ tpid = ETH_P_SJA1105; @@ -1579,9 +1579,9 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled) table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; general_params = table->entries; - /* EtherType used to identify outer tagged (S-tag) VLAN traffic */ - general_params->tpid = tpid; /* EtherType used to identify inner tagged (C-tag) VLAN traffic */ + general_params->tpid = tpid; + /* EtherType used to identify outer tagged (S-tag) VLAN traffic */ general_params->tpid2 = tpid2; /* When VLAN filtering is on, we need to at least be able to * decode management traffic through the "backup plan". @@ -1855,7 +1855,7 @@ static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port, if (!clone) goto out; - sja1105_ptp_txtstamp_skb(ds, slot, clone); + sja1105_ptp_txtstamp_skb(ds, port, clone); out: mutex_unlock(&priv->mgmt_lock); diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c index 54258a25031d..43ab7589d0d0 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.c +++ b/drivers/net/dsa/sja1105/sja1105_ptp.c @@ -234,7 +234,7 @@ int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd, if (rw == SPI_WRITE) priv->info->ptp_cmd_packing(buf, cmd, PACK); - rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->ptp_control, buf, + rc = sja1105_xfer_buf(priv, rw, regs->ptp_control, buf, SJA1105_SIZE_PTP_CMD); if (rw == SPI_READ) @@ -659,7 +659,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds) ptp_data->clock = NULL; } -void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot, +void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int port, struct sk_buff *skb) { struct sja1105_private *priv = ds->priv; @@ -679,7 +679,7 @@ void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot, goto out; } - rc = sja1105_ptpegr_ts_poll(ds, slot, &ts); + rc = sja1105_ptpegr_ts_poll(ds, port, &ts); if (rc < 0) { dev_err(ds->dev, "timed out polling for tstamp\n"); kfree_skb(skb); diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c index 0d03e13e9909..63d2311817c4 100644 --- a/drivers/net/dsa/sja1105/sja1105_static_config.c +++ b/drivers/net/dsa/sja1105/sja1105_static_config.c @@ -142,6 +142,9 @@ static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr, return size; } +/* TPID and TPID2 are intentionally reversed so that semantic + * compatibility with E/T is kept. 
+ */ static size_t sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr, enum packing_op op) @@ -166,9 +169,9 @@ sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr, sja1105_packing(buf, &entry->mirr_port, 141, 139, size, op); sja1105_packing(buf, &entry->vlmarker, 138, 107, size, op); sja1105_packing(buf, &entry->vlmask, 106, 75, size, op); - sja1105_packing(buf, &entry->tpid, 74, 59, size, op); + sja1105_packing(buf, &entry->tpid2, 74, 59, size, op); sja1105_packing(buf, &entry->ignore2stf, 58, 58, size, op); - sja1105_packing(buf, &entry->tpid2, 57, 42, size, op); + sja1105_packing(buf, &entry->tpid, 57, 42, size, op); sja1105_packing(buf, &entry->queue_ts, 41, 41, size, op); sja1105_packing(buf, &entry->egrmirrvid, 40, 29, size, op); sja1105_packing(buf, &entry->egrmirrpcp, 28, 26, size, op); diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c index 26b925b5dace..fa6750d973d7 100644 --- a/drivers/net/dsa/sja1105/sja1105_tas.c +++ b/drivers/net/dsa/sja1105/sja1105_tas.c @@ -477,11 +477,6 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port, if (admin->cycle_time_extension) return -ENOTSUPP; - if (!ns_to_sja1105_delta(admin->base_time)) { - dev_err(ds->dev, "A base time of zero is not hardware-allowed\n"); - return -ERANGE; - } - for (i = 0; i < admin->num_entries; i++) { s64 delta_ns = admin->entries[i].interval; s64 delta_cycles = ns_to_sja1105_delta(delta_ns); diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 7c941eba0bc9..0ce37d54ed10 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -72,7 +72,7 @@ /*****************************************************************************/ /* ENA adaptive interrupt moderation settings */ -#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 196 +#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64 #define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0 #define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1 diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index a3250dcf7d53..fc96c66b44cb 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -315,10 +315,9 @@ static int ena_get_coalesce(struct net_device *net_dev, ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) * ena_dev->intr_delay_resolution; - if (!ena_com_get_adaptive_moderation_enabled(ena_dev)) - coalesce->rx_coalesce_usecs = - ena_com_get_nonadaptive_moderation_interval_rx(ena_dev) - * ena_dev->intr_delay_resolution; + coalesce->rx_coalesce_usecs = + ena_com_get_nonadaptive_moderation_interval_rx(ena_dev) + * ena_dev->intr_delay_resolution; coalesce->use_adaptive_rx_coalesce = ena_com_get_adaptive_moderation_enabled(ena_dev); @@ -367,12 +366,6 @@ static int ena_set_coalesce(struct net_device *net_dev, ena_update_tx_rings_intr_moderation(adapter); - if (coalesce->use_adaptive_rx_coalesce) { - if (!ena_com_get_adaptive_moderation_enabled(ena_dev)) - ena_com_enable_adaptive_moderation(ena_dev); - return 0; - } - rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, coalesce->rx_coalesce_usecs); if (rc) @@ -380,10 +373,13 @@ static int ena_set_coalesce(struct net_device *net_dev, ena_update_rx_rings_intr_moderation(adapter); - if (!coalesce->use_adaptive_rx_coalesce) { - if (ena_com_get_adaptive_moderation_enabled(ena_dev)) - ena_com_disable_adaptive_moderation(ena_dev); - } + if 
(coalesce->use_adaptive_rx_coalesce && + !ena_com_get_adaptive_moderation_enabled(ena_dev)) + ena_com_enable_adaptive_moderation(ena_dev); + + if (!coalesce->use_adaptive_rx_coalesce && + ena_com_get_adaptive_moderation_enabled(ena_dev)) + ena_com_disable_adaptive_moderation(ena_dev); return 0; } diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index d46a912002ff..948583fdcc28 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1238,8 +1238,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget) struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); struct ena_ring *tx_ring, *rx_ring; - u32 tx_work_done; - u32 rx_work_done; + int tx_work_done; + int rx_work_done = 0; int tx_budget; int napi_comp_call = 0; int ret; @@ -1256,7 +1256,11 @@ static int ena_io_poll(struct napi_struct *napi, int budget) } tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget); - rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); + /* On netpoll the budget is zero and the handler should only clean the + * tx completions. + */ + if (likely(budget)) + rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); /* If the device is about to reset or down, avoid unmask * the interrupt and return 0 so NAPI won't reschedule diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index a17a4da7bc15..c85e3e29012c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -403,6 +403,8 @@ int aq_nic_start(struct aq_nic_s *self) if (err < 0) goto err_exit; + aq_nic_set_loopback(self); + err = self->aq_hw_ops->hw_start(self->aq_hw); if (err < 0) goto err_exit; @@ -413,8 +415,6 @@ int aq_nic_start(struct aq_nic_s *self) INIT_WORK(&self->service_task, aq_nic_service_task); - aq_nic_set_loopback(self); - timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0); aq_nic_service_timer_cb(&self->service_timer); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 58e891af6e09..ec041f78d063 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -1525,9 +1525,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = { .rx_extract_ts = hw_atl_b0_rx_extract_ts, .extract_hwts = hw_atl_b0_extract_hwts, .hw_set_offload = hw_atl_b0_hw_offload_set, - .hw_get_hw_stats = hw_atl_utils_get_hw_stats, - .hw_get_fw_version = hw_atl_utils_get_fw_version, - .hw_set_offload = hw_atl_b0_hw_offload_set, .hw_set_loopback = hw_atl_b0_set_loopback, .hw_set_fc = hw_atl_b0_set_fc, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 8910b62e67ed..f547baa6c954 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -667,9 +667,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) u32 speed; mpi_state = hw_atl_utils_mpi_get_state(self); - speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G | - FW2X_RATE_2G5 | FW2X_RATE_5G | - FW2X_RATE_10G); + speed = mpi_state >> HW_ATL_MPI_SPEED_SHIFT; if (!speed) { link_status->mbps = 0U; diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 8f5021091eee..61a334d1b5e6 100644 --- 
a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -313,7 +313,7 @@ struct ag71xx { struct ag71xx_desc *stop_desc; dma_addr_t stop_desc_dma; - int phy_if_mode; + phy_interface_t phy_if_mode; struct delayed_work restart_work; struct timer_list oom_timer; @@ -1744,7 +1744,7 @@ static int ag71xx_probe(struct platform_device *pdev) eth_random_addr(ndev->dev_addr); } - err = of_get_phy_mode(np, ag->phy_if_mode); + err = of_get_phy_mode(np, &ag->phy_if_mode); if (err) { netif_err(ag, probe, ndev, "missing phy-mode property in DT\n"); goto err_free; diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 035dbb1b2c98..ec25fd81985d 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -1516,8 +1516,10 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset) int ethaddr_bytes = ETH_ALEN; memset(ppattern + offset, 0xff, magicsync); - for (j = 0; j < magicsync; j++) - set_bit(len++, (unsigned long *) pmask); + for (j = 0; j < magicsync; j++) { + pmask[len >> 3] |= BIT(len & 7); + len++; + } for (j = 0; j < B44_MAX_PATTERNS; j++) { if ((B44_PATTERN_SIZE - len) >= ETH_ALEN) @@ -1529,7 +1531,8 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset) for (k = 0; k< ethaddr_bytes; k++) { ppattern[offset + magicsync + (j * ETH_ALEN) + k] = macaddr[k]; - set_bit(len++, (unsigned long *) pmask); + pmask[len >> 3] |= BIT(len & 7); + len++; } } return len - 1; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 825af709708e..d6b1a153f9df 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2323,7 +2323,7 @@ static int bcm_sysport_map_queues(struct notifier_block *nb, ring->switch_queue = qp; ring->switch_port = port; ring->inspect = true; - priv->ring_map[q + port * num_tx_queues] = ring; + priv->ring_map[qp + port * num_tx_queues] = ring; qp++; } @@ -2338,7 +2338,7 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb, struct net_device *slave_dev; unsigned int num_tx_queues; struct net_device *dev; - unsigned int q, port; + unsigned int q, qp, port; priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier); if (priv->netdev != info->master) @@ -2364,7 +2364,8 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb, continue; ring->inspect = false; - priv->ring_map[q + port * num_tx_queues] = NULL; + qp = ring->switch_queue; + priv->ring_map[qp + port * num_tx_queues] = NULL; } return 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 8b08cb18e363..3f63ffd7561b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -1109,7 +1109,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp) for (i = 0; i < E1H_FUNC_MAX / 2; i++) { u32 func_config = MF_CFG_RD(bp, - func_mf_config[BP_PORT(bp) + 2 * i]. + func_mf_config[BP_PATH(bp) + 2 * i]. config); func_num += ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 
0 : 1); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 192ff8d5da32..cff64e43bdd8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9976,10 +9976,18 @@ static void bnx2x_recovery_failed(struct bnx2x *bp) */ static void bnx2x_parity_recover(struct bnx2x *bp) { - bool global = false; u32 error_recovered, error_unrecovered; - bool is_parity; + bool is_parity, global = false; +#ifdef CONFIG_BNX2X_SRIOV + int vf_idx; + + for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) { + struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); + if (vf) + vf->state = VF_LOST; + } +#endif DP(NETIF_MSG_HW, "Handling parity\n"); while (1) { switch (bp->recovery_state) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 7a6e82db4231..bacc8552bce1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -1536,8 +1536,11 @@ void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \ func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT) +#define BNX2X_VFS_VLAN_CREDIT(bp) \ + (GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) + #define PF_VLAN_CREDIT_E2(bp, func_num) \ - ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \ + ((MAX_VLAN_CREDIT_E2 - 1 - BNX2X_VFS_VLAN_CREDIT(bp)) / \ func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT) #endif /* BNX2X_SP_VERBS */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index b6ebd92ec565..3a716c015415 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -139,6 +139,7 @@ struct bnx2x_virtf { #define VF_ACQUIRED 1 /* VF acquired, but not initialized */ #define VF_ENABLED 2 /* VF Enabled */ #define VF_RESET 3 /* VF FLR'd, pending cleanup */ +#define VF_LOST 4 /* Recovery while VFs are loaded */ bool flr_clnup_stage; /* true during flr cleanup */ bool malicious; /* true if FW indicated so, until FLR */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 0752b7fa4d9c..ea0e9394f898 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -2107,6 +2107,18 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, { int i; + if (vf->state == VF_LOST) { + /* Just ack the FW and return if VFs are lost + * in case of parity error. VFs are supposed to be timedout + * on waiting for PF response. + */ + DP(BNX2X_MSG_IOV, + "VF 0x%x lost, not handling the request\n", vf->abs_vfid); + + storm_memset_vf_mbx_ack(bp, vf->abs_vfid); + return; + } + /* check if tlv type is known */ if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) { /* Lock the per vf op mutex and note the locker's identity. 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 85983f0e3134..e6f18f6070ef 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2001,6 +2001,9 @@ static int bnxt_async_event_process(struct bnxt *bp, case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { u32 data1 = le32_to_cpu(cmpl->event_data1); + if (!bp->fw_health) + goto async_event_process_exit; + bp->fw_reset_timestamp = jiffies; bp->fw_reset_min_dsecs = cmpl->timestamp_lo; if (!bp->fw_reset_min_dsecs) @@ -4421,8 +4424,9 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); - flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE | - FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; + flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; + if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) + flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; @@ -6186,7 +6190,7 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp, tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); val = clamp_t(u16, tmr, 1, coal_cap->cmpl_aggr_dma_tmr_during_int_max); - req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr); + req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val); req->enables |= cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE); } @@ -7115,14 +7119,6 @@ static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) goto err_recovery_out; - if (!fw_health) { - fw_health = kzalloc(sizeof(*fw_health), GFP_KERNEL); - bp->fw_health = fw_health; - if (!fw_health) { - rc = -ENOMEM; - goto err_recovery_out; - } - } fw_health->flags = le32_to_cpu(resp->flags); if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { @@ -8796,6 +8792,9 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) if (fw_reset) { if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) bnxt_ulp_stop(bp); + bnxt_free_ctx_mem(bp); + kfree(bp->ctx); + bp->ctx = NULL; rc = bnxt_fw_init_one(bp); if (rc) { set_bit(BNXT_STATE_ABORT_ERR, &bp->state); @@ -9990,8 +9989,7 @@ static void bnxt_fw_health_check(struct bnxt *bp) struct bnxt_fw_health *fw_health = bp->fw_health; u32 val; - if (!fw_health || !fw_health->enabled || - test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) return; if (fw_health->tmr_counter) { @@ -10482,6 +10480,23 @@ static void bnxt_init_dflt_coal(struct bnxt *bp) bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; } +static void bnxt_alloc_fw_health(struct bnxt *bp) +{ + if (bp->fw_health) + return; + + if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && + !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + return; + + bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); + if (!bp->fw_health) { + netdev_warn(bp->dev, "Failed to allocate fw_health\n"); + bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + } +} + static int bnxt_fw_init_one_p1(struct bnxt *bp) { int rc; @@ -10528,6 +10543,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp) netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", rc); + bnxt_alloc_fw_health(bp); rc = bnxt_hwrm_error_recovery_qcfg(bp); if (rc) netdev_warn(bp->dev, 
"hwrm query error recovery failure rc: %d\n", @@ -10609,6 +10625,12 @@ static int bnxt_fw_init_one(struct bnxt *bp) rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); if (rc) return rc; + + /* In case fw capabilities have changed, destroy the unneeded + * reporters and create newly capable ones. + */ + bnxt_dl_fw_reporters_destroy(bp, false); + bnxt_dl_fw_reporters_create(bp); bnxt_fw_init_one_p3(bp); return 0; } @@ -10751,8 +10773,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); return; case BNXT_FW_RESET_STATE_ENABLE_DEV: - if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && - bp->fw_health) { + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { u32 val; val = bnxt_fw_health_readl(bp, @@ -11044,11 +11065,23 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, struct flow_keys *keys1 = &f1->fkeys; struct flow_keys *keys2 = &f2->fkeys; - if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src && - keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst && - keys1->ports.ports == keys2->ports.ports && - keys1->basic.ip_proto == keys2->basic.ip_proto && - keys1->basic.n_proto == keys2->basic.n_proto && + if (keys1->basic.n_proto != keys2->basic.n_proto || + keys1->basic.ip_proto != keys2->basic.ip_proto) + return false; + + if (keys1->basic.n_proto == htons(ETH_P_IP)) { + if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || + keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst) + return false; + } else { + if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src, + sizeof(keys1->addrs.v6addrs.src)) || + memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst, + sizeof(keys1->addrs.v6addrs.dst))) + return false; + } + + if (keys1->ports.ports == keys2->ports.ports && keys1->control.flags == keys2->control.flags && ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) @@ -11340,7 +11373,7 @@ int bnxt_get_port_parent_id(struct net_device *dev, return -EOPNOTSUPP; /* The PF and it's VF-reps only support the switchdev framework */ - if (!BNXT_PF(bp)) + if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) return -EOPNOTSUPP; ppid->id_len = sizeof(bp->switch_id); @@ -11396,11 +11429,11 @@ static void bnxt_remove_one(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct bnxt *bp = netdev_priv(dev); - if (BNXT_PF(bp)) { + if (BNXT_PF(bp)) bnxt_sriov_disable(bp); - bnxt_dl_unregister(bp); - } + bnxt_dl_fw_reporters_destroy(bp, true); + bnxt_dl_unregister(bp); pci_disable_pcie_error_reporting(pdev); unregister_netdev(dev); bnxt_shutdown_tc(bp); @@ -11415,6 +11448,8 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_dcb_free(bp); kfree(bp->edev); bp->edev = NULL; + kfree(bp->fw_health); + bp->fw_health = NULL; bnxt_cleanup_pci(bp); bnxt_free_ctx_mem(bp); kfree(bp->ctx); @@ -11711,6 +11746,7 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) put_unaligned_le32(dw, &dsn[0]); pci_read_config_dword(pdev, pos + 4, &dw); put_unaligned_le32(dw, &dsn[4]); + bp->flags |= BNXT_FLAG_DSN_VALID; return 0; } @@ -11822,9 +11858,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (BNXT_PF(bp)) { /* Read the adapter's DSN to use as the eswitch switch_id */ - rc = bnxt_pcie_dsn_get(bp, bp->switch_id); - if (rc) - goto init_err_pci_clean; + bnxt_pcie_dsn_get(bp, bp->switch_id); } /* MTU range: 60 - FW defined max */ @@ -11875,8 +11909,8 @@ static int bnxt_init_one(struct pci_dev 
*pdev, const struct pci_device_id *ent) if (rc) goto init_err_cleanup_tc; - if (BNXT_PF(bp)) - bnxt_dl_register(bp); + bnxt_dl_register(bp); + bnxt_dl_fw_reporters_create(bp); netdev_info(dev, "%s found at mem %lx, node addr %pM\n", board_info[ent->driver_data].name, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 505af5cfb1bd..f14335433a64 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1532,6 +1532,7 @@ struct bnxt { #define BNXT_FLAG_NO_AGG_RINGS 0x20000 #define BNXT_FLAG_RX_PAGE_MODE 0x40000 #define BNXT_FLAG_MULTI_HOST 0x100000 + #define BNXT_FLAG_DSN_VALID 0x200000 #define BNXT_FLAG_DOUBLE_DB 0x400000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_DIM 0x2000000 @@ -1936,9 +1937,6 @@ static inline bool bnxt_cfa_hwrm_message(u16 req_type) case HWRM_CFA_ENCAP_RECORD_FREE: case HWRM_CFA_DECAP_FILTER_ALLOC: case HWRM_CFA_DECAP_FILTER_FREE: - case HWRM_CFA_NTUPLE_FILTER_ALLOC: - case HWRM_CFA_NTUPLE_FILTER_FREE: - case HWRM_CFA_NTUPLE_FILTER_CFG: case HWRM_CFA_EM_FLOW_ALLOC: case HWRM_CFA_EM_FLOW_FREE: case HWRM_CFA_EM_FLOW_CFG: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index acb2dd64c023..3eedd4477218 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -39,11 +39,10 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct netlink_ext_ack *extack) { struct bnxt *bp = devlink_health_reporter_priv(reporter); - struct bnxt_fw_health *health = bp->fw_health; u32 val, health_status; int rc; - if (!health || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) return 0; val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); @@ -126,21 +125,15 @@ struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = { .recover = bnxt_fw_fatal_recover, }; -static void bnxt_dl_fw_reporters_create(struct bnxt *bp) +void bnxt_dl_fw_reporters_create(struct bnxt *bp) { struct bnxt_fw_health *health = bp->fw_health; - if (!health) + if (!bp->dl || !health) return; - health->fw_reporter = - devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops, - 0, false, bp); - if (IS_ERR(health->fw_reporter)) { - netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n", - PTR_ERR(health->fw_reporter)); - health->fw_reporter = NULL; - } + if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter) + goto err_recovery; health->fw_reset_reporter = devlink_health_reporter_create(bp->dl, @@ -150,8 +143,30 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp) netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n", PTR_ERR(health->fw_reset_reporter)); health->fw_reset_reporter = NULL; + bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; + } + +err_recovery: + if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + return; + + if (!health->fw_reporter) { + health->fw_reporter = + devlink_health_reporter_create(bp->dl, + &bnxt_dl_fw_reporter_ops, + 0, false, bp); + if (IS_ERR(health->fw_reporter)) { + netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n", + PTR_ERR(health->fw_reporter)); + health->fw_reporter = NULL; + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + return; + } } + if (health->fw_fatal_reporter) + return; + health->fw_fatal_reporter = devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_fatal_reporter_ops, @@ -160,24 
+175,35 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp) netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n", PTR_ERR(health->fw_fatal_reporter)); health->fw_fatal_reporter = NULL; + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; } } -static void bnxt_dl_fw_reporters_destroy(struct bnxt *bp) +void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all) { struct bnxt_fw_health *health = bp->fw_health; - if (!health) + if (!bp->dl || !health) return; - if (health->fw_reporter) - devlink_health_reporter_destroy(health->fw_reporter); - - if (health->fw_reset_reporter) + if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) && + health->fw_reset_reporter) { devlink_health_reporter_destroy(health->fw_reset_reporter); + health->fw_reset_reporter = NULL; + } - if (health->fw_fatal_reporter) + if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all) + return; + + if (health->fw_reporter) { + devlink_health_reporter_destroy(health->fw_reporter); + health->fw_reporter = NULL; + } + + if (health->fw_fatal_reporter) { devlink_health_reporter_destroy(health->fw_fatal_reporter); + health->fw_fatal_reporter = NULL; + } } void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event) @@ -185,9 +211,6 @@ void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event) struct bnxt_fw_health *fw_health = bp->fw_health; struct bnxt_fw_reporter_ctx fw_reporter_ctx; - if (!fw_health) - return; - fw_reporter_ctx.sp_event = event; switch (event) { case BNXT_FW_RESET_NOTIFY_SP_EVENT: @@ -247,6 +270,8 @@ static const struct devlink_ops bnxt_dl_ops = { .flash_update = bnxt_dl_flash_update, }; +static const struct devlink_ops bnxt_vf_dl_ops; + enum bnxt_dl_param_id { BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, @@ -460,7 +485,10 @@ int bnxt_dl_register(struct bnxt *bp) return -ENOTSUPP; } - dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl)); + if (BNXT_PF(bp)) + dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl)); + else + dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl)); if (!dl) { netdev_warn(bp->dev, "devlink_alloc failed"); return -ENOMEM; @@ -479,6 +507,9 @@ int bnxt_dl_register(struct bnxt *bp) goto err_dl_free; } + if (!BNXT_PF(bp)) + return 0; + rc = devlink_params_register(dl, bnxt_dl_params, ARRAY_SIZE(bnxt_dl_params)); if (rc) { @@ -506,8 +537,6 @@ int bnxt_dl_register(struct bnxt *bp) devlink_params_publish(dl); - bnxt_dl_fw_reporters_create(bp); - return 0; err_dl_port_unreg: @@ -530,12 +559,14 @@ void bnxt_dl_unregister(struct bnxt *bp) if (!dl) return; - bnxt_dl_fw_reporters_destroy(bp); - devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params, - ARRAY_SIZE(bnxt_dl_port_params)); - devlink_port_unregister(&bp->dl_port); - devlink_params_unregister(dl, bnxt_dl_params, - ARRAY_SIZE(bnxt_dl_params)); + if (BNXT_PF(bp)) { + devlink_port_params_unregister(&bp->dl_port, + bnxt_dl_port_params, + ARRAY_SIZE(bnxt_dl_port_params)); + devlink_port_unregister(&bp->dl_port); + devlink_params_unregister(dl, bnxt_dl_params, + ARRAY_SIZE(bnxt_dl_params)); + } devlink_unregister(dl); devlink_free(dl); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index 665d4bdcd8c0..6db6c3dac472 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -58,6 +58,8 @@ struct bnxt_dl_nvm_param { void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event); void 
bnxt_dl_health_status_update(struct bnxt *bp, bool healthy); +void bnxt_dl_fw_reporters_create(struct bnxt *bp); +void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all); int bnxt_dl_register(struct bnxt *bp); void bnxt_dl_unregister(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 2ccf79cdcb1e..08d56ec7b68a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -3071,8 +3071,15 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len, } } - if (info->dest_buf) - memcpy(info->dest_buf + off, dma_buf, len); + if (info->dest_buf) { + if ((info->seg_start + off + len) <= + BNXT_COREDUMP_BUF_LEN(info->buf_len)) { + memcpy(info->dest_buf + off, dma_buf, len); + } else { + rc = -ENOBUFS; + break; + } + } if (cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) @@ -3126,7 +3133,7 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, u16 segment_id, u32 *seg_len, - void *buf, u32 offset) + void *buf, u32 buf_len, u32 offset) { struct hwrm_dbg_coredump_retrieve_input req = {0}; struct bnxt_hwrm_dbg_dma_info info = {NULL}; @@ -3141,8 +3148,11 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, seq_no); info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output, data_len); - if (buf) + if (buf) { info.dest_buf = buf + offset; + info.buf_len = buf_len; + info.seg_start = offset; + } rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); if (!rc) @@ -3232,14 +3242,17 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) { u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output); + u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0; struct coredump_segment_record *seg_record = NULL; - u32 offset = 0, seg_hdr_len, seg_record_len; struct bnxt_coredump_segment_hdr seg_hdr; struct bnxt_coredump coredump = {NULL}; time64_t start_time; u16 start_utc; int rc = 0, i; + if (buf) + buf_len = *dump_len; + start_time = ktime_get_real_seconds(); start_utc = sys_tz.tz_minuteswest * 60; seg_hdr_len = sizeof(seg_hdr); @@ -3272,6 +3285,12 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) u32 duration = 0, seg_len = 0; unsigned long start, end; + if (buf && ((offset + seg_hdr_len) > + BNXT_COREDUMP_BUF_LEN(buf_len))) { + rc = -ENOBUFS; + goto err; + } + start = jiffies; rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id); @@ -3284,9 +3303,11 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) /* Write segment data into the buffer */ rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id, - &seg_len, buf, + &seg_len, buf, buf_len, offset + seg_hdr_len); - if (rc) + if (rc && rc == -ENOBUFS) + goto err; + else if (rc) netdev_err(bp->dev, "Failed to retrieve coredump for seg = %d\n", seg_record->segment_id); @@ -3316,7 +3337,8 @@ err: rc); kfree(coredump.data); *dump_len += sizeof(struct bnxt_coredump_record); - + if (rc == -ENOBUFS) + netdev_err(bp->dev, "Firmware returned large coredump buffer"); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index 4428d0abcbc1..3576d951727b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ 
b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -31,6 +31,8 @@ struct bnxt_coredump { u16 total_segs; }; +#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record)) + struct bnxt_hwrm_dbg_dma_info { void *dest_buf; int dest_buf_size; @@ -38,6 +40,8 @@ struct bnxt_hwrm_dbg_dma_info { u16 seq_off; u16 data_len_off; u16 segs; + u32 seg_start; + u32 buf_len; }; struct hwrm_dbg_cmn_input { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index c601ff7b8f61..4a316c4b3fa8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -113,8 +113,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, { struct net_device *dev = edev->net; struct bnxt *bp = netdev_priv(dev); + struct bnxt_hw_resc *hw_resc; int max_idx, max_cp_rings; int avail_msix, idx; + int total_vecs; int rc = 0; ASSERT_RTNL(); @@ -142,7 +144,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, } edev->ulp_tbl[ulp_id].msix_base = idx; edev->ulp_tbl[ulp_id].msix_requested = avail_msix; - if (bp->total_irqs < (idx + avail_msix)) { + hw_resc = &bp->hw_resc; + total_vecs = idx + avail_msix; + if (bp->total_irqs < total_vecs || + (BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) { if (netif_running(dev)) { bnxt_close_nic(bp, true, false); rc = bnxt_open_nic(bp, true, false); @@ -156,7 +161,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, } if (BNXT_NEW_RM(bp)) { - struct bnxt_hw_resc *hw_resc = &bp->hw_resc; int resv_msix; resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index f9bf7d7250ab..b010b34cdaf8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -398,6 +398,9 @@ static int bnxt_vf_reps_create(struct bnxt *bp) struct net_device *dev; int rc, i; + if (!(bp->flags & BNXT_FLAG_DSN_VALID)) + return -ENODEV; + bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL); if (!bp->vf_reps) return -ENOMEM; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 9c767ee252ac..f7d87c71aaa9 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -611,21 +611,24 @@ static const struct phylink_mac_ops macb_phylink_ops = { .mac_link_up = macb_mac_link_up, }; +static bool macb_phy_handle_exists(struct device_node *dn) +{ + dn = of_parse_phandle(dn, "phy-handle", 0); + of_node_put(dn); + return dn != NULL; +} + static int macb_phylink_connect(struct macb *bp) { + struct device_node *dn = bp->pdev->dev.of_node; struct net_device *dev = bp->dev; struct phy_device *phydev; int ret; - if (bp->pdev->dev.of_node && - of_parse_phandle(bp->pdev->dev.of_node, "phy-handle", 0)) { - ret = phylink_of_phy_connect(bp->phylink, bp->pdev->dev.of_node, - 0); - if (ret) { - netdev_err(dev, "Could not attach PHY (%d)\n", ret); - return ret; - } - } else { + if (dn) + ret = phylink_of_phy_connect(bp->phylink, dn, 0); + + if (!dn || (ret && !macb_phy_handle_exists(dn))) { phydev = phy_find_first(bp->mii_bus); if (!phydev) { netdev_err(dev, "no PHY found\n"); @@ -634,10 +637,11 @@ static int macb_phylink_connect(struct macb *bp) /* attach the mac to the phy */ ret = phylink_connect_phy(bp->phylink, phydev); - if (ret) { - netdev_err(dev, "Could not attach to PHY (%d)\n", ret); - return ret; - } + } + + 
if (ret) { + netdev_err(dev, "Could not attach PHY (%d)\n", ret); + return ret; } phylink_start(bp->phylink); @@ -664,9 +668,30 @@ static int macb_mii_probe(struct net_device *dev) return 0; } +static int macb_mdiobus_register(struct macb *bp) +{ + struct device_node *child, *np = bp->pdev->dev.of_node; + + /* Only create the PHY from the device tree if at least one PHY is + * described. Otherwise scan the entire MDIO bus. We do this to support + * old device tree that did not follow the best practices and did not + * describe their network PHYs. + */ + for_each_available_child_of_node(np, child) + if (of_mdiobus_child_is_phy(child)) { + /* The loop increments the child refcount, + * decrement it before returning. + */ + of_node_put(child); + + return of_mdiobus_register(bp->mii_bus, np); + } + + return mdiobus_register(bp->mii_bus); +} + static int macb_mii_init(struct macb *bp) { - struct device_node *np; int err = -ENXIO; /* Enable management port */ @@ -688,9 +713,7 @@ static int macb_mii_init(struct macb *bp) dev_set_drvdata(&bp->dev->dev, bp->mii_bus); - np = bp->pdev->dev.of_node; - - err = of_mdiobus_register(bp->mii_bus, np); + err = macb_mdiobus_register(bp); if (err) goto err_out_free_mdiobus; @@ -4069,7 +4092,7 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk, mgmt->rate = 0; mgmt->hw.init = &init; - *tx_clk = clk_register(NULL, &mgmt->hw); + *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw); if (IS_ERR(*tx_clk)) return PTR_ERR(*tx_clk); @@ -4397,7 +4420,6 @@ err_out_free_netdev: err_disable_clocks: clk_disable_unprepare(tx_clk); - clk_unregister(tx_clk); clk_disable_unprepare(hclk); clk_disable_unprepare(pclk); clk_disable_unprepare(rx_clk); @@ -4427,7 +4449,6 @@ static int macb_remove(struct platform_device *pdev) pm_runtime_dont_use_autosuspend(&pdev->dev); if (!pm_runtime_suspended(&pdev->dev)) { clk_disable_unprepare(bp->tx_clk); - clk_unregister(bp->tx_clk); clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); clk_disable_unprepare(bp->rx_clk); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index a70ac2097892..becee29f5df7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -504,6 +504,7 @@ struct link_config { enum cc_pause requested_fc; /* flow control user has requested */ enum cc_pause fc; /* actual link flow control */ + enum cc_pause advertised_fc; /* actual advertised flow control */ enum cc_fec requested_fec; /* Forward Error Correction: */ enum cc_fec fec; /* requested and actual in use */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 93868dca186a..aca9f7a20a2a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -3048,6 +3048,9 @@ static int sge_queue_entries(const struct adapter *adap) int tot_uld_entries = 0; int i; + if (!is_uld(adap)) + goto lld_only; + mutex_lock(&uld_mutex); for (i = 0; i < CXGB4_TX_MAX; i++) tot_uld_entries += sge_qinfo_uld_txq_entries(adap, i); @@ -3058,6 +3061,7 @@ static int sge_queue_entries(const struct adapter *adap) } mutex_unlock(&uld_mutex); +lld_only: return DIV_ROUND_UP(adap->sge.ethqsets, 4) + (adap->sge.eohw_txq ? 
DIV_ROUND_UP(adap->sge.eoqsets, 4) : 0) + tot_uld_entries + diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 20ab3b6285a2..c837382ee522 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -807,8 +807,8 @@ static void get_pauseparam(struct net_device *dev, struct port_info *p = netdev_priv(dev); epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; - epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0; - epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0; + epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0; + epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0; } static int set_pauseparam(struct net_device *dev, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 12ff69b3ba91..0dedd3e9c31e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3135,9 +3135,9 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) { struct port_info *pi = netdev_priv(dev); struct adapter *adap = pi->adapter; + struct ch_sched_queue qe = { 0 }; + struct ch_sched_params p = { 0 }; struct sched_class *e; - struct ch_sched_params p; - struct ch_sched_queue qe; u32 req_rate; int err = 0; @@ -3154,6 +3154,15 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return -EINVAL; } + qe.queue = index; + e = cxgb4_sched_queue_lookup(dev, &qe); + if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) { + dev_err(adap->pdev_dev, + "Queue %u already bound to class %u of type: %u\n", + index, e->idx, e->info.u.params.level); + return -EBUSY; + } + /* Convert from Mbps to Kbps */ req_rate = rate * 1000; @@ -3183,7 +3192,6 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return 0; /* Fetch any available unused or matching scheduling class */ - memset(&p, 0, sizeof(p)); p.type = SCHED_CLASS_TYPE_PACKET; p.u.params.level = SCHED_CLASS_LEVEL_CL_RL; p.u.params.mode = SCHED_CLASS_MODE_CLASS; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c index 102b370fbd3e..6d485803ddbe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c @@ -15,6 +15,8 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev, struct flow_action *actions = &cls->rule->action; struct port_info *pi = netdev2pinfo(dev); struct flow_action_entry *entry; + struct ch_sched_queue qe; + struct sched_class *e; u64 max_link_rate; u32 i, speed; int ret; @@ -60,9 +62,61 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev, } } + for (i = 0; i < pi->nqsets; i++) { + memset(&qe, 0, sizeof(qe)); + qe.queue = i; + + e = cxgb4_sched_queue_lookup(dev, &qe); + if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) { + NL_SET_ERR_MSG_MOD(extack, + "Some queues are already bound to different class"); + return -EBUSY; + } + } + return 0; } +static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc) +{ + struct port_info *pi = netdev2pinfo(dev); + struct ch_sched_queue qe; + int ret; + u32 i; + + for (i = 0; i < pi->nqsets; i++) { + qe.queue = i; + qe.class = tc; + ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE); + if (ret) + goto out_free; + } + + return 0; + +out_free: + while (i--) { + qe.queue 
= i; + qe.class = SCHED_CLS_NONE; + cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE); + } + + return ret; +} + +static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct ch_sched_queue qe; + u32 i; + + for (i = 0; i < pi->nqsets; i++) { + qe.queue = i; + qe.class = SCHED_CLS_NONE; + cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE); + } +} + static int cxgb4_matchall_alloc_tc(struct net_device *dev, struct tc_cls_matchall_offload *cls) { @@ -83,6 +137,7 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev, struct adapter *adap = netdev2adap(dev); struct flow_action_entry *entry; struct sched_class *e; + int ret; u32 i; tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; @@ -101,10 +156,21 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev, return -ENOMEM; } + ret = cxgb4_matchall_tc_bind_queues(dev, e->idx); + if (ret) { + NL_SET_ERR_MSG_MOD(extack, + "Could not bind queues to traffic class"); + goto out_free; + } + tc_port_matchall->egress.hwtc = e->idx; tc_port_matchall->egress.cookie = cls->cookie; tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED; return 0; + +out_free: + cxgb4_sched_class_free(dev, e->idx); + return ret; } static void cxgb4_matchall_free_tc(struct net_device *dev) @@ -114,6 +180,7 @@ static void cxgb4_matchall_free_tc(struct net_device *dev) struct adapter *adap = netdev2adap(dev); tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + cxgb4_matchall_tc_unbind_queues(dev); cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc); tc_port_matchall->egress.hwtc = SCHED_CLS_NONE; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c index 477973d2e341..ec3eb45ee3b4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c @@ -12,8 +12,9 @@ static int cxgb4_mqprio_validate(struct net_device *dev, struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); u32 speed, qcount = 0, qoffset = 0; + u32 start_a, start_b, end_a, end_b; int ret; - u8 i; + u8 i, j; if (!mqprio->qopt.num_tc) return 0; @@ -47,6 +48,31 @@ static int cxgb4_mqprio_validate(struct net_device *dev, qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset); qcount += mqprio->qopt.count[i]; + start_a = mqprio->qopt.offset[i]; + end_a = start_a + mqprio->qopt.count[i] - 1; + for (j = i + 1; j < mqprio->qopt.num_tc; j++) { + start_b = mqprio->qopt.offset[j]; + end_b = start_b + mqprio->qopt.count[j] - 1; + + /* If queue count is 0, then the traffic + * belonging to this class will not use + * ETHOFLD queues. So, no need to validate + * further. 
+ */ + if (!mqprio->qopt.count[i]) + break; + + if (!mqprio->qopt.count[j]) + continue; + + if (max_t(u32, start_a, start_b) <= + min_t(u32, end_a, end_b)) { + netdev_err(dev, + "Queues can't overlap across tc\n"); + return -EINVAL; + } + } + /* Convert byte per second to bits per second */ min_rate += (mqprio->min_rate[i] * 8); max_rate += (mqprio->max_rate[i] * 8); @@ -145,6 +171,10 @@ static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev) kfree(adap->sge.eohw_rxq); return -ENOMEM; } + + refcount_set(&adap->tc_mqprio->refcnt, 1); + } else { + refcount_inc(&adap->tc_mqprio->refcnt); } if (!(adap->flags & CXGB4_USING_MSIX)) @@ -205,7 +235,6 @@ static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev) cxgb4_enable_rx(adap, &eorxq->rspq); } - refcount_inc(&adap->tc_mqprio->refcnt); return 0; out_free_msix: @@ -234,9 +263,10 @@ out_free_queues: t4_sge_free_ethofld_txq(adap, eotxq); } - kfree(adap->sge.eohw_txq); - kfree(adap->sge.eohw_rxq); - + if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) { + kfree(adap->sge.eohw_txq); + kfree(adap->sge.eohw_rxq); + } return ret; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index 3e61bd5d0c29..cebe1412d960 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -165,6 +165,22 @@ static void *t4_sched_entry_lookup(struct port_info *pi, return found; } +struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev, + struct ch_sched_queue *p) +{ + struct port_info *pi = netdev2pinfo(dev); + struct sched_queue_entry *qe = NULL; + struct adapter *adap = pi->adapter; + struct sge_eth_txq *txq; + + if (p->queue < 0 || p->queue >= pi->nqsets) + return NULL; + + txq = &adap->sge.ethtxq[pi->first_qset + p->queue]; + qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id); + return qe ? 
&pi->sched_tbl->tab[qe->param.class] : NULL; +} + static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) { struct sched_queue_entry *qe = NULL; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h index e92ff68bdd0a..5cc74a5a1774 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.h +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h @@ -103,6 +103,8 @@ static inline bool valid_class_id(struct net_device *dev, u8 class_id) return true; } +struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev, + struct ch_sched_queue *p); int cxgb4_sched_class_bind(struct net_device *dev, void *arg, enum sched_bind_type type); int cxgb4_sched_class_unbind(struct net_device *dev, void *arg, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 19d18acfc9a6..844fdcf55118 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -4089,7 +4089,8 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause) if (cc_pause & PAUSE_TX) fw_pause |= FW_PORT_CAP32_802_3_PAUSE; else - fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR; + fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR | + FW_PORT_CAP32_802_3_PAUSE; } else if (cc_pause & PAUSE_TX) { fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR; } @@ -8563,17 +8564,17 @@ static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus) void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) { const struct fw_port_cmd *cmd = (const void *)rpl; - int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); - struct adapter *adapter = pi->adapter; + fw_port_cap32_t pcaps, acaps, lpacaps, linkattr; struct link_config *lc = &pi->link_cfg; - int link_ok, linkdnrc; - enum fw_port_type port_type; + struct adapter *adapter = pi->adapter; + unsigned int speed, fc, fec, adv_fc; enum fw_port_module_type mod_type; - unsigned int speed, fc, fec; - fw_port_cap32_t pcaps, acaps, lpacaps, linkattr; + int action, link_ok, linkdnrc; + enum fw_port_type port_type; /* Extract the various fields from the Port Information message. 
*/ + action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); switch (action) { case FW_PORT_ACTION_GET_PORT_INFO: { u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype); @@ -8611,6 +8612,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) } fec = fwcap_to_cc_fec(acaps); + adv_fc = fwcap_to_cc_pause(acaps); fc = fwcap_to_cc_pause(linkattr); speed = fwcap_to_speed(linkattr); @@ -8667,7 +8669,9 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) } if (link_ok != lc->link_ok || speed != lc->speed || - fc != lc->fc || fec != lc->fec) { /* something changed */ + fc != lc->fc || adv_fc != lc->advertised_fc || + fec != lc->fec) { + /* something changed */ if (!link_ok && lc->link_ok) { lc->link_down_rc = linkdnrc; dev_warn_ratelimited(adapter->pdev_dev, @@ -8677,6 +8681,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) } lc->link_ok = link_ok; lc->speed = speed; + lc->advertised_fc = adv_fc; lc->fc = fc; lc->fec = fec; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index f6fc0875d5b0..f4d41f968afa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1690,8 +1690,8 @@ static void cxgb4vf_get_pauseparam(struct net_device *dev, struct port_info *pi = netdev_priv(dev); pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; - pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0; - pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0; + pauseparam->rx_pause = (pi->link_cfg.advertised_fc & PAUSE_RX) != 0; + pauseparam->tx_pause = (pi->link_cfg.advertised_fc & PAUSE_TX) != 0; } /* diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index ccca67cf4487..57cfd10a99ec 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -135,6 +135,7 @@ struct link_config { enum cc_pause requested_fc; /* flow control user has requested */ enum cc_pause fc; /* actual link flow control */ + enum cc_pause advertised_fc; /* actual advertised flow control */ enum cc_fec auto_fec; /* Forward Error Correction: */ enum cc_fec requested_fec; /* "automatic" (IEEE 802.3), */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 8a389d617a23..9d49ff211cc1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -1913,16 +1913,16 @@ static const char *t4vf_link_down_rc_str(unsigned char link_down_rc) static void t4vf_handle_get_port_info(struct port_info *pi, const struct fw_port_cmd *cmd) { - int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); - struct adapter *adapter = pi->adapter; + fw_port_cap32_t pcaps, acaps, lpacaps, linkattr; struct link_config *lc = &pi->link_cfg; - int link_ok, linkdnrc; - enum fw_port_type port_type; + struct adapter *adapter = pi->adapter; + unsigned int speed, fc, fec, adv_fc; enum fw_port_module_type mod_type; - unsigned int speed, fc, fec; - fw_port_cap32_t pcaps, acaps, lpacaps, linkattr; + int action, link_ok, linkdnrc; + enum fw_port_type port_type; /* Extract the various fields from the Port Information message. 
*/ + action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); switch (action) { case FW_PORT_ACTION_GET_PORT_INFO: { u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype); @@ -1982,6 +1982,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi, } fec = fwcap_to_cc_fec(acaps); + adv_fc = fwcap_to_cc_pause(acaps); fc = fwcap_to_cc_pause(linkattr); speed = fwcap_to_speed(linkattr); @@ -2012,7 +2013,9 @@ static void t4vf_handle_get_port_info(struct port_info *pi, } if (link_ok != lc->link_ok || speed != lc->speed || - fc != lc->fc || fec != lc->fec) { /* something changed */ + fc != lc->fc || adv_fc != lc->advertised_fc || + fec != lc->fec) { + /* something changed */ if (!link_ok && lc->link_ok) { lc->link_down_rc = linkdnrc; dev_warn_ratelimited(adapter->pdev_dev, @@ -2022,6 +2025,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi, } lc->link_ok = link_ok; lc->speed = speed; + lc->advertised_fc = adv_fc; lc->fc = fc; lc->fec = fec; diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index a8f4c69252ff..2814b96751b4 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -576,6 +576,8 @@ static int gmac_setup_txqs(struct net_device *netdev) if (port->txq_dma_base & ~DMA_Q_BASE_MASK) { dev_warn(geth->dev, "TX queue base is not aligned\n"); + dma_free_coherent(geth->dev, len * sizeof(*desc_ring), + desc_ring, port->txq_dma_base); kfree(skb_tab); return -ENOMEM; } diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 6a9d12dad5d9..a301f0095223 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -1719,7 +1719,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, int page_offset; unsigned int sz; int *count_ptr; - int i; + int i, j; vaddr = phys_to_virt(addr); WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); @@ -1736,14 +1736,14 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr, SMP_CACHE_BYTES)); + dma_unmap_page(priv->rx_dma_dev, sg_addr, + DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); + /* We may use multiple Rx pools */ dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); if (!dpaa_bp) goto free_buffers; - count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); - dma_unmap_page(priv->rx_dma_dev, sg_addr, - DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); if (!skb) { sz = dpaa_bp->size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); @@ -1786,7 +1786,9 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, skb_add_rx_frag(skb, i - 1, head_page, frag_off, frag_len, dpaa_bp->size); } + /* Update the pool count for the current {cpu x bpool} */ + count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); (*count_ptr)--; if (qm_sg_entry_is_final(&sgt[i])) @@ -1800,26 +1802,25 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, return skb; free_buffers: - /* compensate sw bpool counter changes */ - for (i--; i >= 0; i--) { - dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); - if (dpaa_bp) { - count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); - (*count_ptr)++; - } - } /* free all the SG entries */ - for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) { - sg_addr = qm_sg_addr(&sgt[i]); + for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) { + sg_addr = qm_sg_addr(&sgt[j]); sg_vaddr = phys_to_virt(sg_addr); + /* all pages 0..i were unmaped */ + if (j > i) + dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]), + 
DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); free_pages((unsigned long)sg_vaddr, 0); - dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); - if (dpaa_bp) { - count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); - (*count_ptr)--; + /* counters 0..i-1 were decremented */ + if (j >= i) { + dpaa_bp = dpaa_bpid2pool(sgt[j].bpid); + if (dpaa_bp) { + count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); + (*count_ptr)--; + } } - if (qm_sg_entry_is_final(&sgt[i])) + if (qm_sg_entry_is_final(&sgt[j])) break; } /* free the SGT fragment */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c index a9503aea527f..6437fe6b9abf 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c @@ -160,10 +160,10 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev) irq = mc_dev->irqs[0]; ptp_qoriq->irq = irq->msi_desc->irq; - err = devm_request_threaded_irq(dev, ptp_qoriq->irq, NULL, - dpaa2_ptp_irq_handler_thread, - IRQF_NO_SUSPEND | IRQF_ONESHOT, - dev_name(dev), ptp_qoriq); + err = request_threaded_irq(ptp_qoriq->irq, NULL, + dpaa2_ptp_irq_handler_thread, + IRQF_NO_SUSPEND | IRQF_ONESHOT, + dev_name(dev), ptp_qoriq); if (err < 0) { dev_err(dev, "devm_request_threaded_irq(): %d\n", err); goto err_free_mc_irq; @@ -173,18 +173,20 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev) DPRTC_IRQ_INDEX, 1); if (err < 0) { dev_err(dev, "dprtc_set_irq_enable(): %d\n", err); - goto err_free_mc_irq; + goto err_free_threaded_irq; } err = ptp_qoriq_init(ptp_qoriq, base, &dpaa2_ptp_caps); if (err) - goto err_free_mc_irq; + goto err_free_threaded_irq; dpaa2_phc_index = ptp_qoriq->phc_index; dev_set_drvdata(dev, ptp_qoriq); return 0; +err_free_threaded_irq: + free_irq(ptp_qoriq->irq, ptp_qoriq); err_free_mc_irq: fsl_mc_free_irqs(mc_dev); err_unmap: diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 05c1899f6628..9294027e9d90 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2199,8 +2199,14 @@ static void fec_enet_get_regs(struct net_device *ndev, { struct fec_enet_private *fep = netdev_priv(ndev); u32 __iomem *theregs = (u32 __iomem *)fep->hwp; + struct device *dev = &fep->pdev->dev; u32 *buf = (u32 *)regbuf; u32 i, off; + int ret; + + ret = pm_runtime_get_sync(dev); + if (ret < 0) + return; regs->version = fec_enet_register_version; @@ -2216,6 +2222,9 @@ static void fec_enet_get_regs(struct net_device *ndev, off >>= 2; buf[off] = readl(&theregs[off]); } + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); } static int fec_enet_get_ts_info(struct net_device *ndev, diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index edec61dfc868..9f52e72ff641 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -418,8 +418,6 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, rx->cnt = cnt; rx->fill_cnt += work_done; - /* restock desc ring slots */ - dma_wmb(); /* Ensure descs are visible before ringing doorbell */ gve_rx_write_doorbell(priv, rx); return gve_rx_work_pending(rx); } diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index f4889431f9b7..d0244feb0301 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -487,10 +487,6 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev) * may have added 
descriptors without ringing the doorbell. */ - /* Ensure tx descs from a prior gve_tx are visible before - * ringing doorbell. - */ - dma_wmb(); gve_tx_put_doorbell(priv, tx->q_resources, tx->req); return NETDEV_TX_BUSY; } @@ -505,8 +501,6 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev) if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) return NETDEV_TX_OK; - /* Ensure tx descs are visible before ringing doorbell */ - dma_wmb(); gve_tx_put_doorbell(priv, tx->q_resources, tx->req); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 3e9b6d543c77..150a8ccfb8b1 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -543,9 +543,9 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) skb_tx_timestamp(skb); hip04_set_xmit_desc(priv, phys); - priv->tx_head = TX_NEXT(tx_head); count++; netdev_sent_queue(ndev, skb->len); + priv->tx_head = TX_NEXT(tx_head); stats->tx_bytes += skb->len; stats->tx_packets++; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 14ab20491fd0..eb69e5c81a4d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -565,7 +565,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, skb = *out_skb = napi_alloc_skb(&ring_data->napi, HNS_RX_HEAD_SIZE); if (unlikely(!skb)) { - netdev_err(ndev, "alloc rx skb fail\n"); ring->stats.sw_err_cnt++; return -ENOMEM; } @@ -1056,7 +1055,6 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget) container_of(napi, struct hns_nic_ring_data, napi); struct hnae_ring *ring = ring_data->ring; -try_again: clean_complete += ring_data->poll_one( ring_data, budget - clean_complete, ring_data->ex_process); @@ -1066,7 +1064,7 @@ try_again: napi_complete(napi); ring->q->handle->dev->ops->toggle_ring_irq(ring, 0); } else { - goto try_again; + return budget; } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 69545dd6c938..b3deb5e5ce29 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -54,6 +54,8 @@ MODULE_PARM_DESC(debug, " Network interface message level setting"); #define HNS3_INNER_VLAN_TAG 1 #define HNS3_OUTER_VLAN_TAG 2 +#define HNS3_MIN_TX_LEN 33U + /* hns3_pci_tbl - PCI Device ID Table * * Last entry must be all 0s @@ -1405,6 +1407,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) int bd_num = 0; int ret; + /* Hardware can only handle short frames above 32 bytes */ + if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) + return NETDEV_TX_OK; + /* Prefetch the data used later */ prefetch(skb->data); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index c90080781924..830791ab4619 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -184,7 +184,7 @@ static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter, netdev_err(netdev, "Device down!\n"); return -ENODEV; } - if (retry--) + if (!retry--) break; if (wait_for_completion_timeout(comp_done, div_timeout)) return 0; diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 6c51b1bad8c4..37a2314d3e6b 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ 
b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -185,13 +185,12 @@ struct e1000_phy_regs { /* board specific private data structure */ struct e1000_adapter { + struct timer_list watchdog_timer; struct timer_list phy_info_timer; struct timer_list blink_timer; struct work_struct reset_task; - struct delayed_work watchdog_task; - - struct workqueue_struct *e1000_workqueue; + struct work_struct watchdog_task; const struct e1000_info *ei; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index fe7997c18a10..7c5b18d87b49 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1780,8 +1780,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, HZ); + mod_timer(&adapter->watchdog_timer, jiffies + 1); } /* Reset on uncorrectable ECC error */ @@ -1861,8 +1860,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data) } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, HZ); + mod_timer(&adapter->watchdog_timer, jiffies + 1); } /* Reset on uncorrectable ECC error */ @@ -1907,8 +1905,7 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) hw->mac.get_link_status = true; /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, HZ); + mod_timer(&adapter->watchdog_timer, jiffies + 1); } if (!test_bit(__E1000_DOWN, &adapter->state)) @@ -4284,6 +4281,7 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) napi_synchronize(&adapter->napi); + del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); spin_lock(&adapter->stats64_lock); @@ -5155,11 +5153,25 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) } } +/** + * e1000_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void e1000_watchdog(struct timer_list *t) +{ + struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer); + + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); + + /* TODO: make this use queue_delayed_work() */ +} + static void e1000_watchdog_task(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, - watchdog_task.work); + watchdog_task); struct net_device *netdev = adapter->netdev; struct e1000_mac_info *mac = &adapter->hw.mac; struct e1000_phy_info *phy = &adapter->hw.phy; @@ -5407,9 +5419,8 @@ link_up: /* Reset the timer */ if (!test_bit(__E1000_DOWN, &adapter->state)) - queue_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, - round_jiffies(2 * HZ)); + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); } #define E1000_TX_FLAGS_CSUM 0x00000001 @@ -7449,21 +7460,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_eeprom; } - adapter->e1000_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, - e1000e_driver_name); - - if (!adapter->e1000_workqueue) { - err = -ENOMEM; - goto err_workqueue; - } - - INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task); - 
queue_delayed_work(adapter->e1000_workqueue, &adapter->watchdog_task, - 0); - + timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0); timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0); INIT_WORK(&adapter->reset_task, e1000_reset_task); + INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); @@ -7557,9 +7558,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; err_register: - flush_workqueue(adapter->e1000_workqueue); - destroy_workqueue(adapter->e1000_workqueue); -err_workqueue: if (!(adapter->flags & FLAG_HAS_AMT)) e1000e_release_hw_control(adapter); err_eeprom: @@ -7604,17 +7602,15 @@ static void e1000_remove(struct pci_dev *pdev) * from being rescheduled. */ set_bit(__E1000_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); cancel_work_sync(&adapter->downshift_task); cancel_work_sync(&adapter->update_phy_task); cancel_work_sync(&adapter->print_hang_task); - cancel_delayed_work(&adapter->watchdog_task); - flush_workqueue(adapter->e1000_workqueue); - destroy_workqueue(adapter->e1000_workqueue); - if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { cancel_work_sync(&adapter->tx_hwtstamp_work); if (adapter->tx_hwtstamp_skb) { diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index cb6367334ca7..4833187bd259 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -1152,7 +1152,7 @@ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags); static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi) { - return !!vsi->xdp_prog; + return !!READ_ONCE(vsi->xdp_prog); } int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 9f0a4e92a231..37514a75f928 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -536,6 +536,11 @@ static void i40e_set_hw_flags(struct i40e_hw *hw) (aq->api_maj_ver == 1 && aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722)) hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; + + if (aq->api_maj_ver > 1 || + (aq->api_maj_ver == 1 && + aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722)) + hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; /* fall through */ default: break; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 1ccabeafa44c..2c5af6d4a6b1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6823,8 +6823,8 @@ void i40e_down(struct i40e_vsi *vsi) for (i = 0; i < vsi->num_queue_pairs; i++) { i40e_clean_tx_ring(vsi->tx_rings[i]); if (i40e_enabled_xdp_vsi(vsi)) { - /* Make sure that in-progress ndo_xdp_xmit - * calls are completed. + /* Make sure that in-progress ndo_xdp_xmit and + * ndo_xsk_wakeup calls are completed. */ synchronize_rcu(); i40e_clean_tx_ring(vsi->xdp_rings[i]); @@ -12546,8 +12546,12 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, old_prog = xchg(&vsi->xdp_prog, prog); - if (need_reset) + if (need_reset) { + if (!prog) + /* Wait until ndo_xsk_wakeup completes. 
*/ + synchronize_rcu(); i40e_reset_and_rebuild(pf, true, true); + } for (i = 0; i < vsi->num_queue_pairs; i++) WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 6a3f0fc56c3b..69523ac85639 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2322,6 +2322,22 @@ static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map, } /** + * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTHCHNL + * @vqs: virtchnl_queue_select structure containing bitmaps to validate + * + * Returns true if validation was successful, else false. + */ +static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs) +{ + if ((!vqs->rx_queues && !vqs->tx_queues) || + vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) || + vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES)) + return false; + + return true; +} + +/** * i40e_vc_enable_queues_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2346,7 +2362,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) goto error_param; } - if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { + if (i40e_vc_validate_vqs_bitmaps(vqs)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2408,9 +2424,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) goto error_param; } - if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) || - vqs->rx_queues > I40E_MAX_VF_QUEUES || - vqs->tx_queues > I40E_MAX_VF_QUEUES) { + if (i40e_vc_validate_vqs_bitmaps(vqs)) { aq_ret = I40E_ERR_PARAM; goto error_param; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index d07e1a890428..f73cd917c44f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -787,8 +787,12 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; struct i40e_ring *ring; + if (test_bit(__I40E_CONFIG_BUSY, pf->state)) + return -ENETDOWN; + if (test_bit(__I40E_VSI_DOWN, vsi->state)) return -ENETDOWN; diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 29de3ae96ef2..bd1b1ed323f4 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -415,4 +415,6 @@ void iavf_enable_channels(struct iavf_adapter *adapter); void iavf_disable_channels(struct iavf_adapter *adapter); void iavf_add_cloud_filter(struct iavf_adapter *adapter); void iavf_del_cloud_filter(struct iavf_adapter *adapter); +struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, + const u8 *macaddr); #endif /* _IAVF_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 821987da5698..8e16be960e96 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -743,9 +743,8 @@ iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter, * * Returns ptr to the filter object or NULL when no memory available. 
**/ -static struct -iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, - const u8 *macaddr) +struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, + const u8 *macaddr) { struct iavf_mac_filter *f; @@ -2065,9 +2064,9 @@ static void iavf_reset_task(struct work_struct *work) struct virtchnl_vf_resource *vfres = adapter->vf_res; struct net_device *netdev = adapter->netdev; struct iavf_hw *hw = &adapter->hw; + struct iavf_mac_filter *f, *ftmp; struct iavf_vlan_filter *vlf; struct iavf_cloud_filter *cf; - struct iavf_mac_filter *f; u32 reg_val; int i = 0, err; bool running; @@ -2181,6 +2180,16 @@ continue_reset: spin_lock_bh(&adapter->mac_vlan_list_lock); + /* Delete filter for the current MAC address, it could have + * been changed by the PF via administratively set MAC. + * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES. + */ + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) { + list_del(&f->list); + kfree(f); + } + } /* re-add all MAC filters */ list_for_each_entry(f, &adapter->mac_filter_list, list) { f->add = true; diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index c46770eba320..1ab9cb339acb 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1359,6 +1359,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } + spin_lock_bh(&adapter->mac_vlan_list_lock); + iavf_add_filter(adapter, adapter->hw.mac.addr); + spin_unlock_bh(&adapter->mac_vlan_list_lock); iavf_process_config(adapter); } break; diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 8a6ef3514129..438b42ce2cd9 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -530,7 +530,7 @@ static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw) dev_spec->module_plugged = true; if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { hw->phy.media_type = e1000_media_type_internal_serdes; - } else if (eth_flags->e100_base_fx) { + } else if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) { dev_spec->sgmii_active = true; hw->phy.media_type = e1000_media_type_internal_serdes; } else if (eth_flags->e1000_base_t) { @@ -657,14 +657,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) break; } - /* do not change link mode for 100BaseFX */ - if (dev_spec->eth_flags.e100_base_fx) - break; - /* change current link mode setting */ ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; - if (hw->phy.media_type == e1000_media_type_copper) + if (dev_spec->sgmii_active) ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; else ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 4690d6c87f39..445fbdce3e25 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -181,7 +181,7 @@ static int igb_get_link_ksettings(struct net_device *netdev, advertising &= ~ADVERTISED_1000baseKX_Full; } } - if (eth_flags->e100_base_fx) { + if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) { supported |= SUPPORTED_100baseT_Full; advertising |= ADVERTISED_100baseT_Full; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 
25c097cd8100..a2b2ad1f60b1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5239,7 +5239,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct hlist_node *node2; struct ixgbe_fdir_filter *filter; - u64 action; + u8 queue; spin_lock(&adapter->fdir_perfect_lock); @@ -5248,17 +5248,34 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, fdir_node) { - action = filter->action; - if (action != IXGBE_FDIR_DROP_QUEUE && action != 0) - action = - (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1; + if (filter->action == IXGBE_FDIR_DROP_QUEUE) { + queue = IXGBE_FDIR_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(filter->action); + u8 vf = ethtool_get_flow_spec_ring_vf(filter->action); + + if (!vf && (ring >= adapter->num_rx_queues)) { + e_err(drv, "FDIR restore failed without VF, ring: %u\n", + ring); + continue; + } else if (vf && + ((vf > adapter->num_vfs) || + ring >= adapter->num_rx_queues_per_pool)) { + e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n", + vf, ring); + continue; + } + + /* Map the ring onto the absolute queue index */ + if (!vf) + queue = adapter->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * + adapter->num_rx_queues_per_pool) + ring; + } ixgbe_fdir_write_perfect_filter_82599(hw, - &filter->filter, - filter->sw_idx, - (action == IXGBE_FDIR_DROP_QUEUE) ? - IXGBE_FDIR_DROP_QUEUE : - adapter->rx_ring[action]->reg_idx); + &filter->filter, filter->sw_idx, queue); } spin_unlock(&adapter->fdir_perfect_lock); @@ -10261,7 +10278,12 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) /* If transitioning XDP modes reconfigure rings */ if (need_reset) { - int err = ixgbe_setup_tc(dev, adapter->hw_tcs); + int err; + + if (!prog) + /* Wait until ndo_xsk_wakeup completes. 
*/ + synchronize_rcu(); + err = ixgbe_setup_tc(dev, adapter->hw_tcs); if (err) { rcu_assign_pointer(adapter->xdp_prog, old_prog); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index d6feaacfbf89..b43be9f14105 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -709,10 +709,14 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) if (qid >= adapter->num_xdp_queues) return -ENXIO; - if (!adapter->xdp_ring[qid]->xsk_umem) + ring = adapter->xdp_ring[qid]; + + if (test_bit(__IXGBE_TX_DISABLED, &ring->state)) + return -ENETDOWN; + + if (!ring->xsk_umem) return -ENXIO; - ring = adapter->xdp_ring[qid]; if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) { u64 eics = BIT_ULL(ring->q_vector->v_idx); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 076f2da36f27..64ec0e7c64b4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -2081,11 +2081,6 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev) struct ixgbe_hw *hw = &adapter->hw; int count = 0; - if ((netdev_uc_count(netdev)) > 10) { - pr_err("Too many unicast filters - No Space\n"); - return -ENOSPC; - } - if (!netdev_uc_empty(netdev)) { struct netdev_hw_addr *ha; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 71a872d46bc4..67ad8b8b127d 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2081,7 +2081,11 @@ static int mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, struct bpf_prog *prog, struct xdp_buff *xdp) { - u32 ret, act = bpf_prog_run_xdp(prog, xdp); + unsigned int len; + u32 ret, act; + + len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; + act = bpf_prog_run_xdp(prog, xdp); switch (act) { case XDP_PASS: @@ -2094,9 +2098,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, if (err) { ret = MVNETA_XDP_DROPPED; __page_pool_put_page(rxq->page_pool, - virt_to_head_page(xdp->data), - xdp->data_end - xdp->data_hard_start, - true); + virt_to_head_page(xdp->data), + len, true); } else { ret = MVNETA_XDP_REDIR; } @@ -2106,9 +2109,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, ret = mvneta_xdp_xmit_back(pp, xdp); if (ret != MVNETA_XDP_TX) __page_pool_put_page(rxq->page_pool, - virt_to_head_page(xdp->data), - xdp->data_end - xdp->data_hard_start, - true); + virt_to_head_page(xdp->data), + len, true); break; default: bpf_warn_invalid_xdp_action(act); @@ -2119,8 +2121,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, case XDP_DROP: __page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), - xdp->data_end - xdp->data_hard_start, - true); + len, true); ret = MVNETA_XDP_DROPPED; break; } diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 62dc2f362a16..14e372cda7f4 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3680,7 +3680,7 @@ static int mvpp2_open(struct net_device *dev) valid = true; } - if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) { + if (priv->hw_version == MVPP22 && port->link_irq) { err = request_irq(port->link_irq, mvpp2_link_status_isr, 0, dev->name, port); if (err) { diff --git 
a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c index eaf08f7ad128..64ed725aec28 100644 --- a/drivers/net/ethernet/mellanox/mlx4/crdump.c +++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c @@ -182,7 +182,7 @@ int mlx4_crdump_collect(struct mlx4_dev *dev) crdump_enable_crspace_access(dev, cr_space); /* Get the available snapshot ID for the dumps */ - id = devlink_region_shapshot_id_get(devlink); + id = devlink_region_snapshot_id_get(devlink); /* Try to capture dumps */ mlx4_crdump_collect_crspace(dev, cr_space, id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 2c16add0b642..9c8427698238 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -760,7 +760,7 @@ enum { MLX5E_STATE_OPENED, MLX5E_STATE_DESTROYING, MLX5E_STATE_XDP_TX_ENABLED, - MLX5E_STATE_XDP_OPEN, + MLX5E_STATE_XDP_ACTIVE, }; struct mlx5e_rqt { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 68d593074f6c..d48292ccda29 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -122,6 +122,22 @@ enum { #endif }; +#define MLX5E_TTC_NUM_GROUPS 3 +#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT) +#define MLX5E_TTC_GROUP2_SIZE BIT(1) +#define MLX5E_TTC_GROUP3_SIZE BIT(0) +#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\ + MLX5E_TTC_GROUP2_SIZE +\ + MLX5E_TTC_GROUP3_SIZE) + +#define MLX5E_INNER_TTC_NUM_GROUPS 3 +#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3) +#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1) +#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0) +#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\ + MLX5E_INNER_TTC_GROUP2_SIZE +\ + MLX5E_INNER_TTC_GROUP3_SIZE) + #ifdef CONFIG_MLX5_EN_RXNFC struct mlx5e_ethtool_table { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c index 1d6b58860da6..3a975641f902 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c @@ -197,9 +197,10 @@ int mlx5e_health_report(struct mlx5e_priv *priv, struct devlink_health_reporter *reporter, char *err_str, struct mlx5e_err_ctx *err_ctx) { - if (!reporter) { - netdev_err(priv->netdev, err_str); + netdev_err(priv->netdev, err_str); + + if (!reporter) return err_ctx->recover(&err_ctx->ctx); - } + return devlink_health_report(reporter, err_str, err_ctx); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 36ac1e3816b9..d7587f40ecae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -75,12 +75,18 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv) { set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); + + if (priv->channels.params.xdp_prog) + set_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state); } static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv) { + if (priv->channels.params.xdp_prog) + clear_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state); + clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); - /* let other device's napi(s) see our new state */ + /* Let other device's napi(s) and XSK wakeups see our new state. 
*/ synchronize_rcu(); } @@ -89,19 +95,9 @@ static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv) return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); } -static inline void mlx5e_xdp_set_open(struct mlx5e_priv *priv) -{ - set_bit(MLX5E_STATE_XDP_OPEN, &priv->state); -} - -static inline void mlx5e_xdp_set_closed(struct mlx5e_priv *priv) -{ - clear_bit(MLX5E_STATE_XDP_OPEN, &priv->state); -} - -static inline bool mlx5e_xdp_is_open(struct mlx5e_priv *priv) +static inline bool mlx5e_xdp_is_active(struct mlx5e_priv *priv) { - return test_bit(MLX5E_STATE_XDP_OPEN, &priv->state); + return test_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state); } static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index 631af8dee517..c28cbae42331 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -144,6 +144,7 @@ void mlx5e_close_xsk(struct mlx5e_channel *c) { clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state); napi_synchronize(&c->napi); + synchronize_rcu(); /* Sync with the XSK wakeup. */ mlx5e_close_rq(&c->xskrq); mlx5e_close_cq(&c->xskrq.cq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c index 87827477d38c..fe2d596cb361 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c @@ -14,7 +14,7 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) struct mlx5e_channel *c; u16 ix; - if (unlikely(!mlx5e_xdp_is_open(priv))) + if (unlikely(!mlx5e_xdp_is_active(priv))) return -ENETDOWN; if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix))) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 15b7f0f1427c..73d3dc07331f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -904,22 +904,6 @@ del_rules: return err; } -#define MLX5E_TTC_NUM_GROUPS 3 -#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT) -#define MLX5E_TTC_GROUP2_SIZE BIT(1) -#define MLX5E_TTC_GROUP3_SIZE BIT(0) -#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\ - MLX5E_TTC_GROUP2_SIZE +\ - MLX5E_TTC_GROUP3_SIZE) - -#define MLX5E_INNER_TTC_NUM_GROUPS 3 -#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3) -#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1) -#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0) -#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\ - MLX5E_INNER_TTC_GROUP2_SIZE +\ - MLX5E_INNER_TTC_GROUP3_SIZE) - static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc, bool use_ipv) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 4980e80a5e85..4997b8a51994 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3000,12 +3000,9 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) int mlx5e_open_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); - bool is_xdp = priv->channels.params.xdp_prog; int err; set_bit(MLX5E_STATE_OPENED, &priv->state); - if (is_xdp) - mlx5e_xdp_set_open(priv); err = mlx5e_open_channels(priv, &priv->channels); if (err) @@ -3020,8 +3017,6 @@ int mlx5e_open_locked(struct net_device *netdev) return 0; 
err_clear_state_opened_flag: - if (is_xdp) - mlx5e_xdp_set_closed(priv); clear_bit(MLX5E_STATE_OPENED, &priv->state); return err; } @@ -3053,8 +3048,6 @@ int mlx5e_close_locked(struct net_device *netdev) if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; - if (priv->channels.params.xdp_prog) - mlx5e_xdp_set_closed(priv); clear_bit(MLX5E_STATE_OPENED, &priv->state); netif_carrier_off(priv->netdev); @@ -4371,16 +4364,6 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) return 0; } -static int mlx5e_xdp_update_state(struct mlx5e_priv *priv) -{ - if (priv->channels.params.xdp_prog) - mlx5e_xdp_set_open(priv); - else - mlx5e_xdp_set_closed(priv); - - return 0; -} - static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -4415,7 +4398,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) mlx5e_set_rq_type(priv->mdev, &new_channels.params); old_prog = priv->channels.params.xdp_prog; - err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_xdp_update_state); + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); if (err) goto unlock; } else { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 9b32a9c0f497..024e1cddfd0e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -592,7 +592,7 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp, for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) ttc_params->indir_tirn[tt] = hp->indir_tirn[tt]; - ft_attr->max_fte = MLX5E_NUM_TT; + ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE; ft_attr->level = MLX5E_TC_TTC_FT_LEVEL; ft_attr->prio = MLX5E_TC_PRIO; } @@ -2999,6 +2999,25 @@ static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info return kmemdup(tun_info, tun_size, GFP_KERNEL); } +static bool is_duplicated_encap_entry(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + int out_index, + struct mlx5e_encap_entry *e, + struct netlink_ext_ack *extack) +{ + int i; + + for (i = 0; i < out_index; i++) { + if (flow->encaps[i].e != e) + continue; + NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action"); + netdev_err(priv->netdev, "can't duplicate encap action\n"); + return true; + } + + return false; +} + static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, struct net_device *mirred_dev, @@ -3034,6 +3053,12 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, /* must verify if encap is valid or not */ if (e) { + /* Check that entry was not already attached to this flow */ + if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) { + err = -EOPNOTSUPP; + goto out_err; + } + mutex_unlock(&esw->offloads.encap_tbl_lock); wait_for_completion(&e->res_ready); @@ -3220,6 +3245,26 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, same_hw_devs(priv, netdev_priv(out_dev)); } +static bool is_duplicated_output_device(struct net_device *dev, + struct net_device *out_dev, + int *ifindexes, int if_count, + struct netlink_ext_ack *extack) +{ + int i; + + for (i = 0; i < if_count; i++) { + if (ifindexes[i] == out_dev->ifindex) { + NL_SET_ERR_MSG_MOD(extack, + "can't duplicate output to same device"); + netdev_err(dev, "can't duplicate output to same device: %s\n", + out_dev->name); + return true; + } + } + + return false; +} + static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct flow_action *flow_action, 
struct mlx5e_tc_flow *flow, @@ -3231,11 +3276,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; struct mlx5e_rep_priv *rpriv = priv->ppriv; const struct ip_tunnel_info *info = NULL; + int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS]; bool ft_flow = mlx5e_is_ft_flow(flow); const struct flow_action_entry *act; + int err, i, if_count = 0; bool encap = false; u32 action = 0; - int err, i; if (!flow_action_has_entries(flow_action)) return -EINVAL; @@ -3312,6 +3358,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); struct net_device *uplink_upper; + if (is_duplicated_output_device(priv->netdev, + out_dev, + ifindexes, + if_count, + extack)) + return -EOPNOTSUPP; + + ifindexes[if_count] = out_dev->ifindex; + if_count++; + rcu_read_lock(); uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 9a48c4310887..8c5df6c7d7b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -531,16 +531,9 @@ static void del_hw_fte(struct fs_node *node) } } -static void del_sw_fte_rcu(struct rcu_head *head) -{ - struct fs_fte *fte = container_of(head, struct fs_fte, rcu); - struct mlx5_flow_steering *steering = get_steering(&fte->node); - - kmem_cache_free(steering->ftes_cache, fte); -} - static void del_sw_fte(struct fs_node *node) { + struct mlx5_flow_steering *steering = get_steering(node); struct mlx5_flow_group *fg; struct fs_fte *fte; int err; @@ -553,8 +546,7 @@ static void del_sw_fte(struct fs_node *node) rhash_fte); WARN_ON(err); ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index); - - call_rcu(&fte->rcu, del_sw_fte_rcu); + kmem_cache_free(steering->ftes_cache, fte); } static void del_hw_flow_group(struct fs_node *node) @@ -1633,47 +1625,22 @@ static u64 matched_fgs_get_version(struct list_head *match_head) } static struct fs_fte * -lookup_fte_for_write_locked(struct mlx5_flow_group *g, const u32 *match_value) +lookup_fte_locked(struct mlx5_flow_group *g, + const u32 *match_value, + bool take_write) { struct fs_fte *fte_tmp; - nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); - - fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, rhash_fte); - if (!fte_tmp || !tree_get_node(&fte_tmp->node)) { - fte_tmp = NULL; - goto out; - } - - if (!fte_tmp->node.active) { - tree_put_node(&fte_tmp->node, false); - fte_tmp = NULL; - goto out; - } - nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); - -out: - up_write_ref_node(&g->node, false); - return fte_tmp; -} - -static struct fs_fte * -lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value) -{ - struct fs_fte *fte_tmp; - - if (!tree_get_node(&g->node)) - return NULL; - - rcu_read_lock(); - fte_tmp = rhashtable_lookup(&g->ftes_hash, match_value, rhash_fte); + if (take_write) + nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); + else + nested_down_read_ref_node(&g->node, FS_LOCK_PARENT); + fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, + rhash_fte); if (!fte_tmp || !tree_get_node(&fte_tmp->node)) { - rcu_read_unlock(); fte_tmp = NULL; goto out; } - rcu_read_unlock(); - if (!fte_tmp->node.active) { tree_put_node(&fte_tmp->node, false); fte_tmp = NULL; @@ -1681,19 +1648,12 @@ lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 
*match_value) } nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); - out: - tree_put_node(&g->node, false); - return fte_tmp; -} - -static struct fs_fte * -lookup_fte_locked(struct mlx5_flow_group *g, const u32 *match_value, bool write) -{ - if (write) - return lookup_fte_for_write_locked(g, match_value); + if (take_write) + up_write_ref_node(&g->node, false); else - return lookup_fte_for_read_locked(g, match_value); + up_read_ref_node(&g->node); + return fte_tmp; } static struct mlx5_flow_handle * diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index e8cd997f413e..c2621b911563 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -203,7 +203,6 @@ struct fs_fte { enum fs_fte_status status; struct mlx5_fc *counter; struct rhash_head hash; - struct rcu_head rcu; int modify_mask; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 173e2c12e1c7..cf7b8da0f010 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1193,6 +1193,12 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) if (err) goto err_load; + if (boot) { + err = mlx5_devlink_register(priv_to_devlink(dev), dev->device); + if (err) + goto err_devlink_reg; + } + if (mlx5_device_registered(dev)) { mlx5_attach_device(dev); } else { @@ -1210,6 +1216,9 @@ out: return err; err_reg_dev: + if (boot) + mlx5_devlink_unregister(priv_to_devlink(dev)); +err_devlink_reg: mlx5_unload(dev); err_load: if (boot) @@ -1347,10 +1356,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) request_module_nowait(MLX5_IB_MOD); - err = mlx5_devlink_register(devlink, &pdev->dev); - if (err) - goto clean_load; - err = mlx5_crdump_enable(dev); if (err) dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err); @@ -1358,9 +1363,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) pci_save_state(pdev); return 0; -clean_load: - mlx5_unload_one(dev, true); - err_load_one: mlx5_pci_close(dev); pci_init_err: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index 32e94d2ee5e4..e4cff7abb348 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -209,7 +209,7 @@ static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher, /* We need to copy the refcount since this ste * may have been traversed several times */ - refcount_set(&new_ste->refcount, refcount_read(&cur_ste->refcount)); + new_ste->refcount = cur_ste->refcount; /* Link old STEs rule_mem list to the new ste */ mlx5dr_rule_update_rule_member(cur_ste, new_ste); @@ -638,6 +638,9 @@ static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule, if (!rule_mem) return -ENOMEM; + INIT_LIST_HEAD(&rule_mem->list); + INIT_LIST_HEAD(&rule_mem->use_ste_list); + rule_mem->ste = ste; list_add_tail(&rule_mem->list, &nic_rule->rule_members_list); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index a5a266983dd3..c6c7d1defbd7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -348,7 +348,7 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, 
struct mlx5dr_ste *src) if (dst->next_htbl) dst->next_htbl->pointing_ste = dst; - refcount_set(&dst->refcount, refcount_read(&src->refcount)); + dst->refcount = src->refcount; INIT_LIST_HEAD(&dst->rule_list); list_splice_tail_init(&src->rule_list, &dst->rule_list); @@ -565,7 +565,7 @@ bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste) bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste) { - return !refcount_read(&ste->refcount); + return !ste->refcount; } /* Init one ste as a pattern for ste data array */ @@ -689,14 +689,14 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool, htbl->ste_arr = chunk->ste_arr; htbl->hw_ste_arr = chunk->hw_ste_arr; htbl->miss_list = chunk->miss_list; - refcount_set(&htbl->refcount, 0); + htbl->refcount = 0; for (i = 0; i < chunk->num_of_entries; i++) { struct mlx5dr_ste *ste = &htbl->ste_arr[i]; ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED; ste->htbl = htbl; - refcount_set(&ste->refcount, 0); + ste->refcount = 0; INIT_LIST_HEAD(&ste->miss_list_node); INIT_LIST_HEAD(&htbl->miss_list[i]); INIT_LIST_HEAD(&ste->rule_list); @@ -713,7 +713,7 @@ out_free_htbl: int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl) { - if (refcount_read(&htbl->refcount)) + if (htbl->refcount) return -EBUSY; mlx5dr_icm_free_chunk(htbl->chunk); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 290fe61c33d0..3fdf4a5eb031 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -123,7 +123,7 @@ struct mlx5dr_matcher_rx_tx; struct mlx5dr_ste { u8 *hw_ste; /* refcount: indicates the num of rules that using this ste */ - refcount_t refcount; + u32 refcount; /* attached to the miss_list head at each htbl entry */ struct list_head miss_list_node; @@ -155,7 +155,7 @@ struct mlx5dr_ste_htbl_ctrl { struct mlx5dr_ste_htbl { u8 lu_type; u16 byte_mask; - refcount_t refcount; + u32 refcount; struct mlx5dr_icm_chunk *chunk; struct mlx5dr_ste *ste_arr; u8 *hw_ste_arr; @@ -206,13 +206,14 @@ int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl); static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl) { - if (refcount_dec_and_test(&htbl->refcount)) + htbl->refcount--; + if (!htbl->refcount) mlx5dr_ste_htbl_free(htbl); } static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl) { - refcount_inc(&htbl->refcount); + htbl->refcount++; } /* STE utils */ @@ -254,14 +255,15 @@ static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste, struct mlx5dr_matcher *matcher, struct mlx5dr_matcher_rx_tx *nic_matcher) { - if (refcount_dec_and_test(&ste->refcount)) + ste->refcount--; + if (!ste->refcount) mlx5dr_ste_free(ste, matcher, nic_matcher); } /* initial as 0, increased only when ste appears in a new rule */ static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste) { - refcount_inc(&ste->refcount); + ste->refcount++; } void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste, diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c index 544344ac4894..79057af4fe99 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c @@ -6,6 +6,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/netlink.h> +#include <linux/vmalloc.h> #include <linux/xz.h> #include "mlxfw_mfa2.h" #include "mlxfw_mfa2_file.h" @@ -548,7 +549,7 @@ mlxfw_mfa2_file_component_get(const struct 
mlxfw_mfa2_file *mfa2_file, comp_size = be32_to_cpu(comp->size); comp_buf_size = comp_size + mlxfw_mfa2_comp_magic_len; - comp_data = kmalloc(sizeof(*comp_data) + comp_buf_size, GFP_KERNEL); + comp_data = vzalloc(sizeof(*comp_data) + comp_buf_size); if (!comp_data) return ERR_PTR(-ENOMEM); comp_data->comp.data_size = comp_size; @@ -570,7 +571,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file, comp_data->comp.data = comp_data->buff + mlxfw_mfa2_comp_magic_len; return &comp_data->comp; err_out: - kfree(comp_data); + vfree(comp_data); return ERR_PTR(err); } @@ -579,7 +580,7 @@ void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *comp) const struct mlxfw_mfa2_comp_data *comp_data; comp_data = container_of(comp, struct mlxfw_mfa2_comp_data, comp); - kfree(comp_data); + vfree(comp_data); } void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 5294a1622643..af30e8a76682 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5472,6 +5472,7 @@ enum mlxsw_reg_htgt_trap_group { MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR, MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0, MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1, + MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP, __MLXSW_REG_HTGT_TRAP_GROUP_MAX, MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 556dca328bb5..8ed15199eb4f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -860,23 +860,17 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, u64 len; int err; + if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { + this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) return NETDEV_TX_BUSY; - if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { - struct sk_buff *skb_orig = skb; - - skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); - if (!skb) { - this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); - dev_kfree_skb_any(skb_orig); - return NETDEV_TX_OK; - } - dev_consume_skb_any(skb_orig); - } - if (eth_skb_pad(skb)) { this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); return NETDEV_TX_OK; @@ -1215,6 +1209,9 @@ static void update_stats_cache(struct work_struct *work) periodic_hw_stats.update_dw.work); if (!netif_carrier_ok(mlxsw_sp_port->dev)) + /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as + * necessary when port goes down. 
+ */ goto out; mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, @@ -4324,6 +4321,15 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, return 0; } +static void +mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int i; + + for (i = 0; i < TC_MAX_QUEUE; i++) + mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; +} + static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, char *pude_pl, void *priv) { @@ -4345,6 +4351,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, } else { netdev_info(mlxsw_sp_port->dev, "link down\n"); netif_carrier_off(mlxsw_sp_port->dev); + mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); } } @@ -4542,8 +4549,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false), + MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false), /* PKT Sample trap */ MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, false, SP_IP2ME, DISCARD), @@ -4626,6 +4633,10 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) rate = 19 * 1024; burst_size = 12; break; + case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP: + rate = 360; + burst_size = 7; + break; default: continue; } @@ -4665,6 +4676,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP: priority = 5; tc = 5; break; @@ -5127,6 +5139,27 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); } +static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + + mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; + mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; + mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; + mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; + mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; + mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; + mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; + mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; + mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; + mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; + mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; + + return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); +} + static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); @@ -5629,7 +5662,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = { static struct mlxsw_driver mlxsw_sp3_driver = { .kind = mlxsw_sp3_driver_name, .priv_size = sizeof(struct mlxsw_sp), - .init = mlxsw_sp2_init, + .init = mlxsw_sp3_init, .fini = mlxsw_sp_fini, .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, .port_split = mlxsw_sp_port_split, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index 68cc6737d45c..0124bfe1963b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -195,6 +195,20 @@ mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port, return -EOPNOTSUPP; } +static u64 +mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num) +{ + return xstats->backlog[tclass_num] + + xstats->backlog[tclass_num + 8]; +} + +static u64 +mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num) +{ + return xstats->tail_drop[tclass_num] + + xstats->tail_drop[tclass_num + 8]; +} + static void mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats, u8 prio_bitmap, u64 *tx_packets, @@ -269,7 +283,7 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, &stats_base->tx_bytes); red_base->prob_mark = xstats->ecn; red_base->prob_drop = xstats->wred_drop[tclass_num]; - red_base->pdrop = xstats->tail_drop[tclass_num]; + red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num); stats_base->overlimits = red_base->prob_drop + red_base->prob_mark; stats_base->drops = red_base->prob_drop + red_base->pdrop; @@ -370,7 +384,8 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop; marks = xstats->ecn - xstats_base->prob_mark; - pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop; + pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) - + xstats_base->pdrop; res->pdrop += pdrops; res->prob_drop += early_drops; @@ -403,9 +418,10 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, overlimits = xstats->wred_drop[tclass_num] + xstats->ecn - stats_base->overlimits; - drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] - + drops = xstats->wred_drop[tclass_num] + + mlxsw_sp_xstats_tail_drop(xstats, tclass_num) - stats_base->drops; - backlog = xstats->backlog[tclass_num]; + backlog = mlxsw_sp_xstats_backlog(xstats, tclass_num); _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets); stats_ptr->qstats->overlimits += overlimits; @@ -576,9 +592,9 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port, tx_packets = stats->tx_packets - stats_base->tx_packets; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - drops += xstats->tail_drop[i]; + drops += mlxsw_sp_xstats_tail_drop(xstats, i); drops += xstats->wred_drop[i]; - backlog += xstats->backlog[i]; + backlog += mlxsw_sp_xstats_backlog(xstats, i); } drops = drops - stats_base->drops; @@ -614,7 +630,7 @@ mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, stats_base->drops = 0; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - stats_base->drops += xstats->tail_drop[i]; + stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i); stats_base->drops += xstats->wred_drop[i]; } @@ -651,6 +667,13 @@ mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle) return 0; + if (!p->child_handle) { + /* This is an invisible FIFO replacing the original Qdisc. + * Ignore it--the original Qdisc's destroy will follow. + */ + return 0; + } + /* See if the grafted qdisc is already offloaded on any tclass. If so, * unoffload it. 
*/ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 30bfe3880faf..8290e82240fc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5742,8 +5742,13 @@ static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp, if (mlxsw_sp_fib6_rt_should_ignore(rt)) return; + /* Multipath routes are first added to the FIB trie and only then + * notified. If we vetoed the addition, we will get a delete + * notification for a route we do not have. Therefore, do not warn if + * route was not found. + */ fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt); - if (WARN_ON(!fib6_entry)) + if (!fib6_entry) return; /* If not all the nexthops are deleted, then only reduce the nexthop @@ -7074,6 +7079,9 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp, for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { rif = mlxsw_sp->router->rifs[i]; + if (rif && rif->ops && + rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB) + continue; if (rif && rif->dev && rif->dev != dev && !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr, mlxsw_sp->mac_mask)) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index de6cb22f68b1..f0e98ec8f1ee 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -299,22 +299,17 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb, u64 len; int err; + if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { + this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info)) return NETDEV_TX_BUSY; - if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { - struct sk_buff *skb_orig = skb; - - skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); - if (!skb) { - this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped); - dev_kfree_skb_any(skb_orig); - return NETDEV_TX_OK; - } - dev_consume_skb_any(skb_orig); - } mlxsw_sx_txhdr_construct(skb, &tx_info); /* TX header is consumed by HW on the way so we shouldn't count its * bytes as being sent. diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 7c4a15e967df..5defd31d481c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -65,17 +65,17 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id) freed_stats_id = priv->stats_ring_size; /* Check for unallocated entries first. 
*/ if (priv->stats_ids.init_unalloc > 0) { - if (priv->active_mem_unit == priv->total_mem_units) { - priv->stats_ids.init_unalloc--; - priv->active_mem_unit = 0; - } - *stats_context_id = FIELD_PREP(NFP_FL_STAT_ID_STAT, priv->stats_ids.init_unalloc - 1) | FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, priv->active_mem_unit); - priv->active_mem_unit++; + + if (++priv->active_mem_unit == priv->total_mem_units) { + priv->stats_ids.init_unalloc--; + priv->active_mem_unit = 0; + } + return 0; } diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index d6cfe4ffbaf3..d1ce4531d01a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1230,7 +1230,7 @@ qede_configure_mcast_filtering(struct net_device *ndev, netif_addr_lock_bh(ndev); mc_count = netdev_mc_count(ndev); - if (mc_count < 64) { + if (mc_count <= 64) { netdev_for_each_mc_addr(ha, ndev) { ether_addr_copy(temp, ha->addr); temp += ETH_ALEN; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 481b096e984d..34fa3917eb33 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1406,6 +1406,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) rxq->rx_buf_seg_size = roundup_pow_of_two(size); } else { rxq->rx_buf_seg_size = PAGE_SIZE; + edev->ndev->features &= ~NETIF_F_GRO_HW; } /* Allocate the parallel driver ring for Rx buffers */ @@ -1450,6 +1451,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) } } + edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW); if (!edev->gro_disable) qede_set_tpa_param(rxq); err: @@ -1702,8 +1704,6 @@ static void qede_init_fp(struct qede_dev *edev) snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", edev->ndev->name, queue_id); } - - edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW); } static int qede_set_real_num_queues(struct qede_dev *edev) diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index b4b8ba00ee01..986f26578d34 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -2756,6 +2756,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) int err; for (i = 0; i < qdev->num_large_buffers; i++) { + lrg_buf_cb = &qdev->lrg_buf[i]; + memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); + skb = netdev_alloc_skb(qdev->ndev, qdev->lrg_buffer_len); if (unlikely(!skb)) { @@ -2766,11 +2769,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) ql_free_large_buffers(qdev); return -ENOMEM; } else { - - lrg_buf_cb = &qdev->lrg_buf[i]; - memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); lrg_buf_cb->index = i; - lrg_buf_cb->skb = skb; /* * We save some space to copy the ethhdr from first * buffer @@ -2792,6 +2791,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) return -ENOMEM; } + lrg_buf_cb->skb = skb; dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); dma_unmap_len_set(lrg_buf_cb, maplen, qdev->lrg_buffer_len - diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index e19b49c4013e..3591285250e1 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2204,24 +2204,28 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf) if (cd->tsu) { add_tsu_reg(ARSTR); add_tsu_reg(TSU_CTRST); - add_tsu_reg(TSU_FWEN0); - 
add_tsu_reg(TSU_FWEN1); - add_tsu_reg(TSU_FCM); - add_tsu_reg(TSU_BSYSL0); - add_tsu_reg(TSU_BSYSL1); - add_tsu_reg(TSU_PRISL0); - add_tsu_reg(TSU_PRISL1); - add_tsu_reg(TSU_FWSL0); - add_tsu_reg(TSU_FWSL1); + if (cd->dual_port) { + add_tsu_reg(TSU_FWEN0); + add_tsu_reg(TSU_FWEN1); + add_tsu_reg(TSU_FCM); + add_tsu_reg(TSU_BSYSL0); + add_tsu_reg(TSU_BSYSL1); + add_tsu_reg(TSU_PRISL0); + add_tsu_reg(TSU_PRISL1); + add_tsu_reg(TSU_FWSL0); + add_tsu_reg(TSU_FWSL1); + } add_tsu_reg(TSU_FWSLC); - add_tsu_reg(TSU_QTAGM0); - add_tsu_reg(TSU_QTAGM1); - add_tsu_reg(TSU_FWSR); - add_tsu_reg(TSU_FWINMK); - add_tsu_reg(TSU_ADQT0); - add_tsu_reg(TSU_ADQT1); - add_tsu_reg(TSU_VTAG0); - add_tsu_reg(TSU_VTAG1); + if (cd->dual_port) { + add_tsu_reg(TSU_QTAGM0); + add_tsu_reg(TSU_QTAGM1); + add_tsu_reg(TSU_FWSR); + add_tsu_reg(TSU_FWINMK); + add_tsu_reg(TSU_ADQT0); + add_tsu_reg(TSU_ADQT1); + add_tsu_reg(TSU_VTAG0); + add_tsu_reg(TSU_VTAG1); + } add_tsu_reg(TSU_ADSBSY); add_tsu_reg(TSU_TEN); add_tsu_reg(TSU_POST1); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index c56fcbb37066..52ed111d98f4 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -2296,7 +2296,7 @@ __setup("sxgbeeth=", sxgbe_cmdline_opt); -MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver"); +MODULE_DESCRIPTION("Samsung 10G/2.5G/1G Ethernet PLATFORM driver"); MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value"); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 992c773620ec..7a38d7f282a1 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1472,6 +1472,12 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, n_xdp_tx = num_possible_cpus(); n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES); + vec_count = pci_msix_vec_count(efx->pci_dev); + if (vec_count < 0) + return vec_count; + + max_channels = min_t(unsigned int, vec_count, max_channels); + /* Check resources. * We need a channel per event queue, plus a VI per tx queue. * This may be more pessimistic than it needs to be. @@ -1493,11 +1499,6 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, n_xdp_tx, n_xdp_ev); } - n_channels = min(n_channels, max_channels); - - vec_count = pci_msix_vec_count(efx->pci_dev); - if (vec_count < 0) - return vec_count; if (vec_count < n_channels) { netif_err(efx, drv, efx->net_dev, "WARNING: Insufficient MSI-X vectors available (%d < %u).\n", @@ -1507,11 +1508,9 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, n_channels = vec_count; } - efx->n_channels = n_channels; + n_channels = min(n_channels, max_channels); - /* Do not create the PTP TX queue(s) if PTP uses the MC directly. */ - if (extra_channels && !efx_ptp_use_mac_tx_timestamps(efx)) - n_channels--; + efx->n_channels = n_channels; /* Ignore XDP tx channels when creating rx channels. 
*/ n_channels -= efx->n_xdp_channels; @@ -1531,11 +1530,10 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, efx->n_rx_channels = n_channels; } - if (efx->n_xdp_channels) - efx->xdp_channel_offset = efx->tx_channel_offset + - efx->n_tx_channels; - else - efx->xdp_channel_offset = efx->n_channels; + efx->n_rx_channels = min(efx->n_rx_channels, parallelism); + efx->n_tx_channels = min(efx->n_tx_channels, parallelism); + + efx->xdp_channel_offset = n_channels; netif_dbg(efx, drv, efx->net_dev, "Allocating %u RX channels\n", @@ -1550,6 +1548,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, static int efx_probe_interrupts(struct efx_nic *efx) { unsigned int extra_channels = 0; + unsigned int rss_spread; unsigned int i, j; int rc; @@ -1631,8 +1630,7 @@ static int efx_probe_interrupts(struct efx_nic *efx) for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) { if (!efx->extra_channel_type[i]) continue; - if (efx->interrupt_mode != EFX_INT_MODE_MSIX || - efx->n_channels <= extra_channels) { + if (j <= efx->tx_channel_offset + efx->n_tx_channels) { efx->extra_channel_type[i]->handle_no_channel(efx); } else { --j; @@ -1643,16 +1641,17 @@ static int efx_probe_interrupts(struct efx_nic *efx) } } + rss_spread = efx->n_rx_channels; /* RSS might be usable on VFs even if it is disabled on the PF */ #ifdef CONFIG_SFC_SRIOV if (efx->type->sriov_wanted) { - efx->rss_spread = ((efx->n_rx_channels > 1 || + efx->rss_spread = ((rss_spread > 1 || !efx->type->sriov_wanted(efx)) ? - efx->n_rx_channels : efx_vf_size(efx)); + rss_spread : efx_vf_size(efx)); return 0; } #endif - efx->rss_spread = efx->n_rx_channels; + efx->rss_spread = rss_spread; return 0; } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 1f88212be085..dfd5182d9e47 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1533,9 +1533,7 @@ static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel) static inline bool efx_channel_has_tx_queues(struct efx_channel *channel) { - return efx_channel_is_xdp_tx(channel) || - (channel->type && channel->type->want_txqs && - channel->type->want_txqs(channel)); + return true; } static inline struct efx_tx_queue * diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index ef52b24ad9e7..c29bf862a94c 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -96,11 +96,12 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx, void efx_rx_config_page_split(struct efx_nic *efx) { - efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align, + efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align + + XDP_PACKET_HEADROOM, EFX_RX_BUF_ALIGNMENT); efx->rx_bufs_per_page = efx->rx_buffer_order ? 
1 : ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / - (efx->rx_page_buf_step + XDP_PACKET_HEADROOM)); + efx->rx_page_buf_step); efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) / efx->rx_bufs_per_page; efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH, @@ -190,14 +191,13 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic) page_offset = sizeof(struct efx_rx_page_state); do { - page_offset += XDP_PACKET_HEADROOM; - dma_addr += XDP_PACKET_HEADROOM; - index = rx_queue->added_count & rx_queue->ptr_mask; rx_buf = efx_rx_buffer(rx_queue, index); - rx_buf->dma_addr = dma_addr + efx->rx_ip_align; + rx_buf->dma_addr = dma_addr + efx->rx_ip_align + + XDP_PACKET_HEADROOM; rx_buf->page = page; - rx_buf->page_offset = page_offset + efx->rx_ip_align; + rx_buf->page_offset = page_offset + efx->rx_ip_align + + XDP_PACKET_HEADROOM; rx_buf->len = efx->rx_dma_len; rx_buf->flags = 0; ++rx_queue->added_count; diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index f7e927ad67fa..b7032422393f 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -424,16 +424,22 @@ static void ave_ethtool_get_wol(struct net_device *ndev, phy_ethtool_get_wol(ndev->phydev, wol); } -static int ave_ethtool_set_wol(struct net_device *ndev, - struct ethtool_wolinfo *wol) +static int __ave_ethtool_set_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) { - int ret; - if (!ndev->phydev || (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))) return -EOPNOTSUPP; - ret = phy_ethtool_set_wol(ndev->phydev, wol); + return phy_ethtool_set_wol(ndev->phydev, wol); +} + +static int ave_ethtool_set_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) +{ + int ret; + + ret = __ave_ethtool_set_wol(ndev, wol); if (!ret) device_set_wakeup_enable(&ndev->dev, !!wol->wolopts); @@ -1216,7 +1222,7 @@ static int ave_init(struct net_device *ndev) /* set wol initial state disabled */ wol.wolopts = 0; - ave_ethtool_set_wol(ndev, &wol); + __ave_ethtool_set_wol(ndev, &wol); if (!phy_interface_is_rgmii(phydev)) phy_set_max_speed(phydev, SPEED_100); @@ -1768,7 +1774,7 @@ static int ave_resume(struct device *dev) ave_ethtool_get_wol(ndev, &wol); wol.wolopts = priv->wolopts; - ave_ethtool_set_wol(ndev, &wol); + __ave_ethtool_set_wol(ndev, &wol); if (ndev->phydev) { ret = phy_resume(ndev->phydev); diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index b210e987a1db..94f94686cf7d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -365,9 +365,8 @@ struct dma_features { unsigned int arpoffsel; }; -/* GMAC TX FIFO is 8K, Rx FIFO is 16K */ -#define BUF_SIZE_16KiB 16384 -/* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */ +/* RX Buffer size must be multiple of 4/8/16 bytes */ +#define BUF_SIZE_16KiB 16368 #define BUF_SIZE_8KiB 8188 #define BUF_SIZE_4KiB 4096 #define BUF_SIZE_2KiB 2048 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index bd6c01004913..0e2fa14f1423 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -112,6 +112,14 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) struct device *dev = dwmac->dev; const char *parent_name, *mux_parent_names[MUX_CLK_NUM_PARENTS]; struct meson8b_dwmac_clk_configs *clk_configs; + static 
const struct clk_div_table div_table[] = { + { .div = 2, .val = 2, }, + { .div = 3, .val = 3, }, + { .div = 4, .val = 4, }, + { .div = 5, .val = 5, }, + { .div = 6, .val = 6, }, + { .div = 7, .val = 7, }, + }; clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL); if (!clk_configs) @@ -146,9 +154,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) clk_configs->m250_div.reg = dwmac->regs + PRG_ETH0; clk_configs->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT; clk_configs->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH; - clk_configs->m250_div.flags = CLK_DIVIDER_ONE_BASED | - CLK_DIVIDER_ALLOW_ZERO | - CLK_DIVIDER_ROUND_CLOSEST; + clk_configs->m250_div.table = div_table; + clk_configs->m250_div.flags = CLK_DIVIDER_ALLOW_ZERO | + CLK_DIVIDER_ROUND_CLOSEST; clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_name, 1, &clk_divider_ops, &clk_configs->m250_div.hw); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 1c8d84ed8410..01b484cb177e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -957,6 +957,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) /* default */ break; case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII; break; case PHY_INTERFACE_MODE_RMII: diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 26353ef616b8..7d40760e9ba8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -44,7 +44,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv) * rate, which then uses the auto-reparenting feature of the * clock driver, and enabling/disabling the clock. 
*/ - if (gmac->interface == PHY_INTERFACE_MODE_RGMII) { + if (phy_interface_mode_is_rgmii(gmac->interface)) { clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE); clk_prepare_enable(gmac->tx_clk); gmac->clk_enabled = 1; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 3b6e559aa0b9..ef8a07c68ca7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -343,6 +343,8 @@ #define XGMAC_DMA_CH_RX_CONTROL(x) (0x00003108 + (0x80 * (x))) #define XGMAC_RxPBL GENMASK(21, 16) #define XGMAC_RxPBL_SHIFT 16 +#define XGMAC_RBSZ GENMASK(14, 1) +#define XGMAC_RBSZ_SHIFT 1 #define XGMAC_RXST BIT(0) #define XGMAC_DMA_CH_TxDESC_HADDR(x) (0x00003110 + (0x80 * (x))) #define XGMAC_DMA_CH_TxDESC_LADDR(x) (0x00003114 + (0x80 * (x))) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 22a7f0cc1b90..f3f08ccc379b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -482,7 +482,8 @@ static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) u32 value; value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); - value |= bfsize << 1; + value &= ~XGMAC_RBSZ; + value |= bfsize << XGMAC_RBSZ_SHIFT; writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index bbc65bd332a8..80d59b775907 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -46,7 +46,7 @@ #include "dwxgmac2.h" #include "hwif.h" -#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES) +#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) /* Module parameters */ @@ -106,6 +106,7 @@ MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); static irqreturn_t stmmac_interrupt(int irq, void *dev_id); #ifdef CONFIG_DEBUG_FS +static const struct net_device_ops stmmac_netdev_ops; static void stmmac_init_fs(struct net_device *dev); static void stmmac_exit_fs(struct net_device *dev); #endif @@ -1109,7 +1110,9 @@ static int stmmac_set_bfsize(int mtu, int bufsize) { int ret = bufsize; - if (mtu >= BUF_SIZE_4KiB) + if (mtu >= BUF_SIZE_8KiB) + ret = BUF_SIZE_16KiB; + else if (mtu >= BUF_SIZE_4KiB) ret = BUF_SIZE_8KiB; else if (mtu >= BUF_SIZE_2KiB) ret = BUF_SIZE_4KiB; @@ -1293,19 +1296,9 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) struct stmmac_priv *priv = netdev_priv(dev); u32 rx_count = priv->plat->rx_queues_to_use; int ret = -ENOMEM; - int bfsize = 0; int queue; int i; - bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); - if (bfsize < 0) - bfsize = 0; - - if (bfsize < BUF_SIZE_16KiB) - bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); - - priv->dma_buf_sz = bfsize; - /* RX INITIALIZATION */ netif_dbg(priv, probe, priv->dev, "SKB addresses:\nskb\t\tskb data\tdma data\n"); @@ -1347,8 +1340,6 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) } } - buf_sz = bfsize; - return 0; err_init_rx_buffers: @@ -2658,6 +2649,7 @@ static void stmmac_hw_teardown(struct net_device *dev) static int stmmac_open(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + int bfsize = 0; u32 chan; int ret; @@ -2677,7 +2669,16 @@ static int stmmac_open(struct net_device *dev) memset(&priv->xstats, 0, 
sizeof(struct stmmac_extra_stats)); priv->xstats.threshold = tc; - priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); + bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); + if (bfsize < 0) + bfsize = 0; + + if (bfsize < BUF_SIZE_16KiB) + bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); + + priv->dma_buf_sz = bfsize; + buf_sz = bfsize; + priv->rx_copybreak = STMMAC_RX_COPYBREAK; ret = alloc_dma_desc_resources(priv); @@ -3053,8 +3054,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; - } else { - stmmac_tx_timer_arm(priv, queue); } /* We've used all descriptors we need for this skb, however, @@ -3125,6 +3124,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); + stmmac_tx_timer_arm(priv, queue); return NETDEV_TX_OK; @@ -3276,8 +3276,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; - } else { - stmmac_tx_timer_arm(priv, queue); } /* We've used all descriptors we need for this skb, however, @@ -3366,6 +3364,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); + stmmac_tx_timer_arm(priv, queue); return NETDEV_TX_OK; @@ -3646,8 +3645,9 @@ read_again: * feature is always disabled and packets need to be * stripped manually. */ - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) || - unlikely(status != llc_snap)) { + if (likely(!(status & rx_not_ls)) && + (likely(priv->synopsys_id >= DWMAC_CORE_4_00) || + unlikely(status != llc_snap))) { if (buf2_len) buf2_len -= ETH_FCS_LEN; else @@ -3829,12 +3829,24 @@ static void stmmac_set_rx_mode(struct net_device *dev) static int stmmac_change_mtu(struct net_device *dev, int new_mtu) { struct stmmac_priv *priv = netdev_priv(dev); + int txfifosz = priv->plat->tx_fifo_size; + + if (txfifosz == 0) + txfifosz = priv->dma_cap.tx_fifo_size; + + txfifosz /= priv->plat->tx_queues_to_use; if (netif_running(dev)) { netdev_err(priv->dev, "must be stopped to change its MTU\n"); return -EBUSY; } + new_mtu = STMMAC_ALIGN(new_mtu); + + /* If condition true, FIFO is too small or MTU too large */ + if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) + return -EINVAL; + dev->mtu = new_mtu; netdev_update_features(dev); @@ -4245,6 +4257,34 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v) } DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); +/* Use network device events to rename debugfs file entries. 
+ */ +static int stmmac_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct stmmac_priv *priv = netdev_priv(dev); + + if (dev->netdev_ops != &stmmac_netdev_ops) + goto done; + + switch (event) { + case NETDEV_CHANGENAME: + if (priv->dbgfs_dir) + priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, + priv->dbgfs_dir, + stmmac_fs_dir, + dev->name); + break; + } +done: + return NOTIFY_DONE; +} + +static struct notifier_block stmmac_notifier = { + .notifier_call = stmmac_device_event, +}; + static void stmmac_init_fs(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); @@ -4259,12 +4299,15 @@ static void stmmac_init_fs(struct net_device *dev) /* Entry to report the DMA HW features */ debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, &stmmac_dma_cap_fops); + + register_netdevice_notifier(&stmmac_notifier); } static void stmmac_exit_fs(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + unregister_netdevice_notifier(&stmmac_notifier); debugfs_remove_recursive(priv->dbgfs_dir); } #endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index bedaff0c13bd..4775f49d7f3b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -320,7 +320,7 @@ out: static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, struct device_node *np, struct device *dev) { - bool mdio = true; + bool mdio = !of_phy_is_fixed_link(np); static const struct of_device_id need_mdio_ids[] = { { .compatible = "snps,dwc-qos-ethernet-4.10" }, {}, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index f3d8b9336b8e..450d7dac3ea6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -80,7 +80,7 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv, if (attr->max_size && (attr->max_size > size)) size = attr->max_size; - skb = netdev_alloc_skb_ip_align(priv->dev, size); + skb = netdev_alloc_skb(priv->dev, size); if (!skb) return NULL; @@ -244,6 +244,8 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb, struct net_device *orig_ndev) { struct stmmac_test_priv *tpriv = pt->af_packet_priv; + unsigned char *src = tpriv->packet->src; + unsigned char *dst = tpriv->packet->dst; struct stmmachdr *shdr; struct ethhdr *ehdr; struct udphdr *uhdr; @@ -260,15 +262,15 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb, goto out; ehdr = (struct ethhdr *)skb_mac_header(skb); - if (tpriv->packet->dst) { - if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst)) + if (dst) { + if (!ether_addr_equal_unaligned(ehdr->h_dest, dst)) goto out; } if (tpriv->packet->sarc) { - if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest)) + if (!ether_addr_equal_unaligned(ehdr->h_source, ehdr->h_dest)) goto out; - } else if (tpriv->packet->src) { - if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src)) + } else if (src) { + if (!ether_addr_equal_unaligned(ehdr->h_source, src)) goto out; } @@ -624,6 +626,8 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv) return -EOPNOTSUPP; if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries) return -EOPNOTSUPP; + if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins) + return -EOPNOTSUPP; while 
(--tries) { /* We only need to check the mc_addr for collisions */ @@ -666,6 +670,8 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv) if (stmmac_filter_check(priv)) return -EOPNOTSUPP; + if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries) + return -EOPNOTSUPP; if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins) return -EOPNOTSUPP; @@ -710,7 +716,7 @@ static int stmmac_test_flowctrl_validate(struct sk_buff *skb, struct ethhdr *ehdr; ehdr = (struct ethhdr *)skb_mac_header(skb); - if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr)) + if (!ether_addr_equal_unaligned(ehdr->h_source, orig_ndev->dev_addr)) goto out; if (ehdr->h_proto != htons(ETH_P_PAUSE)) goto out; @@ -847,12 +853,16 @@ static int stmmac_test_vlan_validate(struct sk_buff *skb, if (tpriv->vlan_id) { if (skb->vlan_proto != htons(proto)) goto out; - if (skb->vlan_tci != tpriv->vlan_id) + if (skb->vlan_tci != tpriv->vlan_id) { + /* Means filter did not work. */ + tpriv->ok = false; + complete(&tpriv->comp); goto out; + } } ehdr = (struct ethhdr *)skb_mac_header(skb); - if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst)) + if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->dst)) goto out; ihdr = ip_hdr(skb); @@ -961,6 +971,9 @@ static int stmmac_test_vlanfilt_perfect(struct stmmac_priv *priv) { int ret, prev_cap = priv->dma_cap.vlhash; + if (!(priv->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) + return -EOPNOTSUPP; + priv->dma_cap.vlhash = 0; ret = __stmmac_test_vlanfilt(priv); priv->dma_cap.vlhash = prev_cap; @@ -1053,6 +1066,9 @@ static int stmmac_test_dvlanfilt_perfect(struct stmmac_priv *priv) { int ret, prev_cap = priv->dma_cap.vlhash; + if (!(priv->dev->features & NETIF_F_HW_VLAN_STAG_FILTER)) + return -EOPNOTSUPP; + priv->dma_cap.vlhash = 0; ret = __stmmac_test_dvlanfilt(priv); priv->dma_cap.vlhash = prev_cap; @@ -1319,16 +1335,19 @@ static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src, struct stmmac_packet_attrs attr = { }; struct flow_dissector *dissector; struct flow_cls_offload *cls; + int ret, old_enable = 0; struct flow_rule *rule; - int ret; if (!tc_can_offload(priv->dev)) return -EOPNOTSUPP; if (!priv->dma_cap.l3l4fnum) return -EOPNOTSUPP; - if (priv->rss.enable) + if (priv->rss.enable) { + old_enable = priv->rss.enable; + priv->rss.enable = false; stmmac_rss_configure(priv, priv->hw, NULL, priv->plat->rx_queues_to_use); + } dissector = kzalloc(sizeof(*dissector), GFP_KERNEL); if (!dissector) { @@ -1395,7 +1414,8 @@ cleanup_cls: cleanup_dissector: kfree(dissector); cleanup_rss: - if (priv->rss.enable) { + if (old_enable) { + priv->rss.enable = old_enable; stmmac_rss_configure(priv, priv->hw, &priv->rss, priv->plat->rx_queues_to_use); } @@ -1440,16 +1460,19 @@ static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src, struct stmmac_packet_attrs attr = { }; struct flow_dissector *dissector; struct flow_cls_offload *cls; + int ret, old_enable = 0; struct flow_rule *rule; - int ret; if (!tc_can_offload(priv->dev)) return -EOPNOTSUPP; if (!priv->dma_cap.l3l4fnum) return -EOPNOTSUPP; - if (priv->rss.enable) + if (priv->rss.enable) { + old_enable = priv->rss.enable; + priv->rss.enable = false; stmmac_rss_configure(priv, priv->hw, NULL, priv->plat->rx_queues_to_use); + } dissector = kzalloc(sizeof(*dissector), GFP_KERNEL); if (!dissector) { @@ -1521,7 +1544,8 @@ cleanup_cls: cleanup_dissector: kfree(dissector); cleanup_rss: - if (priv->rss.enable) { + if (old_enable) { + priv->rss.enable = old_enable; 
stmmac_rss_configure(priv, priv->hw, &priv->rss, priv->plat->rx_queues_to_use); } @@ -1574,7 +1598,7 @@ static int stmmac_test_arp_validate(struct sk_buff *skb, struct arphdr *ahdr; ehdr = (struct ethhdr *)skb_mac_header(skb); - if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src)) + if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->src)) goto out; ahdr = arp_hdr(skb); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 7d972e0fd2b0..9ffae12a2122 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -577,6 +577,10 @@ static int tc_setup_cls(struct stmmac_priv *priv, { int ret = 0; + /* When RSS is enabled, the filtering will be bypassed */ + if (priv->rss.enable) + return -EBUSY; + switch (cls->command) { case FLOW_CLS_REPLACE: ret = tc_add_flow(priv, cls); diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index a46f4189fde3..bf98e0fa7d8b 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -63,6 +63,7 @@ config TI_CPSW_SWITCHDEV tristate "TI CPSW Switch Support with switchdev" depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST depends on NET_SWITCHDEV + select PAGE_POOL select TI_DAVINCI_MDIO select MFD_SYSCON select REGMAP diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile index d34df8e5cf94..ecf776ad8689 100644 --- a/drivers/net/ethernet/ti/Makefile +++ b/drivers/net/ethernet/ti/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_TI_CPSW) += cpsw-common.o obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o +obj-$(CONFIG_TI_CPSW_SWITCHDEV) += cpsw-common.o obj-$(CONFIG_TLAN) += tlan.o obj-$(CONFIG_CPMAC) += cpmac.o diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 37ba708ac781..6614fa3089b2 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -1018,7 +1018,6 @@ static int cpdma_chan_submit_si(struct submit_info *si) struct cpdma_chan *chan = si->chan; struct cpdma_ctlr *ctlr = chan->ctlr; int len = si->len; - int swlen = len; struct cpdma_desc __iomem *desc; dma_addr_t buffer; u32 mode; @@ -1046,7 +1045,6 @@ static int cpdma_chan_submit_si(struct submit_info *si) if (si->data_dma) { buffer = si->data_dma; dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir); - swlen |= CPDMA_DMA_EXT_MAP; } else { buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir); ret = dma_mapping_error(ctlr->dev, buffer); @@ -1065,7 +1063,8 @@ static int cpdma_chan_submit_si(struct submit_info *si) writel_relaxed(mode | len, &desc->hw_mode); writel_relaxed((uintptr_t)si->token, &desc->sw_token); writel_relaxed(buffer, &desc->sw_buffer); - writel_relaxed(swlen, &desc->sw_len); + writel_relaxed(si->data_dma ? 
len | CPDMA_DMA_EXT_MAP : len, + &desc->sw_len); desc_read(desc, sw_len); __cpdma_chan_submit(chan, desc); diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index b517c1af9de0..91a1059517f5 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -166,6 +166,9 @@ static int fjes_acpi_add(struct acpi_device *device) /* create platform_device */ plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource, ARRAY_SIZE(fjes_resource)); + if (IS_ERR(plat_dev)) + return PTR_ERR(plat_dev); + device->driver_data = plat_dev; return 0; diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index ecfe26215935..f6222ada6818 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -38,7 +38,6 @@ struct pdp_ctx { struct hlist_node hlist_addr; union { - u64 tid; struct { u64 tid; u16 flow; @@ -541,7 +540,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, mtu = dst_mtu(&rt->dst); } - rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu); + rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false); if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) && mtu < ntohs(iph->tot_len)) { @@ -641,9 +640,16 @@ static void gtp_link_setup(struct net_device *dev) } static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize); -static void gtp_hashtable_free(struct gtp_dev *gtp); static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]); +static void gtp_destructor(struct net_device *dev) +{ + struct gtp_dev *gtp = netdev_priv(dev); + + kfree(gtp->addr_hash); + kfree(gtp->tid_hash); +} + static int gtp_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) @@ -661,10 +667,13 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, if (err < 0) return err; - if (!data[IFLA_GTP_PDP_HASHSIZE]) + if (!data[IFLA_GTP_PDP_HASHSIZE]) { hashsize = 1024; - else + } else { hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]); + if (!hashsize) + hashsize = 1024; + } err = gtp_hashtable_new(gtp, hashsize); if (err < 0) @@ -678,13 +687,15 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, gn = net_generic(dev_net(dev), gtp_net_id); list_add_rcu(&gtp->list, &gn->gtp_dev_list); + dev->priv_destructor = gtp_destructor; netdev_dbg(dev, "registered new GTP interface\n"); return 0; out_hashtable: - gtp_hashtable_free(gtp); + kfree(gtp->addr_hash); + kfree(gtp->tid_hash); out_encap: gtp_encap_disable(gtp); return err; @@ -693,8 +704,13 @@ out_encap: static void gtp_dellink(struct net_device *dev, struct list_head *head) { struct gtp_dev *gtp = netdev_priv(dev); + struct pdp_ctx *pctx; + int i; + + for (i = 0; i < gtp->hash_size; i++) + hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) + pdp_context_delete(pctx); - gtp_hashtable_free(gtp); list_del_rcu(&gtp->list); unregister_netdevice_queue(dev, head); } @@ -772,20 +788,6 @@ err1: return -ENOMEM; } -static void gtp_hashtable_free(struct gtp_dev *gtp) -{ - struct pdp_ctx *pctx; - int i; - - for (i = 0; i < gtp->hash_size; i++) - hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) - pdp_context_delete(pctx); - - synchronize_rcu(); - kfree(gtp->addr_hash); - kfree(gtp->tid_hash); -} - static struct sock *gtp_encap_enable_socket(int fd, int type, struct gtp_dev *gtp) { @@ -811,7 +813,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type, lock_sock(sock->sk); if (sock->sk->sk_user_data) { sk = ERR_PTR(-EBUSY); - goto out_sock; + goto out_rel_sock; } sk = sock->sk; @@ -824,8
+826,9 @@ static struct sock *gtp_encap_enable_socket(int fd, int type, setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg); -out_sock: +out_rel_sock: release_sock(sock->sk); +out_sock: sockfd_put(sock); return sk; } @@ -926,24 +929,31 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) } } -static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk, - struct genl_info *info) +static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk, + struct genl_info *info) { + struct pdp_ctx *pctx, *pctx_tid = NULL; struct net_device *dev = gtp->dev; u32 hash_ms, hash_tid = 0; - struct pdp_ctx *pctx; + unsigned int version; bool found = false; __be32 ms_addr; ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size; + version = nla_get_u32(info->attrs[GTPA_VERSION]); - hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) { - if (pctx->ms_addr_ip4.s_addr == ms_addr) { - found = true; - break; - } - } + pctx = ipv4_pdp_find(gtp, ms_addr); + if (pctx) + found = true; + if (version == GTP_V0) + pctx_tid = gtp0_pdp_find(gtp, + nla_get_u64(info->attrs[GTPA_TID])); + else if (version == GTP_V1) + pctx_tid = gtp1_pdp_find(gtp, + nla_get_u32(info->attrs[GTPA_I_TEI])); + if (pctx_tid) + found = true; if (found) { if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) @@ -951,6 +961,11 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk, if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP; + if (pctx && pctx_tid) + return -EEXIST; + if (!pctx) + pctx = pctx_tid; + ipv4_pdp_fill(pctx, info); if (pctx->gtp_version == GTP_V0) @@ -1074,7 +1089,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info) goto out_unlock; } - err = ipv4_pdp_add(gtp, sk, info); + err = gtp_pdp_add(gtp, sk, info); out_unlock: rcu_read_unlock(); @@ -1232,43 +1247,46 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb, struct netlink_callback *cb) { struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp; + int i, j, bucket = cb->args[0], skip = cb->args[1]; struct net *net = sock_net(skb->sk); - struct gtp_net *gn = net_generic(net, gtp_net_id); - unsigned long tid = cb->args[1]; - int i, k = cb->args[0], ret; struct pdp_ctx *pctx; + struct gtp_net *gn; + + gn = net_generic(net, gtp_net_id); if (cb->args[4]) return 0; + rcu_read_lock(); list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) { if (last_gtp && last_gtp != gtp) continue; else last_gtp = NULL; - for (i = k; i < gtp->hash_size; i++) { - hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) { - if (tid && tid != pctx->u.tid) - continue; - else - tid = 0; - - ret = gtp_genl_fill_info(skb, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - cb->nlh->nlmsg_type, pctx); - if (ret < 0) { + for (i = bucket; i < gtp->hash_size; i++) { + j = 0; + hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], + hlist_tid) { + if (j >= skip && + gtp_genl_fill_info(skb, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + cb->nlh->nlmsg_type, pctx)) { cb->args[0] = i; - cb->args[1] = pctx->u.tid; + cb->args[1] = j; cb->args[2] = (unsigned long)gtp; goto out; } + j++; } + skip = 0; } + bucket = 0; } cb->args[4] = 1; out: + rcu_read_unlock(); return skb->len; } diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 23281aeeb222..71d6629e65c9 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -654,10 +654,10 @@ static void sixpack_close(struct tty_struct *tty) { struct sixpack *sp; - write_lock_bh(&disc_data_lock); +
write_lock_irq(&disc_data_lock); sp = tty->disc_data; tty->disc_data = NULL; - write_unlock_bh(&disc_data_lock); + write_unlock_irq(&disc_data_lock); if (!sp) return; diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index c5bfa19ddb93..deef14215110 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -773,10 +773,10 @@ static void mkiss_close(struct tty_struct *tty) { struct mkiss *ax; - write_lock_bh(&disc_data_lock); + write_lock_irq(&disc_data_lock); ax = tty->disc_data; tty->disc_data = NULL; - write_unlock_bh(&disc_data_lock); + write_unlock_irq(&disc_data_lock); if (!ax) return; diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 9caa876ce6e8..dc44819946e6 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -169,7 +169,6 @@ struct rndis_device { u8 hw_mac_adr[ETH_ALEN]; u8 rss_key[NETVSC_HASH_KEYLEN]; - u16 rx_table[ITAB_NUM]; }; @@ -940,6 +939,8 @@ struct net_device_context { u32 tx_table[VRSS_SEND_TAB_SIZE]; + u16 rx_table[ITAB_NUM]; + /* Ethtool settings */ u8 duplex; u32 speed; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 02e66473f2ed..f3f9eb8a402a 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1662,7 +1662,7 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, rndis_dev = ndev->extension; if (indir) { for (i = 0; i < ITAB_NUM; i++) - indir[i] = rndis_dev->rx_table[i]; + indir[i] = ndc->rx_table[i]; } if (key) @@ -1692,7 +1692,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir, return -EINVAL; for (i = 0; i < ITAB_NUM; i++) - rndis_dev->rx_table[i] = indir[i]; + ndc->rx_table[i] = indir[i]; } if (!key) { diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 206b4e77eaf0..e66d77dc28c8 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -773,6 +773,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev, const u8 *rss_key, u16 flag) { struct net_device *ndev = rdev->ndev; + struct net_device_context *ndc = netdev_priv(ndev); struct rndis_request *request; struct rndis_set_request *set; struct rndis_set_complete *set_complete; @@ -812,7 +813,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev, /* Set indirection table entries */ itab = (u32 *)(rssp + 1); for (i = 0; i < ITAB_NUM; i++) - itab[i] = rdev->rx_table[i]; + itab[i] = ndc->rx_table[i]; /* Set hask key values */ keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset); @@ -1171,6 +1172,9 @@ int rndis_set_subchannel(struct net_device *ndev, wait_event(nvdev->subchan_open, atomic_read(&nvdev->open_chn) == nvdev->num_chn); + for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) + ndev_ctx->tx_table[i] = i % nvdev->num_chn; + /* ignore failures from setting rss parameters, still have channels */ if (dev_info) rndis_filter_set_rss_param(rdev, dev_info->rss_key); @@ -1180,9 +1184,6 @@ int rndis_set_subchannel(struct net_device *ndev, netif_set_real_num_tx_queues(ndev, nvdev->num_chn); netif_set_real_num_rx_queues(ndev, nvdev->num_chn); - for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) - ndev_ctx->tx_table[i] = i % nvdev->num_chn; - return 0; } @@ -1312,6 +1313,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, struct netvsc_device_info *device_info) { struct net_device *net = hv_get_drvdata(dev); + struct net_device_context *ndc = netdev_priv(net); struct netvsc_device *net_device; struct 
rndis_device *rndis_device; struct ndis_recv_scale_cap rsscap; @@ -1398,9 +1400,11 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, /* We will use the given number of channels if available. */ net_device->num_chn = min(net_device->max_chn, device_info->num_chn); - for (i = 0; i < ITAB_NUM; i++) - rndis_device->rx_table[i] = ethtool_rxfh_indir_default( + if (!netif_is_rxfh_configured(net)) { + for (i = 0; i < ITAB_NUM; i++) + ndc->rx_table[i] = ethtool_rxfh_indir_default( i, net_device->num_chn); + } atomic_set(&net_device->open_chn, 1); vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); @@ -1439,8 +1443,6 @@ void rndis_filter_device_remove(struct hv_device *dev, /* Halt and release the rndis device */ rndis_filter_halt_device(net_dev, rndis_dev); - net_dev->extension = NULL; - netvsc_device_remove(dev); } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 05631d97eeb4..c5bf61565726 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -513,10 +513,11 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) const struct macvlan_dev *dest; if (vlan->mode == MACVLAN_MODE_BRIDGE) { - const struct ethhdr *eth = (void *)skb->data; + const struct ethhdr *eth = skb_eth_hdr(skb); /* send to other bridge ports directly */ if (is_multicast_ether_addr(eth->h_dest)) { + skb_reset_mac_header(skb); macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE); goto xmit_world; } diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index 059711edfc61..4b39aba2e9c4 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -53,7 +53,7 @@ static ssize_t nsim_dev_take_snapshot_write(struct file *file, get_random_bytes(dummy_data, NSIM_DEV_DUMMY_REGION_SIZE); - id = devlink_region_shapshot_id_get(priv_to_devlink(nsim_dev)); + id = devlink_region_snapshot_id_get(priv_to_devlink(nsim_dev)); err = devlink_region_snapshot_create(nsim_dev->dummy_region, dummy_data, id, kfree); if (err) { diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 5848219005d7..8dc461f7574b 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -340,14 +340,14 @@ config DAVICOM_PHY Currently supports dm9161e and dm9131 config DP83822_PHY - tristate "Texas Instruments DP83822 PHY" + tristate "Texas Instruments DP83822/825 PHYs" ---help--- - Supports the DP83822 PHY. + Supports the DP83822 and DP83825I PHYs. config DP83TC811_PHY - tristate "Texas Instruments DP83TC822 PHY" + tristate "Texas Instruments DP83TC811 PHY" ---help--- - Supports the DP83TC822 PHY. + Supports the DP83TC811 PHY. 
config DP83848_PHY tristate "Texas Instruments DP83848 PHY" diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c index 3b29d381116f..975789d9349d 100644 --- a/drivers/net/phy/aquantia_main.c +++ b/drivers/net/phy/aquantia_main.c @@ -627,6 +627,8 @@ static struct phy_driver aqr_driver[] = { .config_intr = aqr_config_intr, .ack_interrupt = aqr_ack_interrupt, .read_status = aqr_read_status, + .suspend = aqr107_suspend, + .resume = aqr107_resume, }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR106), diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 9cd9dcee4eb2..01cf71358359 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -97,6 +97,7 @@ #define DP83867_PHYCR_FIFO_DEPTH_MAX 0x03 #define DP83867_PHYCR_FIFO_DEPTH_MASK GENMASK(15, 14) #define DP83867_PHYCR_RESERVED_MASK BIT(11) +#define DP83867_PHYCR_FORCE_LINK_GOOD BIT(10) /* RGMIIDCTL bits */ #define DP83867_RGMII_TX_CLK_DELAY_MAX 0xf @@ -599,7 +600,12 @@ static int dp83867_phy_reset(struct phy_device *phydev) usleep_range(10, 20); - return 0; + /* After reset FORCE_LINK_GOOD bit is set. Although the + * default value should be unset. Disable FORCE_LINK_GOOD + * for the phy to work properly. + */ + return phy_modify(phydev, MII_DP83867_PHYCTRL, + DP83867_PHYCR_FORCE_LINK_GOOD, 0); } static struct phy_driver dp83867_driver[] = { diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 0887ed2bb050..b13c52873ef5 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -553,7 +553,7 @@ static const struct device_type mdio_bus_phy_type = { .pm = MDIO_BUS_PHY_PM_OPS, }; -static int phy_request_driver_module(struct phy_device *dev, int phy_id) +static int phy_request_driver_module(struct phy_device *dev, u32 phy_id) { int ret; @@ -565,15 +565,15 @@ static int phy_request_driver_module(struct phy_device *dev, int phy_id) * then modprobe isn't available. 
*/ if (IS_ENABLED(CONFIG_MODULES) && ret < 0 && ret != -ENOENT) { - phydev_err(dev, "error %d loading PHY driver module for ID 0x%08x\n", - ret, phy_id); + phydev_err(dev, "error %d loading PHY driver module for ID 0x%08lx\n", + ret, (unsigned long)phy_id); return ret; } return 0; } -struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, +struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids) { diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 9a616d6bc4eb..ee7a718662c6 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -442,8 +442,7 @@ static void phylink_mac_link_up(struct phylink *pl, pl->cur_interface = link_state.interface; pl->ops->mac_link_up(pl->config, pl->link_an_mode, - pl->phy_state.interface, - pl->phydev); + pl->cur_interface, pl->phydev); if (ndev) netif_carrier_on(ndev); @@ -567,6 +566,9 @@ static int phylink_register_sfp(struct phylink *pl, struct sfp_bus *bus; int ret; + if (!fwnode) + return 0; + bus = sfp_bus_find_fwnode(fwnode); if (IS_ERR(bus)) { ret = PTR_ERR(bus); diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index cf1f3f0a4b9b..75bdfae5f3e2 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -511,7 +511,7 @@ static int lan78xx_read_stats(struct lan78xx_net *dev, } } else { netdev_warn(dev->net, - "Failed to read stat ret = 0x%x", ret); + "Failed to read stat ret = %d", ret); } kfree(stats); @@ -1808,6 +1808,7 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev) dev->mdiobus->read = lan78xx_mdiobus_read; dev->mdiobus->write = lan78xx_mdiobus_write; dev->mdiobus->name = "lan78xx-mdiobus"; + dev->mdiobus->parent = &dev->udev->dev; snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum); @@ -2723,11 +2724,6 @@ static int lan78xx_stop(struct net_device *net) return 0; } -static int lan78xx_linearize(struct sk_buff *skb) -{ - return skb_linearize(skb); -} - static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags) { @@ -2739,8 +2735,10 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, return NULL; } - if (lan78xx_linearize(skb) < 0) + if (skb_linearize(skb)) { + dev_kfree_skb_any(skb); return NULL; + } tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_; @@ -3752,6 +3750,7 @@ static int lan78xx_probe(struct usb_interface *intf, /* MTU range: 68 - 9000 */ netdev->max_mtu = MAX_SINGLE_PACKET_SIZE; + netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER); dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0; dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 4196c0e32740..9485c8d1de8a 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1062,6 +1062,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */ + {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */ /* 3. 
Combined interface devices matching on interface number */ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index c5ebf35d2488..031cb8fff909 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -6597,6 +6597,9 @@ static int rtl8152_probe(struct usb_interface *intf, return -ENODEV; } + if (intf->cur_altsetting->desc.bNumEndpoints < 3) + return -ENODEV; + usb_reset_device(udev); netdev = alloc_etherdev(sizeof(struct r8152)); if (!netdev) { diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 3ec6b506033d..1c5159dcc720 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2541,7 +2541,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ndst = &rt->dst; skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM); - tos = ip_tunnel_ecn_encap(tos, old_iph, skb); + tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), vni, md, flags, udp_sum); @@ -2581,7 +2581,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM); - tos = ip_tunnel_ecn_encap(tos, old_iph, skb); + tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); ttl = ttl ? : ip6_dst_hoplimit(ndst); skb_scrub_packet(skb, xnet); err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index ca0f3be2b6bf..aef7de225783 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -73,7 +73,7 @@ static struct ucc_tdm_info utdm_primary_info = { }, }; -static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM]; +static struct ucc_tdm_info utdm_info[UCC_MAX_NUM]; static int uhdlc_init(struct ucc_hdlc_private *priv) { diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 0f1217b506ad..e30d91a38cfb 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -64,7 +64,7 @@ static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev) { struct lapbethdev *lapbeth; - list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node) { + list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node, lockdep_rtnl_is_held()) { if (lapbeth->ethdev == dev) return lapbeth; } diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c index e2e679a01b65..77ccf3672ede 100644 --- a/drivers/net/wan/sdla.c +++ b/drivers/net/wan/sdla.c @@ -708,7 +708,7 @@ static netdev_tx_t sdla_transmit(struct sk_buff *skb, spin_lock_irqsave(&sdla_lock, flags); SDLA_WINDOW(dev, addr); - pbuf = (void *)(((int) dev->mem_start) + (addr & SDLA_ADDR_MASK)); + pbuf = (void *)(dev->mem_start + (addr & SDLA_ADDR_MASK)); __sdla_write(dev, pbuf->buf_addr, skb->data, skb->len); SDLA_WINDOW(dev, addr); pbuf->opp_flag = 1; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 83cc8778ca1e..978f0037ed52 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -8958,6 +8958,7 @@ int ath10k_mac_register(struct ath10k *ar) wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); + wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_AQL); if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map) || test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, ar->wmi.svc_map)) diff --git 
a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c index 956fa7828d0c..56d1a7764b9f 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c +++ b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c @@ -83,7 +83,7 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data, val = swahb32(val); } - __raw_writel(val, mem + reg); + iowrite32(val, mem + reg); usleep_range(100, 120); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 040cec17d3ad..b0b7eca1754e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1111,18 +1111,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* same thing for QuZ... */ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) { - if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr) - iwl_trans->cfg = &iwl_ax101_cfg_quz_hr; - else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) - iwl_trans->cfg = &iwl_ax201_cfg_quz_hr; - else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc; - else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc; - else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc; - else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc; + if (cfg == &iwl_ax101_cfg_qu_hr) + cfg = &iwl_ax101_cfg_quz_hr; + else if (cfg == &iwl_ax201_cfg_qu_hr) + cfg = &iwl_ax201_cfg_quz_hr; + else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc; + else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc; + else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc; + else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) + cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc; } #endif diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 0252716c0b24..0d8b2a8ffa5d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -57,24 +57,6 @@ #include "internal.h" #include "fw/dbg.h" -static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans) -{ - iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, - HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); - udelay(20); - iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, - HPM_HIPM_GEN_CFG_CR_PG_EN | - HPM_HIPM_GEN_CFG_CR_SLP_EN); - udelay(20); - iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG, - HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); - - iwl_trans_sw_reset(trans); - iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); - - return 0; -} - /* * Start up NIC's basic functionality after it has been reset * (e.g. 
after platform boot, or shutdown via iwl_pcie_apm_stop()) @@ -110,13 +92,6 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) iwl_pcie_apm_config(trans); - if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 && - trans->cfg->integrated) { - ret = iwl_pcie_gen2_force_power_gating(trans); - if (ret) - return ret; - } - ret = iwl_finish_nic_init(trans, trans->trans_cfg); if (ret) return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index af9bc6b64542..a0677131634d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1783,6 +1783,29 @@ static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans) return 0; } +static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans) +{ + int ret; + + ret = iwl_finish_nic_init(trans, trans->trans_cfg); + if (ret < 0) + return ret; + + iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, + HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); + udelay(20); + iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, + HPM_HIPM_GEN_CFG_CR_PG_EN | + HPM_HIPM_GEN_CFG_CR_SLP_EN); + udelay(20); + iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG, + HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); + + iwl_trans_pcie_sw_reset(trans); + + return 0; +} + static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1802,6 +1825,13 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) iwl_trans_pcie_sw_reset(trans); + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 && + trans->cfg->integrated) { + err = iwl_pcie_gen2_force_power_gating(trans); + if (err) + return err; + } + err = iwl_pcie_apm_init(trans); if (err) return err; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 74e50566db1f..6dd835f1efc2 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -229,6 +229,14 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv, "11D: skip setting domain info in FW\n"); return 0; } + + if (country_ie_len > + (IEEE80211_COUNTRY_STRING_LEN + MWIFIEX_MAX_TRIPLET_802_11D)) { + mwifiex_dbg(priv->adapter, ERROR, + "11D: country_ie_len overflow!, deauth AP\n"); + return -EINVAL; + } + memcpy(priv->adapter->country_code, &country_ie[2], 2); domain_info->country_code[0] = country_ie[2]; @@ -272,8 +280,9 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, priv->scan_block = false; if (bss) { - if (adapter->region_code == 0x00) - mwifiex_process_country_ie(priv, bss); + if (adapter->region_code == 0x00 && + mwifiex_process_country_ie(priv, bss)) + return -EINVAL; /* Allocate and fill new bss descriptor */ bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c index 09313047beed..7caf1d26124a 100644 --- a/drivers/net/wireless/marvell/mwifiex/tdls.c +++ b/drivers/net/wireless/marvell/mwifiex/tdls.c @@ -953,59 +953,117 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, switch (*pos) { case WLAN_EID_SUPP_RATES: + if (pos[1] > 32) + return; sta_ptr->tdls_cap.rates_len = pos[1]; for (i = 0; i < pos[1]; i++) sta_ptr->tdls_cap.rates[i] = pos[i + 2]; break; case WLAN_EID_EXT_SUPP_RATES: + if (pos[1] > 32) + return; basic = sta_ptr->tdls_cap.rates_len; + if (pos[1] > 32 - basic) + return; for (i 
= 0; i < pos[1]; i++) sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2]; sta_ptr->tdls_cap.rates_len += pos[1]; break; case WLAN_EID_HT_CAPABILITY: - memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos, + if (pos > end - sizeof(struct ieee80211_ht_cap) - 2) + return; + if (pos[1] != sizeof(struct ieee80211_ht_cap)) + return; + /* copy the ie's value into ht_capb*/ + memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2, sizeof(struct ieee80211_ht_cap)); sta_ptr->is_11n_enabled = 1; break; case WLAN_EID_HT_OPERATION: - memcpy(&sta_ptr->tdls_cap.ht_oper, pos, + if (pos > end - + sizeof(struct ieee80211_ht_operation) - 2) + return; + if (pos[1] != sizeof(struct ieee80211_ht_operation)) + return; + /* copy the ie's value into ht_oper*/ + memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2, sizeof(struct ieee80211_ht_operation)); break; case WLAN_EID_BSS_COEX_2040: + if (pos > end - 3) + return; + if (pos[1] != 1) + return; sta_ptr->tdls_cap.coex_2040 = pos[2]; break; case WLAN_EID_EXT_CAPABILITY: + if (pos > end - sizeof(struct ieee_types_header)) + return; + if (pos[1] < sizeof(struct ieee_types_header)) + return; + if (pos[1] > 8) + return; memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos, sizeof(struct ieee_types_header) + min_t(u8, pos[1], 8)); break; case WLAN_EID_RSN: + if (pos > end - sizeof(struct ieee_types_header)) + return; + if (pos[1] < sizeof(struct ieee_types_header)) + return; + if (pos[1] > IEEE_MAX_IE_SIZE - + sizeof(struct ieee_types_header)) + return; memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos, sizeof(struct ieee_types_header) + min_t(u8, pos[1], IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_header))); break; case WLAN_EID_QOS_CAPA: + if (pos > end - 3) + return; + if (pos[1] != 1) + return; sta_ptr->tdls_cap.qos_info = pos[2]; break; case WLAN_EID_VHT_OPERATION: - if (priv->adapter->is_hw_11ac_capable) - memcpy(&sta_ptr->tdls_cap.vhtoper, pos, + if (priv->adapter->is_hw_11ac_capable) { + if (pos > end - + sizeof(struct ieee80211_vht_operation) - 2) + return; + if (pos[1] != + sizeof(struct ieee80211_vht_operation)) + return; + /* copy the ie's value into vhtoper*/ + memcpy(&sta_ptr->tdls_cap.vhtoper, pos + 2, sizeof(struct ieee80211_vht_operation)); + } break; case WLAN_EID_VHT_CAPABILITY: if (priv->adapter->is_hw_11ac_capable) { - memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos, + if (pos > end - + sizeof(struct ieee80211_vht_cap) - 2) + return; + if (pos[1] != sizeof(struct ieee80211_vht_cap)) + return; + /* copy the ie's value into vhtcap*/ + memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2, sizeof(struct ieee80211_vht_cap)); sta_ptr->is_11ac_enabled = 1; } break; case WLAN_EID_AID: - if (priv->adapter->is_hw_11ac_capable) + if (priv->adapter->is_hw_11ac_capable) { + if (pos > end - 4) + return; + if (pos[1] != 2) + return; sta_ptr->tdls_cap.aid = get_unaligned_le16((pos + 2)); + } + break; default: break; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index a03e2d01fba7..d1405528b504 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c @@ -342,8 +342,11 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev) dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n", version, fae); - mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR); + memcpy(dev->mt76.macaddr, (u8 *)dev->mt76.eeprom.data + MT_EE_MAC_ADDR, + ETH_ALEN); mt76_eeprom_override(&dev->mt76); + mt76x02_mac_setaddr(dev, dev->mt76.macaddr); + mt76x0_set_chip_cap(dev); mt76x0_set_freq_offset(dev); 
mt76x0_set_temp_offset(dev); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 68dd7bb07ca6..f15ba3de6195 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -628,18 +628,6 @@ err: static void xenvif_disconnect_queue(struct xenvif_queue *queue) { - if (queue->tx_irq) { - unbind_from_irqhandler(queue->tx_irq, queue); - if (queue->tx_irq == queue->rx_irq) - queue->rx_irq = 0; - queue->tx_irq = 0; - } - - if (queue->rx_irq) { - unbind_from_irqhandler(queue->rx_irq, queue); - queue->rx_irq = 0; - } - if (queue->task) { kthread_stop(queue->task); queue->task = NULL; @@ -655,6 +643,18 @@ static void xenvif_disconnect_queue(struct xenvif_queue *queue) queue->napi.poll = NULL; } + if (queue->tx_irq) { + unbind_from_irqhandler(queue->tx_irq, queue); + if (queue->tx_irq == queue->rx_irq) + queue->rx_irq = 0; + queue->tx_irq = 0; + } + + if (queue->rx_irq) { + unbind_from_irqhandler(queue->rx_irq, queue); + queue->rx_irq = 0; + } + xenvif_unmap_frontend_data_rings(queue); } diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c index 4d1909aecd6c..9f60e4dc5a90 100644 --- a/drivers/nfc/nxp-nci/i2c.c +++ b/drivers/nfc/nxp-nci/i2c.c @@ -278,7 +278,7 @@ static int nxp_nci_i2c_probe(struct i2c_client *client, r = devm_acpi_dev_add_driver_gpios(dev, acpi_nxp_nci_gpios); if (r) - return r; + dev_dbg(dev, "Unable to add GPIO mapping table\n"); phy->gpiod_en = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(phy->gpiod_en)) { diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index 4590fbf82dc2..f5bb7ace2ff5 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -391,7 +391,7 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy) cmd, sizeof(cmd), false); rc = usb_bulk_msg(phy->udev, phy->out_urb->pipe, buffer, sizeof(cmd), - &transferred, 0); + &transferred, 5000); kfree(buffer); if (rc || (transferred != sizeof(cmd))) { nfc_err(&phy->udev->dev, diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c index be110d9cef02..de613c623a2c 100644 --- a/drivers/nfc/s3fwrn5/firmware.c +++ b/drivers/nfc/s3fwrn5/firmware.c @@ -507,7 +507,10 @@ int s3fwrn5_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb) struct s3fwrn5_info *info = nci_get_drvdata(ndev); struct s3fwrn5_fw_info *fw_info = &info->fw_info; - BUG_ON(fw_info->rsp); + if (WARN_ON(fw_info->rsp)) { + kfree_skb(skb); + return -EINVAL; + } fw_info->rsp = skb; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 667f18f465be..5dc32b72e7fa 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -222,6 +222,8 @@ static blk_status_t nvme_error_status(u16 status) case NVME_SC_CAP_EXCEEDED: return BLK_STS_NOSPC; case NVME_SC_LBA_RANGE: + case NVME_SC_CMD_INTERRUPTED: + case NVME_SC_NS_NOT_READY: return BLK_STS_TARGET; case NVME_SC_BAD_ATTRIBUTES: case NVME_SC_ONCS_NOT_SUPPORTED: diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 56c21b501185..72a7e41f3018 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -24,6 +24,16 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd) return len; } +static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10) +{ + switch (cdw10 & 0xff) { + case NVME_FEAT_HOST_ID: + return sizeof(req->sq->ctrl->hostid); + default: + return 0; + } +} + u64 nvmet_get_log_page_offset(struct nvme_command *cmd) { return le64_to_cpu(cmd->get_log_page.lpo); @@ -778,7 +788,7 @@ 
static void nvmet_execute_get_features(struct nvmet_req *req) u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); u16 status = 0; - if (!nvmet_check_data_len(req, 0)) + if (!nvmet_check_data_len(req, nvmet_feat_data_len(req, cdw10))) return; switch (cdw10 & 0xff) { diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index c6b87ce2b0cc..fc757ef6eadc 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -162,7 +162,7 @@ static const struct of_device_id whitelist_phys[] = { * A device which is not a phy is expected to have a compatible string * indicating what sort of device it is. */ -static bool of_mdiobus_child_is_phy(struct device_node *child) +bool of_mdiobus_child_is_phy(struct device_node *child) { u32 phy_id; @@ -187,6 +187,7 @@ static bool of_mdiobus_child_is_phy(struct device_node *child) return false; } +EXPORT_SYMBOL(of_mdiobus_child_is_phy); /** * of_mdiobus_register - Register mii_bus and create PHYs from the device tree diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index 773128f411f1..d704eccc548f 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -814,7 +814,7 @@ static int smmu_pmu_probe(struct platform_device *pdev) if (err) { dev_err(dev, "Error %d registering hotplug, PMU @%pa\n", err, &res_0->start); - goto out_cpuhp_err; + return err; } err = perf_pmu_register(&smmu_pmu->pmu, name, -1); @@ -833,8 +833,6 @@ static int smmu_pmu_probe(struct platform_device *pdev) out_unregister: cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); -out_cpuhp_err: - put_cpu(); return err; } diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c index ead06c6c2601..12e71a315a2c 100644 --- a/drivers/phy/motorola/phy-cpcap-usb.c +++ b/drivers/phy/motorola/phy-cpcap-usb.c @@ -115,7 +115,7 @@ struct cpcap_usb_ints_state { enum cpcap_gpio_mode { CPCAP_DM_DP, CPCAP_MDM_RX_TX, - CPCAP_UNKNOWN, + CPCAP_UNKNOWN_DISABLED, /* Seems to disable USB lines */ CPCAP_OTG_DM_DP, }; @@ -134,6 +134,8 @@ struct cpcap_phy_ddata { struct iio_channel *id; struct regulator *vusb; atomic_t active; + unsigned int vbus_provider:1; + unsigned int docked:1; }; static bool cpcap_usb_vbus_valid(struct cpcap_phy_ddata *ddata) @@ -207,6 +209,19 @@ static int cpcap_phy_get_ints_state(struct cpcap_phy_ddata *ddata, static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata); static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata); +static void cpcap_usb_try_musb_mailbox(struct cpcap_phy_ddata *ddata, + enum musb_vbus_id_status status) +{ + int error; + + error = musb_mailbox(status); + if (!error) + return; + + dev_dbg(ddata->dev, "%s: musb_mailbox failed: %i\n", + __func__, error); +} + static void cpcap_usb_detect(struct work_struct *work) { struct cpcap_phy_ddata *ddata; @@ -220,16 +235,66 @@ static void cpcap_usb_detect(struct work_struct *work) if (error) return; - if (s.id_ground) { - dev_dbg(ddata->dev, "id ground, USB host mode\n"); + vbus = cpcap_usb_vbus_valid(ddata); + + /* We need to kick the VBUS as USB A-host */ + if (s.id_ground && ddata->vbus_provider) { + dev_dbg(ddata->dev, "still in USB A-host mode, kicking VBUS\n"); + + cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND); + + error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3, + CPCAP_BIT_VBUSSTBY_EN | + CPCAP_BIT_VBUSEN_SPI, + CPCAP_BIT_VBUSEN_SPI); + if (error) + goto out_err; + + return; + } + + if (vbus && s.id_ground && ddata->docked) { + dev_dbg(ddata->dev, "still docked as A-host, signal ID down\n"); + + 
cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND); + + return; + } + + /* No VBUS needed with docks */ + if (vbus && s.id_ground && !ddata->vbus_provider) { + dev_dbg(ddata->dev, "connected to a dock\n"); + + ddata->docked = true; + error = cpcap_usb_set_usb_mode(ddata); if (error) goto out_err; - error = musb_mailbox(MUSB_ID_GROUND); + cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND); + + /* + * Force check state again after musb has reoriented, + * otherwise devices won't enumerate after loading PHY + * driver. + */ + schedule_delayed_work(&ddata->detect_work, + msecs_to_jiffies(1000)); + + return; + } + + if (s.id_ground && !ddata->docked) { + dev_dbg(ddata->dev, "id ground, USB host mode\n"); + + ddata->vbus_provider = true; + + error = cpcap_usb_set_usb_mode(ddata); if (error) goto out_err; + cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND); + error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3, CPCAP_BIT_VBUSSTBY_EN | CPCAP_BIT_VBUSEN_SPI, @@ -248,43 +313,26 @@ static void cpcap_usb_detect(struct work_struct *work) vbus = cpcap_usb_vbus_valid(ddata); + /* Otherwise assume we're connected to a USB host */ if (vbus) { - /* Are we connected to a docking station with vbus? */ - if (s.id_ground) { - dev_dbg(ddata->dev, "connected to a dock\n"); - - /* No VBUS needed with docks */ - error = cpcap_usb_set_usb_mode(ddata); - if (error) - goto out_err; - error = musb_mailbox(MUSB_ID_GROUND); - if (error) - goto out_err; - - return; - } - - /* Otherwise assume we're connected to a USB host */ dev_dbg(ddata->dev, "connected to USB host\n"); error = cpcap_usb_set_usb_mode(ddata); if (error) goto out_err; - error = musb_mailbox(MUSB_VBUS_VALID); - if (error) - goto out_err; + cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_VALID); return; } + ddata->vbus_provider = false; + ddata->docked = false; + cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF); + /* Default to debug UART mode */ error = cpcap_usb_set_uart_mode(ddata); if (error) goto out_err; - error = musb_mailbox(MUSB_VBUS_OFF); - if (error) - goto out_err; - dev_dbg(ddata->dev, "set UART mode\n"); return; @@ -376,7 +424,8 @@ static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata) { int error; - error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP); + /* Disable lines to prevent glitches from waking up mdm6600 */ + error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED); if (error) goto out_err; @@ -403,6 +452,11 @@ static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata) if (error) goto out_err; + /* Enable UART mode */ + error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP); + if (error) + goto out_err; + return 0; out_err: @@ -415,7 +469,8 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata) { int error; - error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP); + /* Disable lines to prevent glitches from waking up mdm6600 */ + error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED); if (error) return error; @@ -434,12 +489,6 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata) if (error) goto out_err; - error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC2, - CPCAP_BIT_USBXCVREN, - CPCAP_BIT_USBXCVREN); - if (error) - goto out_err; - error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3, CPCAP_BIT_PU_SPI | CPCAP_BIT_DMPD_SPI | @@ -455,6 +504,11 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata) if (error) goto out_err; + /* Enable USB mode */ + error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP); + if (error) + goto out_err; + return 0; out_err: @@ -649,9 
+703,7 @@ static int cpcap_usb_phy_remove(struct platform_device *pdev) if (error) dev_err(ddata->dev, "could not set UART mode\n"); - error = musb_mailbox(MUSB_VBUS_OFF); - if (error) - dev_err(ddata->dev, "could not set mailbox\n"); + cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF); usb_remove_phy(&ddata->phy); cancel_delayed_work_sync(&ddata->detect_work); diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c index ee184d5607bd..f20524f0c21d 100644 --- a/drivers/phy/motorola/phy-mapphone-mdm6600.c +++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c @@ -200,7 +200,7 @@ static void phy_mdm6600_status(struct work_struct *work) struct phy_mdm6600 *ddata; struct device *dev; DECLARE_BITMAP(values, PHY_MDM6600_NR_STATUS_LINES); - int error, i, val = 0; + int error; ddata = container_of(work, struct phy_mdm6600, status_work.work); dev = ddata->dev; @@ -212,16 +212,11 @@ static void phy_mdm6600_status(struct work_struct *work) if (error) return; - for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) { - val |= test_bit(i, values) << i; - dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n", - __func__, i, test_bit(i, values), val); - } - ddata->status = values[0]; + ddata->status = values[0] & ((1 << PHY_MDM6600_NR_STATUS_LINES) - 1); dev_info(dev, "modem status: %i %s\n", ddata->status, - phy_mdm6600_status_name[ddata->status & 7]); + phy_mdm6600_status_name[ddata->status]); complete(&ddata->ack); } diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index 091e20303a14..66f91726b8b2 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -66,7 +66,7 @@ /* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */ #define CLAMP_EN BIT(0) /* enables i/o clamp_n */ -#define PHY_INIT_COMPLETE_TIMEOUT 1000 +#define PHY_INIT_COMPLETE_TIMEOUT 10000 #define POWER_DOWN_DELAY_US_MIN 10 #define POWER_DOWN_DELAY_US_MAX 11 diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c index 2b97fb1185a0..9ca20c947283 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c @@ -603,6 +603,8 @@ static long inno_hdmi_phy_rk3228_clk_round_rate(struct clk_hw *hw, { const struct pre_pll_config *cfg = pre_pll_cfg_table; + rate = (rate / 1000) * 1000; + for (; cfg->pixclock != 0; cfg++) if (cfg->pixclock == rate && !cfg->fracdiv) break; @@ -755,6 +757,8 @@ static long inno_hdmi_phy_rk3328_clk_round_rate(struct clk_hw *hw, { const struct pre_pll_config *cfg = pre_pll_cfg_table; + rate = (rate / 1000) * 1000; + for (; cfg->pixclock != 0; cfg++) if (cfg->pixclock == rate) break; diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 3bfbf2ff6e2b..df0ef69dd474 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -422,6 +422,7 @@ config PINCTRL_TB10X config PINCTRL_EQUILIBRIUM tristate "Generic pinctrl and GPIO driver for Intel Lightning Mountain SoC" + depends on OF && HAS_IOMEM select PINMUX select PINCONF select GPIOLIB diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c index c6800d220920..bb07024d22ed 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c @@ -1088,60 +1088,52 @@ SSSF_PIN_DECL(AF15, GPIOV7, LPCSMI, SIG_DESC_SET(SCU434, 15)); #define AB7 176 SIG_EXPR_LIST_DECL_SESG(AB7, LAD0, LPC, SIG_DESC_SET(SCU434, 16), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AB7, 
ESPID0, ESPI, SIG_DESC_SET(SCU434, 16), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AB7, ESPID0, ESPI, SIG_DESC_SET(SCU434, 16)); PIN_DECL_2(AB7, GPIOW0, LAD0, ESPID0); #define AB8 177 SIG_EXPR_LIST_DECL_SESG(AB8, LAD1, LPC, SIG_DESC_SET(SCU434, 17), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AB8, ESPID1, ESPI, SIG_DESC_SET(SCU434, 17), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AB8, ESPID1, ESPI, SIG_DESC_SET(SCU434, 17)); PIN_DECL_2(AB8, GPIOW1, LAD1, ESPID1); #define AC8 178 SIG_EXPR_LIST_DECL_SESG(AC8, LAD2, LPC, SIG_DESC_SET(SCU434, 18), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AC8, ESPID2, ESPI, SIG_DESC_SET(SCU434, 18), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AC8, ESPID2, ESPI, SIG_DESC_SET(SCU434, 18)); PIN_DECL_2(AC8, GPIOW2, LAD2, ESPID2); #define AC7 179 SIG_EXPR_LIST_DECL_SESG(AC7, LAD3, LPC, SIG_DESC_SET(SCU434, 19), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AC7, ESPID3, ESPI, SIG_DESC_SET(SCU434, 19), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AC7, ESPID3, ESPI, SIG_DESC_SET(SCU434, 19)); PIN_DECL_2(AC7, GPIOW3, LAD3, ESPID3); #define AE7 180 SIG_EXPR_LIST_DECL_SESG(AE7, LCLK, LPC, SIG_DESC_SET(SCU434, 20), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AE7, ESPICK, ESPI, SIG_DESC_SET(SCU434, 20), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AE7, ESPICK, ESPI, SIG_DESC_SET(SCU434, 20)); PIN_DECL_2(AE7, GPIOW4, LCLK, ESPICK); #define AF7 181 SIG_EXPR_LIST_DECL_SESG(AF7, LFRAME, LPC, SIG_DESC_SET(SCU434, 21), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AF7, ESPICS, ESPI, SIG_DESC_SET(SCU434, 21), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AF7, ESPICS, ESPI, SIG_DESC_SET(SCU434, 21)); PIN_DECL_2(AF7, GPIOW5, LFRAME, ESPICS); #define AD7 182 SIG_EXPR_LIST_DECL_SESG(AD7, LSIRQ, LSIRQ, SIG_DESC_SET(SCU434, 22), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AD7, ESPIALT, ESPIALT, SIG_DESC_SET(SCU434, 22), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AD7, ESPIALT, ESPIALT, SIG_DESC_SET(SCU434, 22)); PIN_DECL_2(AD7, GPIOW6, LSIRQ, ESPIALT); FUNC_GROUP_DECL(LSIRQ, AD7); FUNC_GROUP_DECL(ESPIALT, AD7); #define AD8 183 SIG_EXPR_LIST_DECL_SESG(AD8, LPCRST, LPC, SIG_DESC_SET(SCU434, 23), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AD8, ESPIRST, ESPI, SIG_DESC_SET(SCU434, 23), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AD8, ESPIRST, ESPI, SIG_DESC_SET(SCU434, 23)); PIN_DECL_2(AD8, GPIOW7, LPCRST, ESPIRST); FUNC_GROUP_DECL(LPC, AB7, AB8, AC8, AC7, AE7, AF7, AD8); diff --git a/drivers/pinctrl/cirrus/Kconfig b/drivers/pinctrl/cirrus/Kconfig index f1806fd781a0..530426a74f75 100644 --- a/drivers/pinctrl/cirrus/Kconfig +++ b/drivers/pinctrl/cirrus/Kconfig @@ -2,6 +2,7 @@ config PINCTRL_LOCHNAGAR tristate "Cirrus Logic Lochnagar pinctrl driver" depends on MFD_LOCHNAGAR + select GPIOLIB select PINMUX select PINCONF select GENERIC_PINCONF diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 9ffb22211d2b..55141d5de29e 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -110,7 +110,6 @@ struct byt_gpio { struct platform_device *pdev; struct pinctrl_dev *pctl_dev; struct pinctrl_desc pctl_desc; - raw_spinlock_t lock; const struct intel_pinctrl_soc_data *soc_data; struct intel_community *communities_copy; struct byt_gpio_pin_context *saved_context; @@ -494,34 +493,34 @@ static const struct intel_pinctrl_soc_data byt_sus_soc_data = { }; static const struct 
pinctrl_pin_desc byt_ncore_pins[] = { - PINCTRL_PIN(0, "GPIO_NCORE0"), - PINCTRL_PIN(1, "GPIO_NCORE1"), - PINCTRL_PIN(2, "GPIO_NCORE2"), - PINCTRL_PIN(3, "GPIO_NCORE3"), - PINCTRL_PIN(4, "GPIO_NCORE4"), - PINCTRL_PIN(5, "GPIO_NCORE5"), - PINCTRL_PIN(6, "GPIO_NCORE6"), - PINCTRL_PIN(7, "GPIO_NCORE7"), - PINCTRL_PIN(8, "GPIO_NCORE8"), - PINCTRL_PIN(9, "GPIO_NCORE9"), - PINCTRL_PIN(10, "GPIO_NCORE10"), - PINCTRL_PIN(11, "GPIO_NCORE11"), - PINCTRL_PIN(12, "GPIO_NCORE12"), - PINCTRL_PIN(13, "GPIO_NCORE13"), - PINCTRL_PIN(14, "GPIO_NCORE14"), - PINCTRL_PIN(15, "GPIO_NCORE15"), - PINCTRL_PIN(16, "GPIO_NCORE16"), - PINCTRL_PIN(17, "GPIO_NCORE17"), - PINCTRL_PIN(18, "GPIO_NCORE18"), - PINCTRL_PIN(19, "GPIO_NCORE19"), - PINCTRL_PIN(20, "GPIO_NCORE20"), - PINCTRL_PIN(21, "GPIO_NCORE21"), - PINCTRL_PIN(22, "GPIO_NCORE22"), - PINCTRL_PIN(23, "GPIO_NCORE23"), - PINCTRL_PIN(24, "GPIO_NCORE24"), - PINCTRL_PIN(25, "GPIO_NCORE25"), - PINCTRL_PIN(26, "GPIO_NCORE26"), - PINCTRL_PIN(27, "GPIO_NCORE27"), + PINCTRL_PIN(0, "HV_DDI0_HPD"), + PINCTRL_PIN(1, "HV_DDI0_DDC_SDA"), + PINCTRL_PIN(2, "HV_DDI0_DDC_SCL"), + PINCTRL_PIN(3, "PANEL0_VDDEN"), + PINCTRL_PIN(4, "PANEL0_BKLTEN"), + PINCTRL_PIN(5, "PANEL0_BKLTCTL"), + PINCTRL_PIN(6, "HV_DDI1_HPD"), + PINCTRL_PIN(7, "HV_DDI1_DDC_SDA"), + PINCTRL_PIN(8, "HV_DDI1_DDC_SCL"), + PINCTRL_PIN(9, "PANEL1_VDDEN"), + PINCTRL_PIN(10, "PANEL1_BKLTEN"), + PINCTRL_PIN(11, "PANEL1_BKLTCTL"), + PINCTRL_PIN(12, "GP_INTD_DSI_TE1"), + PINCTRL_PIN(13, "HV_DDI2_DDC_SDA"), + PINCTRL_PIN(14, "HV_DDI2_DDC_SCL"), + PINCTRL_PIN(15, "GP_CAMERASB00"), + PINCTRL_PIN(16, "GP_CAMERASB01"), + PINCTRL_PIN(17, "GP_CAMERASB02"), + PINCTRL_PIN(18, "GP_CAMERASB03"), + PINCTRL_PIN(19, "GP_CAMERASB04"), + PINCTRL_PIN(20, "GP_CAMERASB05"), + PINCTRL_PIN(21, "GP_CAMERASB06"), + PINCTRL_PIN(22, "GP_CAMERASB07"), + PINCTRL_PIN(23, "GP_CAMERASB08"), + PINCTRL_PIN(24, "GP_CAMERASB09"), + PINCTRL_PIN(25, "GP_CAMERASB10"), + PINCTRL_PIN(26, "GP_CAMERASB11"), + PINCTRL_PIN(27, "GP_INTD_DSI_TE2"), }; static const unsigned int byt_ncore_pins_map[BYT_NGPIO_NCORE] = { @@ -549,6 +548,8 @@ static const struct intel_pinctrl_soc_data *byt_soc_data[] = { NULL }; +static DEFINE_RAW_SPINLOCK(byt_lock); + static struct intel_community *byt_get_community(struct byt_gpio *vg, unsigned int pin) { @@ -658,7 +659,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg, unsigned long flags; int i; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); for (i = 0; i < group.npins; i++) { void __iomem *padcfg0; @@ -678,7 +679,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg, writel(value, padcfg0); } - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static void byt_set_group_mixed_mux(struct byt_gpio *vg, @@ -688,7 +689,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg, unsigned long flags; int i; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); for (i = 0; i < group.npins; i++) { void __iomem *padcfg0; @@ -708,7 +709,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg, writel(value, padcfg0); } - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector, @@ -749,11 +750,11 @@ static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset) unsigned long flags; u32 value; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, 
flags); value = readl(reg); value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL); writel(value, reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev, @@ -765,7 +766,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev, u32 value, gpio_mux; unsigned long flags; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); /* * In most cases, func pin mux 000 means GPIO function. @@ -787,7 +788,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev, "pin %u forcibly re-configured as GPIO\n", offset); } - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); pm_runtime_get(&vg->pdev->dev); @@ -815,7 +816,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, unsigned long flags; u32 value; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); value = readl(val_reg); value &= ~BYT_DIR_MASK; @@ -832,7 +833,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, "Potential Error: Setting GPIO with direct_irq_en to output"); writel(value, val_reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); return 0; } @@ -901,11 +902,11 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset, u32 conf, pull, val, debounce; u16 arg = 0; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); conf = readl(conf_reg); pull = conf & BYT_PULL_ASSIGN_MASK; val = readl(val_reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); switch (param) { case PIN_CONFIG_BIAS_DISABLE: @@ -932,9 +933,9 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset, if (!(conf & BYT_DEBOUNCE_EN)) return -EINVAL; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); debounce = readl(db_reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); switch (debounce & BYT_DEBOUNCE_PULSE_MASK) { case BYT_DEBOUNCE_PULSE_375US: @@ -986,7 +987,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, u32 conf, val, debounce; int i, ret = 0; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); conf = readl(conf_reg); val = readl(val_reg); @@ -1094,7 +1095,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, if (!ret) writel(conf, conf_reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); return ret; } @@ -1119,9 +1120,9 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned int offset) unsigned long flags; u32 val; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); val = readl(reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); return !!(val & BYT_LEVEL); } @@ -1136,13 +1137,13 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) if (!reg) return; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); old_val = readl(reg); if (value) writel(old_val | BYT_LEVEL, reg); else writel(old_val & ~BYT_LEVEL, reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) @@ 
-1155,9 +1156,9 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) if (!reg) return -EINVAL; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); value = readl(reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); if (!(value & BYT_OUTPUT_EN)) return 0; @@ -1200,14 +1201,14 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) const char *label; unsigned int pin; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); pin = vg->soc_data->pins[i].number; reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG); if (!reg) { seq_printf(s, "Could not retrieve pin %i conf0 reg\n", pin); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); continue; } conf0 = readl(reg); @@ -1216,11 +1217,11 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) if (!reg) { seq_printf(s, "Could not retrieve pin %i val reg\n", pin); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); continue; } val = readl(reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); comm = byt_get_community(vg, pin); if (!comm) { @@ -1304,9 +1305,9 @@ static void byt_irq_ack(struct irq_data *d) if (!reg) return; - raw_spin_lock(&vg->lock); + raw_spin_lock(&byt_lock); writel(BIT(offset % 32), reg); - raw_spin_unlock(&vg->lock); + raw_spin_unlock(&byt_lock); } static void byt_irq_mask(struct irq_data *d) @@ -1330,7 +1331,7 @@ static void byt_irq_unmask(struct irq_data *d) if (!reg) return; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); value = readl(reg); switch (irqd_get_trigger_type(d)) { @@ -1353,7 +1354,7 @@ static void byt_irq_unmask(struct irq_data *d) writel(value, reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static int byt_irq_type(struct irq_data *d, unsigned int type) @@ -1367,7 +1368,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type) if (!reg || offset >= vg->chip.ngpio) return -EINVAL; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); value = readl(reg); WARN(value & BYT_DIRECT_IRQ_EN, @@ -1389,7 +1390,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type) else if (type & IRQ_TYPE_LEVEL_MASK) irq_set_handler_locked(d, handle_level_irq); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); return 0; } @@ -1425,9 +1426,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc) continue; } - raw_spin_lock(&vg->lock); + raw_spin_lock(&byt_lock); pending = readl(reg); - raw_spin_unlock(&vg->lock); + raw_spin_unlock(&byt_lock); for_each_set_bit(pin, &pending, 32) { virq = irq_find_mapping(vg->chip.irq.domain, base + pin); generic_handle_irq(virq); @@ -1450,9 +1451,9 @@ static void byt_init_irq_valid_mask(struct gpio_chip *chip, */ } -static void byt_gpio_irq_init_hw(struct byt_gpio *vg) +static int byt_gpio_irq_init_hw(struct gpio_chip *chip) { - struct gpio_chip *gc = &vg->chip; + struct byt_gpio *vg = gpiochip_get_data(chip); struct device *dev = &vg->pdev->dev; void __iomem *reg; u32 base, value; @@ -1476,7 +1477,7 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg) value = readl(reg); if (value & BYT_DIRECT_IRQ_EN) { - clear_bit(i, gc->irq.valid_mask); + clear_bit(i, chip->irq.valid_mask); dev_dbg(dev, "excluding 
GPIO %d from IRQ domain\n", i); } else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) { byt_gpio_clear_triggering(vg, i); @@ -1504,6 +1505,21 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg) "GPIO interrupt error, pins misconfigured. INT_STAT%u: 0x%08x\n", base / 32, value); } + + return 0; +} + +static int byt_gpio_add_pin_ranges(struct gpio_chip *chip) +{ + struct byt_gpio *vg = gpiochip_get_data(chip); + struct device *dev = &vg->pdev->dev; + int ret; + + ret = gpiochip_add_pin_range(chip, dev_name(dev), 0, 0, vg->soc_data->npins); + if (ret) + dev_err(dev, "failed to add GPIO pin range\n"); + + return ret; } static int byt_gpio_probe(struct byt_gpio *vg) @@ -1518,6 +1534,7 @@ static int byt_gpio_probe(struct byt_gpio *vg) gc->label = dev_name(&vg->pdev->dev); gc->base = -1; gc->can_sleep = false; + gc->add_pin_ranges = byt_gpio_add_pin_ranges; gc->parent = &vg->pdev->dev; gc->ngpio = vg->soc_data->npins; gc->irq.init_valid_mask = byt_init_irq_valid_mask; @@ -1528,33 +1545,30 @@ static int byt_gpio_probe(struct byt_gpio *vg) if (!vg->saved_context) return -ENOMEM; #endif - ret = devm_gpiochip_add_data(&vg->pdev->dev, gc, vg); - if (ret) { - dev_err(&vg->pdev->dev, "failed adding byt-gpio chip\n"); - return ret; - } - - ret = gpiochip_add_pin_range(&vg->chip, dev_name(&vg->pdev->dev), - 0, 0, vg->soc_data->npins); - if (ret) { - dev_err(&vg->pdev->dev, "failed to add GPIO pin range\n"); - return ret; - } /* set up interrupts */ irq_rc = platform_get_resource(vg->pdev, IORESOURCE_IRQ, 0); if (irq_rc && irq_rc->start) { - byt_gpio_irq_init_hw(vg); - ret = gpiochip_irqchip_add(gc, &byt_irqchip, 0, - handle_bad_irq, IRQ_TYPE_NONE); - if (ret) { - dev_err(&vg->pdev->dev, "failed to add irqchip\n"); - return ret; - } + struct gpio_irq_chip *girq; + + girq = &gc->irq; + girq->chip = &byt_irqchip; + girq->init_hw = byt_gpio_irq_init_hw; + girq->parent_handler = byt_gpio_irq_handler; + girq->num_parents = 1; + girq->parents = devm_kcalloc(&vg->pdev->dev, girq->num_parents, + sizeof(*girq->parents), GFP_KERNEL); + if (!girq->parents) + return -ENOMEM; + girq->parents[0] = (unsigned int)irq_rc->start; + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_bad_irq; + } - gpiochip_set_chained_irqchip(gc, &byt_irqchip, - (unsigned)irq_rc->start, - byt_gpio_irq_handler); + ret = devm_gpiochip_add_data(&vg->pdev->dev, gc, vg); + if (ret) { + dev_err(&vg->pdev->dev, "failed adding byt-gpio chip\n"); + return ret; } return ret; @@ -1638,8 +1652,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev) return PTR_ERR(vg->pctl_dev); } - raw_spin_lock_init(&vg->lock); - ret = byt_gpio_probe(vg); if (ret) return ret; @@ -1654,8 +1666,11 @@ static int byt_pinctrl_probe(struct platform_device *pdev) static int byt_gpio_suspend(struct device *dev) { struct byt_gpio *vg = dev_get_drvdata(dev); + unsigned long flags; int i; + raw_spin_lock_irqsave(&byt_lock, flags); + for (i = 0; i < vg->soc_data->npins; i++) { void __iomem *reg; u32 value; @@ -1676,14 +1691,18 @@ static int byt_gpio_suspend(struct device *dev) vg->saved_context[i].val = value; } + raw_spin_unlock_irqrestore(&byt_lock, flags); return 0; } static int byt_gpio_resume(struct device *dev) { struct byt_gpio *vg = dev_get_drvdata(dev); + unsigned long flags; int i; + raw_spin_lock_irqsave(&byt_lock, flags); + for (i = 0; i < vg->soc_data->npins; i++) { void __iomem *reg; u32 value; @@ -1721,6 +1740,7 @@ static int byt_gpio_resume(struct device *dev) } } + raw_spin_unlock_irqrestore(&byt_lock, flags); return 0; } #endif diff 
--git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 582fa8a75559..60527b93a711 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -149,6 +149,7 @@ struct chv_pin_context { * @chip: GPIO chip in this pin controller * @irqchip: IRQ chip in this pin controller * @regs: MMIO registers + * @irq: Our parent irq * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO * offset (in GPIO number space) * @community: Community this pinctrl instance represents @@ -165,6 +166,7 @@ struct chv_pinctrl { struct gpio_chip chip; struct irq_chip irqchip; void __iomem *regs; + unsigned int irq; unsigned int intr_lines[16]; const struct chv_community *community; u32 saved_intmask; @@ -1555,39 +1557,9 @@ static void chv_init_irq_valid_mask(struct gpio_chip *chip, } } -static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) +static int chv_gpio_irq_init_hw(struct gpio_chip *chip) { - const struct chv_gpio_pinrange *range; - struct gpio_chip *chip = &pctrl->chip; - bool need_valid_mask = !dmi_check_system(chv_no_valid_mask); - const struct chv_community *community = pctrl->community; - int ret, i, irq_base; - - *chip = chv_gpio_chip; - - chip->ngpio = community->pins[community->npins - 1].number + 1; - chip->label = dev_name(pctrl->dev); - chip->parent = pctrl->dev; - chip->base = -1; - if (need_valid_mask) - chip->irq.init_valid_mask = chv_init_irq_valid_mask; - - ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl); - if (ret) { - dev_err(pctrl->dev, "Failed to register gpiochip\n"); - return ret; - } - - for (i = 0; i < community->ngpio_ranges; i++) { - range = &community->gpio_ranges[i]; - ret = gpiochip_add_pin_range(chip, dev_name(pctrl->dev), - range->base, range->base, - range->npins); - if (ret) { - dev_err(pctrl->dev, "failed to add GPIO pin range\n"); - return ret; - } - } + struct chv_pinctrl *pctrl = gpiochip_get_data(chip); /* * The same set of machines in chv_no_valid_mask[] have incorrectly @@ -1596,7 +1568,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) * * See also https://bugzilla.kernel.org/show_bug.cgi?id=197953. */ - if (!need_valid_mask) { + if (!pctrl->chip.irq.init_valid_mask) { /* * Mask all interrupts the community is able to generate * but leave the ones that can only generate GPEs unmasked. 
@@ -1608,15 +1580,47 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) /* Clear all interrupts */ chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); - if (!need_valid_mask) { - irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0, - community->npins, NUMA_NO_NODE); - if (irq_base < 0) { - dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n"); - return irq_base; + return 0; +} + +static int chv_gpio_add_pin_ranges(struct gpio_chip *chip) +{ + struct chv_pinctrl *pctrl = gpiochip_get_data(chip); + const struct chv_community *community = pctrl->community; + const struct chv_gpio_pinrange *range; + int ret, i; + + for (i = 0; i < community->ngpio_ranges; i++) { + range = &community->gpio_ranges[i]; + ret = gpiochip_add_pin_range(chip, dev_name(pctrl->dev), + range->base, range->base, + range->npins); + if (ret) { + dev_err(pctrl->dev, "failed to add GPIO pin range\n"); + return ret; } } + return 0; +} + +static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) +{ + const struct chv_gpio_pinrange *range; + struct gpio_chip *chip = &pctrl->chip; + bool need_valid_mask = !dmi_check_system(chv_no_valid_mask); + const struct chv_community *community = pctrl->community; + int ret, i, irq_base; + + *chip = chv_gpio_chip; + + chip->ngpio = community->pins[community->npins - 1].number + 1; + chip->label = dev_name(pctrl->dev); + chip->add_pin_ranges = chv_gpio_add_pin_ranges; + chip->parent = pctrl->dev; + chip->base = -1; + + pctrl->irq = irq; pctrl->irqchip.name = "chv-gpio"; pctrl->irqchip.irq_startup = chv_gpio_irq_startup; pctrl->irqchip.irq_ack = chv_gpio_irq_ack; @@ -1625,10 +1629,27 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) pctrl->irqchip.irq_set_type = chv_gpio_irq_type; pctrl->irqchip.flags = IRQCHIP_SKIP_SET_WAKE; - ret = gpiochip_irqchip_add(chip, &pctrl->irqchip, 0, - handle_bad_irq, IRQ_TYPE_NONE); + chip->irq.chip = &pctrl->irqchip; + chip->irq.init_hw = chv_gpio_irq_init_hw; + chip->irq.parent_handler = chv_gpio_irq_handler; + chip->irq.num_parents = 1; + chip->irq.parents = &pctrl->irq; + chip->irq.default_type = IRQ_TYPE_NONE; + chip->irq.handler = handle_bad_irq; + if (need_valid_mask) { + chip->irq.init_valid_mask = chv_init_irq_valid_mask; + } else { + irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0, + community->npins, NUMA_NO_NODE); + if (irq_base < 0) { + dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n"); + return irq_base; + } + } + + ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl); if (ret) { - dev_err(pctrl->dev, "failed to add IRQ chip\n"); + dev_err(pctrl->dev, "Failed to register gpiochip\n"); return ret; } @@ -1642,8 +1663,6 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) } } - gpiochip_set_chained_irqchip(chip, &pctrl->irqchip, irq, - chv_gpio_irq_handler); return 0; } diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 3c80828a5e50..bbc919bef2bf 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -441,6 +441,7 @@ static int meson_pinconf_get_drive_strength(struct meson_pinctrl *pc, return ret; meson_calc_reg_and_bit(bank, pin, REG_DS, &reg, &bit); + bit = bit << 1; ret = regmap_read(pc->reg_ds, reg, &val); if (ret) diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index 24e0e2ef47a4..369e04350e3d 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c @@ -1809,7 +1809,7 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc, static void
ingenic_set_output_level(struct ingenic_pinctrl *jzpc, unsigned int pin, bool high) { - if (jzpc->version >= ID_JZ4770) + if (jzpc->version >= ID_JZ4760) ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high); else ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high); diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c index e914f6efd39e..9503ddf2edc7 100644 --- a/drivers/pinctrl/pinmux.c +++ b/drivers/pinctrl/pinmux.c @@ -85,7 +85,7 @@ bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned pin) const struct pinmux_ops *ops = pctldev->desc->pmxops; /* Can't inspect pin, assume it can be used */ - if (!desc) + if (!desc || !ops) return true; if (ops->strict && desc->mux_usecount) diff --git a/drivers/platform/chrome/wilco_ec/keyboard_leds.c b/drivers/platform/chrome/wilco_ec/keyboard_leds.c index bb0edf51dfda..5731d1b60e28 100644 --- a/drivers/platform/chrome/wilco_ec/keyboard_leds.c +++ b/drivers/platform/chrome/wilco_ec/keyboard_leds.c @@ -73,13 +73,6 @@ static int send_kbbl_msg(struct wilco_ec_device *ec, return ret; } - if (response->status) { - dev_err(ec->dev, - "EC reported failure sending keyboard LEDs command: %d", - response->status); - return -EIO; - } - return 0; } @@ -87,6 +80,7 @@ static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness) { struct wilco_keyboard_leds_msg request; struct wilco_keyboard_leds_msg response; + int ret; memset(&request, 0, sizeof(request)); request.command = WILCO_EC_COMMAND_KBBL; @@ -94,7 +88,18 @@ static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness) request.mode = WILCO_KBBL_MODE_FLAG_PWM; request.percent = brightness; - return send_kbbl_msg(ec, &request, &response); + ret = send_kbbl_msg(ec, &request, &response); + if (ret < 0) + return ret; + + if (response.status) { + dev_err(ec->dev, + "EC reported failure sending keyboard LEDs command: %d", + response.status); + return -EIO; + } + + return 0; } static int kbbl_exist(struct wilco_ec_device *ec, bool *exists) @@ -140,6 +145,13 @@ static int kbbl_init(struct wilco_ec_device *ec) if (ret < 0) return ret; + if (response.status) { + dev_err(ec->dev, + "EC reported failure sending keyboard LEDs command: %d", + response.status); + return -EIO; + } + if (response.mode & WILCO_KBBL_MODE_FLAG_PWM) return response.percent; diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c index 61753b648506..5d21c6adf1ab 100644 --- a/drivers/platform/mellanox/mlxbf-bootctl.c +++ b/drivers/platform/mellanox/mlxbf-bootctl.c @@ -309,7 +309,7 @@ static struct platform_driver mlxbf_bootctl_driver = { .probe = mlxbf_bootctl_probe, .driver = { .name = "mlxbf-bootctl", - .groups = mlxbf_bootctl_groups, + .dev_groups = mlxbf_bootctl_groups, .acpi_match_table = mlxbf_bootctl_acpi_ids, } }; diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c index 9a5c9fd2dbc6..5739a9669b29 100644 --- a/drivers/platform/mellanox/mlxbf-tmfifo.c +++ b/drivers/platform/mellanox/mlxbf-tmfifo.c @@ -149,7 +149,7 @@ struct mlxbf_tmfifo_irq_info { * @work: work struct for deferred process * @timer: background timer * @vring: Tx/Rx ring - * @spin_lock: spin lock + * @spin_lock: Tx/Rx spin lock * @is_ready: ready flag */ struct mlxbf_tmfifo { @@ -164,7 +164,7 @@ struct mlxbf_tmfifo { struct work_struct work; struct timer_list timer; struct mlxbf_tmfifo_vring *vring[2]; - spinlock_t spin_lock; /* spin lock */ + spinlock_t spin_lock[2]; /* spin lock */ bool is_ready; }; @@ -525,7 +525,7 @@ 
static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail) writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA); /* Use spin-lock to protect the 'cons->tx_buf'. */ - spin_lock_irqsave(&fifo->spin_lock, flags); + spin_lock_irqsave(&fifo->spin_lock[0], flags); while (size > 0) { addr = cons->tx_buf.buf + cons->tx_buf.tail; @@ -552,7 +552,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail) } } - spin_unlock_irqrestore(&fifo->spin_lock, flags); + spin_unlock_irqrestore(&fifo->spin_lock[0], flags); } /* Rx/Tx one word in the descriptor buffer. */ @@ -731,9 +731,9 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring, fifo->vring[is_rx] = NULL; /* Notify upper layer that packet is done. */ - spin_lock_irqsave(&fifo->spin_lock, flags); + spin_lock_irqsave(&fifo->spin_lock[is_rx], flags); vring_interrupt(0, vring->vq); - spin_unlock_irqrestore(&fifo->spin_lock, flags); + spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags); } mlxbf_tmfifo_desc_done: @@ -852,10 +852,10 @@ static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq) * worker handler. */ if (vring->vdev_id == VIRTIO_ID_CONSOLE) { - spin_lock_irqsave(&fifo->spin_lock, flags); + spin_lock_irqsave(&fifo->spin_lock[0], flags); tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE]; mlxbf_tmfifo_console_output(tm_vdev, vring); - spin_unlock_irqrestore(&fifo->spin_lock, flags); + spin_unlock_irqrestore(&fifo->spin_lock[0], flags); } else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events)) { return true; @@ -1189,7 +1189,8 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev) if (!fifo) return -ENOMEM; - spin_lock_init(&fifo->spin_lock); + spin_lock_init(&fifo->spin_lock[0]); + spin_lock_init(&fifo->spin_lock[1]); INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler); mutex_init(&fifo->lock); diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig index f4d0a86c00d0..5e77b0dc5fd6 100644 --- a/drivers/platform/mips/Kconfig +++ b/drivers/platform/mips/Kconfig @@ -18,7 +18,7 @@ if MIPS_PLATFORM_DEVICES config CPU_HWMON tristate "Loongson-3 CPU HWMon Driver" - depends on CONFIG_MACH_LOONGSON64 + depends on MACH_LOONGSON64 select HWMON default y help diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 821b08e01635..982f0cc8270c 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -512,13 +512,7 @@ static void kbd_led_update(struct asus_wmi *asus) { int ctrl_param = 0; - /* - * bits 0-2: level - * bit 7: light on/off - */ - if (asus->kbd_led_wk > 0) - ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F); - + ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F); asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL); } diff --git a/drivers/platform/x86/gpd-pocket-fan.c b/drivers/platform/x86/gpd-pocket-fan.c index be85ed966bf3..b471b86c28fe 100644 --- a/drivers/platform/x86/gpd-pocket-fan.c +++ b/drivers/platform/x86/gpd-pocket-fan.c @@ -16,17 +16,27 @@ #define MAX_SPEED 3 -static int temp_limits[3] = { 55000, 60000, 65000 }; +#define TEMP_LIMIT0_DEFAULT 55000 +#define TEMP_LIMIT1_DEFAULT 60000 +#define TEMP_LIMIT2_DEFAULT 65000 + +#define HYSTERESIS_DEFAULT 3000 + +#define SPEED_ON_AC_DEFAULT 2 + +static int temp_limits[3] = { + TEMP_LIMIT0_DEFAULT, TEMP_LIMIT1_DEFAULT, TEMP_LIMIT2_DEFAULT, +}; module_param_array(temp_limits, int, NULL, 0444); MODULE_PARM_DESC(temp_limits, "Millicelsius values above which the fan speed increases"); -static int hysteresis = 3000; +static int hysteresis = 
HYSTERESIS_DEFAULT; module_param(hysteresis, int, 0444); MODULE_PARM_DESC(hysteresis, "Hysteresis in millicelsius before lowering the fan speed"); -static int speed_on_ac = 2; +static int speed_on_ac = SPEED_ON_AC_DEFAULT; module_param(speed_on_ac, int, 0444); MODULE_PARM_DESC(speed_on_ac, "minimum fan speed to allow when system is powered by AC"); @@ -117,21 +127,24 @@ static int gpd_pocket_fan_probe(struct platform_device *pdev) int i; for (i = 0; i < ARRAY_SIZE(temp_limits); i++) { - if (temp_limits[i] < 40000 || temp_limits[i] > 70000) { + if (temp_limits[i] < 20000 || temp_limits[i] > 90000) { dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 40000 and 70000)\n", temp_limits[i]); - return -EINVAL; + temp_limits[0] = TEMP_LIMIT0_DEFAULT; + temp_limits[1] = TEMP_LIMIT1_DEFAULT; + temp_limits[2] = TEMP_LIMIT2_DEFAULT; + break; } } if (hysteresis < 1000 || hysteresis > 10000) { dev_err(&pdev->dev, "Invalid hysteresis %d (must be between 1000 and 10000)\n", hysteresis); - return -EINVAL; + hysteresis = HYSTERESIS_DEFAULT; } if (speed_on_ac < 0 || speed_on_ac > MAX_SPEED) { dev_err(&pdev->dev, "Invalid speed_on_ac %d (must be between 0 and 3)\n", speed_on_ac); - return -EINVAL; + speed_on_ac = SPEED_ON_AC_DEFAULT; } fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL); diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 9579a706fc08..a881b709af25 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -300,7 +300,7 @@ static int __init hp_wmi_bios_2008_later(void) static int __init hp_wmi_bios_2009_later(void) { - int state = 0; + u8 state[128]; int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state, sizeof(state), sizeof(state)); if (!ret) diff --git a/drivers/platform/x86/intel_ips.h b/drivers/platform/x86/intel_ips.h index 512ad234ad0d..35ed9711c7b9 100644 --- a/drivers/platform/x86/intel_ips.h +++ b/drivers/platform/x86/intel_ips.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2010 Intel Corporation */ diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h index fdee5772e532..8203ae38dc46 100644 --- a/drivers/platform/x86/intel_pmc_core.h +++ b/drivers/platform/x86/intel_pmc_core.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Intel Core SoC Power Management Controller Header File * diff --git a/drivers/platform/x86/intel_pmc_core_pltdrv.c b/drivers/platform/x86/intel_pmc_core_pltdrv.c index 6fe829f30997..e1266f5c6359 100644 --- a/drivers/platform/x86/intel_pmc_core_pltdrv.c +++ b/drivers/platform/x86/intel_pmc_core_pltdrv.c @@ -44,6 +44,8 @@ static const struct x86_cpu_id intel_pmc_core_platform_ids[] = { INTEL_CPU_FAM6(KABYLAKE, pmc_core_device), INTEL_CPU_FAM6(CANNONLAKE_L, pmc_core_device), INTEL_CPU_FAM6(ICELAKE_L, pmc_core_device), + INTEL_CPU_FAM6(COMETLAKE, pmc_core_device), + INTEL_CPU_FAM6(COMETLAKE_L, pmc_core_device), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids); diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c index 48b112b4f0b0..9b11ef1a401f 100644 --- a/drivers/platform/x86/pcengines-apuv2.c +++ b/drivers/platform/x86/pcengines-apuv2.c @@ -2,7 +2,7 @@ /* * PC-Engines APUv2/APUv3 board platform driver - * for gpio buttons and LEDs + * for GPIO buttons and LEDs * * Copyright (C) 2018 metux IT consult * Author: Enrico Weigelt <info@metux.net> @@ -23,10 +23,10 @@ /* * NOTE: this 
driver only supports APUv2/3 - not APUv1, as this one - * has completely different register layouts + * has completely different register layouts. */ -/* register mappings */ +/* Register mappings */ #define APU2_GPIO_REG_LED1 AMD_FCH_GPIO_REG_GPIO57 #define APU2_GPIO_REG_LED2 AMD_FCH_GPIO_REG_GPIO58 #define APU2_GPIO_REG_LED3 AMD_FCH_GPIO_REG_GPIO59_DEVSLP1 @@ -35,7 +35,7 @@ #define APU2_GPIO_REG_MPCIE2 AMD_FCH_GPIO_REG_GPIO59_DEVSLP0 #define APU2_GPIO_REG_MPCIE3 AMD_FCH_GPIO_REG_GPIO51 -/* order in which the gpio lines are defined in the register list */ +/* Order in which the GPIO lines are defined in the register list */ #define APU2_GPIO_LINE_LED1 0 #define APU2_GPIO_LINE_LED2 1 #define APU2_GPIO_LINE_LED3 2 @@ -44,7 +44,7 @@ #define APU2_GPIO_LINE_MPCIE2 5 #define APU2_GPIO_LINE_MPCIE3 6 -/* gpio device */ +/* GPIO device */ static int apu2_gpio_regs[] = { [APU2_GPIO_LINE_LED1] = APU2_GPIO_REG_LED1, @@ -72,7 +72,7 @@ static const struct amd_fch_gpio_pdata board_apu2 = { .gpio_names = apu2_gpio_names, }; -/* gpio leds device */ +/* GPIO LEDs device */ static const struct gpio_led apu2_leds[] = { { .name = "apu:green:1" }, @@ -95,12 +95,12 @@ static struct gpiod_lookup_table gpios_led_table = { NULL, 1, GPIO_ACTIVE_LOW), GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED3, NULL, 2, GPIO_ACTIVE_LOW), - GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_REG_SIMSWAP, + GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_SIMSWAP, NULL, 3, GPIO_ACTIVE_LOW), } }; -/* gpio keyboard device */ +/* GPIO keyboard device */ static struct gpio_keys_button apu2_keys_buttons[] = { { @@ -129,12 +129,12 @@ static struct gpiod_lookup_table gpios_key_table = { } }; -/* board setup */ +/* Board setup */ -/* note: matching works on string prefix, so "apu2" must come before "apu" */ +/* Note: matching works on string prefix, so "apu2" must come before "apu" */ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = { - /* APU2 w/ legacy bios < 4.0.8 */ + /* APU2 w/ legacy BIOS < 4.0.8 */ { .ident = "apu2", .matches = { @@ -143,7 +143,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = { }, .driver_data = (void *)&board_apu2, }, - /* APU2 w/ legacy bios >= 4.0.8 */ + /* APU2 w/ legacy BIOS >= 4.0.8 */ { .ident = "apu2", .matches = { @@ -152,7 +152,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = { }, .driver_data = (void *)&board_apu2, }, - /* APU2 w/ maainline bios */ + /* APU2 w/ mainline BIOS */ { .ident = "apu2", .matches = { @@ -162,7 +162,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = { .driver_data = (void *)&board_apu2, }, - /* APU3 w/ legacy bios < 4.0.8 */ + /* APU3 w/ legacy BIOS < 4.0.8 */ { .ident = "apu3", .matches = { @@ -171,7 +171,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = { }, .driver_data = (void *)&board_apu2, }, - /* APU3 w/ legacy bios >= 4.0.8 */ + /* APU3 w/ legacy BIOS >= 4.0.8 */ { .ident = "apu3", .matches = { @@ -180,7 +180,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = { }, .driver_data = (void *)&board_apu2, }, - /* APU3 w/ mainline bios */ + /* APU3 w/ mainline BIOS */ { .ident = "apu3", .matches = { @@ -189,6 +189,33 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = { }, .driver_data = (void *)&board_apu2, }, + /* APU4 w/ legacy BIOS < 4.0.8 */ + { + .ident = "apu4", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"), + DMI_MATCH(DMI_BOARD_NAME, "APU4") + }, + .driver_data = (void 
*)&board_apu2, + }, + /* APU4 w/ legacy BIOS >= 4.0.8 */ + { + .ident = "apu4", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"), + DMI_MATCH(DMI_BOARD_NAME, "apu4") + }, + .driver_data = (void *)&board_apu2, + }, + /* APU4 w/ mainline BIOS */ + { + .ident = "apu4", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"), + DMI_MATCH(DMI_BOARD_NAME, "PC Engines apu4") + }, + .driver_data = (void *)&board_apu2, + }, {} }; @@ -223,7 +250,7 @@ static int __init apu_board_init(void) id = dmi_first_match(apu_gpio_dmi_table); if (!id) { - pr_err("failed to detect apu board via dmi\n"); + pr_err("failed to detect APU board via DMI\n"); return -ENODEV; } @@ -262,7 +289,7 @@ module_init(apu_board_init); module_exit(apu_board_exit); MODULE_AUTHOR("Enrico Weigelt, metux IT consult <info@metux.net>"); -MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LED/keys driver"); +MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LEDs/keys driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table); MODULE_ALIAS("platform:pcengines-apuv2"); diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c index 07d1b911e72f..52ef1419b671 100644 --- a/drivers/platform/x86/pmc_atom.c +++ b/drivers/platform/x86/pmc_atom.c @@ -429,6 +429,14 @@ static const struct dmi_system_id critclk_systems[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "6AV7882-0"), }, }, + { + .ident = "CONNECT X300", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"), + DMI_MATCH(DMI_PRODUCT_VERSION, "A5E45074588"), + }, + }, + { /*sentinel*/ } }; diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index a67701ed93e8..2e5b6a6834da 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -1295,6 +1295,9 @@ struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv) struct cpuinfo_x86 *c = &cpu_data(cpu); int ret; + if (!rapl_defaults) + return ERR_PTR(-ENODEV); + rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL); if (!rp) return ERR_PTR(-ENOMEM); diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig index b45d2b86d8ca..b0d1b8d264fa 100644 --- a/drivers/ptp/Kconfig +++ b/drivers/ptp/Kconfig @@ -121,7 +121,7 @@ config PTP_1588_CLOCK_KVM config PTP_1588_CLOCK_IDTCM tristate "IDT CLOCKMATRIX as PTP clock" - depends on PTP_1588_CLOCK + depends on PTP_1588_CLOCK && I2C default n help This driver adds support for using IDT CLOCKMATRIX(TM) as a PTP diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index e60eab7f8a61..b84f16bbd6f2 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -166,10 +166,11 @@ static struct posix_clock_operations ptp_clock_ops = { .read = ptp_read, }; -static void delete_ptp_clock(struct posix_clock *pc) +static void ptp_clock_release(struct device *dev) { - struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); + struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev); + ptp_cleanup_pin_groups(ptp); mutex_destroy(&ptp->tsevq_mux); mutex_destroy(&ptp->pincfg_mux); ida_simple_remove(&ptp_clocks_map, ptp->index); @@ -213,7 +214,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, } ptp->clock.ops = ptp_clock_ops; - ptp->clock.release = delete_ptp_clock; ptp->info = info; ptp->devid = MKDEV(major, index); ptp->index = index; @@ -236,15 +236,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, if (err) goto no_pin_groups; - /* Create a new device in our class. 
*/ - ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid, - ptp, ptp->pin_attr_groups, - "ptp%d", ptp->index); - if (IS_ERR(ptp->dev)) { - err = PTR_ERR(ptp->dev); - goto no_device; - } - /* Register a new PPS source. */ if (info->pps) { struct pps_source_info pps; @@ -260,8 +251,18 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, } } - /* Create a posix clock. */ - err = posix_clock_register(&ptp->clock, ptp->devid); + /* Initialize a new device of our class in our clock structure. */ + device_initialize(&ptp->dev); + ptp->dev.devt = ptp->devid; + ptp->dev.class = ptp_class; + ptp->dev.parent = parent; + ptp->dev.groups = ptp->pin_attr_groups; + ptp->dev.release = ptp_clock_release; + dev_set_drvdata(&ptp->dev, ptp); + dev_set_name(&ptp->dev, "ptp%d", ptp->index); + + /* Create a posix clock and link it to the device. */ + err = posix_clock_register(&ptp->clock, &ptp->dev); if (err) { pr_err("failed to create posix clock\n"); goto no_clock; @@ -273,8 +274,6 @@ no_clock: if (ptp->pps_source) pps_unregister_source(ptp->pps_source); no_pps: - device_destroy(ptp_class, ptp->devid); -no_device: ptp_cleanup_pin_groups(ptp); no_pin_groups: if (ptp->kworker) @@ -304,10 +303,8 @@ int ptp_clock_unregister(struct ptp_clock *ptp) if (ptp->pps_source) pps_unregister_source(ptp->pps_source); - device_destroy(ptp_class, ptp->devid); - ptp_cleanup_pin_groups(ptp); - posix_clock_unregister(&ptp->clock); + return 0; } EXPORT_SYMBOL(ptp_clock_unregister); diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index 9171d42468fd..6b97155148f1 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -28,7 +28,7 @@ struct timestamp_event_queue { struct ptp_clock { struct posix_clock clock; - struct device *dev; + struct device dev; struct ptp_clock_info *info; dev_t devid; int index; /* index into clocks.map */ diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 989506bd90b1..16f0c8570036 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -413,10 +413,13 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp) int i; for (i = 0; i < rate_count; i++) { - if (ramp <= slew_rates[i]) - cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i); - else + if (ramp > slew_rates[i]) break; + + if (id == AXP20X_DCDC2) + cfg = AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE(i); + else + cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i); } if (cfg == 0xff) { @@ -605,7 +608,7 @@ static const struct regulator_desc axp22x_regulators[] = { AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK), AXP_DESC(AXP22X, ELDO2, "eldo2", "eldoin", 700, 3300, 100, AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK, - AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK), + AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK), AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100, AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK, AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK), diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c index ec764022621f..5bf8a2dc5fe7 100644 --- a/drivers/regulator/bd70528-regulator.c +++ b/drivers/regulator/bd70528-regulator.c @@ -101,7 +101,6 @@ static const struct regulator_ops bd70528_ldo_ops = { .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_time_sel = regulator_set_voltage_time_sel, - .set_ramp_delay = bd70528_set_ramp_delay, }; static const struct regulator_ops bd70528_led_ops = { 
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 679ad3d2ed23..03d79fee2987 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1198,6 +1198,10 @@ static int machine_constraints_voltage(struct regulator_dev *rdev, return -EINVAL; } + /* no need to loop voltages if range is continuous */ + if (rdev->desc->continuous_voltage_range) + return 0; + /* initial: [cmin..cmax] valid, [min_uV..max_uV] not */ for (i = 0; i < count; i++) { int value; @@ -1938,8 +1942,8 @@ struct regulator *_regulator_get(struct device *dev, const char *id, regulator = create_regulator(rdev, dev, id); if (regulator == NULL) { regulator = ERR_PTR(-ENOMEM); - put_device(&rdev->dev); module_put(rdev->owner); + put_device(&rdev->dev); return regulator; } @@ -2063,13 +2067,13 @@ static void _regulator_put(struct regulator *regulator) rdev->open_count--; rdev->exclusive = 0; - put_device(&rdev->dev); regulator_unlock(rdev); kfree_const(regulator->supply_name); kfree(regulator); module_put(rdev->owner); + put_device(&rdev->dev); } /** @@ -5002,6 +5006,7 @@ regulator_register(const struct regulator_desc *regulator_desc, struct regulator_dev *rdev; bool dangling_cfg_gpiod = false; bool dangling_of_gpiod = false; + bool reg_device_fail = false; struct device *dev; int ret, i; @@ -5187,7 +5192,7 @@ regulator_register(const struct regulator_desc *regulator_desc, dev_set_drvdata(&rdev->dev, rdev); ret = device_register(&rdev->dev); if (ret != 0) { - put_device(&rdev->dev); + reg_device_fail = true; goto unset_supplies; } @@ -5218,7 +5223,10 @@ wash: clean: if (dangling_of_gpiod) gpiod_put(config->ena_gpiod); - kfree(rdev); + if (reg_device_fail) + put_device(&rdev->dev); + else + kfree(rdev); kfree(config); rinse: if (dangling_cfg_gpiod) diff --git a/drivers/regulator/max77650-regulator.c b/drivers/regulator/max77650-regulator.c index e57fc9197d62..ac89a412f665 100644 --- a/drivers/regulator/max77650-regulator.c +++ b/drivers/regulator/max77650-regulator.c @@ -386,9 +386,16 @@ static int max77650_regulator_probe(struct platform_device *pdev) return 0; } +static const struct of_device_id max77650_regulator_of_match[] = { + { .compatible = "maxim,max77650-regulator" }, + { } +}; +MODULE_DEVICE_TABLE(of, max77650_regulator_of_match); + static struct platform_driver max77650_regulator_driver = { .driver = { .name = "max77650-regulator", + .of_match_table = max77650_regulator_of_match, }, .probe = max77650_regulator_probe, }; diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c index 4a91be0ad5ae..5c12d57be040 100644 --- a/drivers/regulator/rn5t618-regulator.c +++ b/drivers/regulator/rn5t618-regulator.c @@ -148,6 +148,7 @@ static struct platform_driver rn5t618_regulator_driver = { module_platform_driver(rn5t618_regulator_driver); +MODULE_ALIAS("platform:rn5t618-regulator"); MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>"); MODULE_DESCRIPTION("RN5T618 regulator driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c index bdc07739e9a2..12d6b8d2e97e 100644 --- a/drivers/regulator/s5m8767.c +++ b/drivers/regulator/s5m8767.c @@ -588,7 +588,7 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev, if (of_property_read_u32(reg_np, "op_mode", &rmode->mode)) { dev_warn(iodev->dev, - "no op_mode property property at %pOF\n", + "no op_mode property at %pOF\n", reg_np); rmode->mode = S5M8767_OPMODE_NORMAL_MODE; diff --git a/drivers/reset/core.c b/drivers/reset/core.c index 
ca1d49146f61..7597c70e04d5 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -787,7 +787,7 @@ struct reset_control *__devm_reset_control_get(struct device *dev, return ERR_PTR(-ENOMEM); rstc = __reset_control_get(dev, id, index, shared, optional, acquired); - if (!IS_ERR(rstc)) { + if (!IS_ERR_OR_NULL(rstc)) { *ptr = rstc; devres_add(dev, ptr); } else { @@ -861,8 +861,7 @@ static int of_reset_control_get_count(struct device_node *node) * @acquired: only one reset control may be acquired for a given controller * and ID * - * Returns pointer to allocated reset_control_array on success or - * error on failure + * Returns pointer to allocated reset_control on success or error on failure */ struct reset_control * of_reset_control_array_get(struct device_node *np, bool shared, bool optional, @@ -915,8 +914,7 @@ EXPORT_SYMBOL_GPL(of_reset_control_array_get); * that just have to be asserted or deasserted, without any * requirements on the order. * - * Returns pointer to allocated reset_control_array on success or - * error on failure + * Returns pointer to allocated reset_control on success or error on failure */ struct reset_control * devm_reset_control_array_get(struct device *dev, bool shared, bool optional) @@ -930,7 +928,7 @@ devm_reset_control_array_get(struct device *dev, bool shared, bool optional) return ERR_PTR(-ENOMEM); rstc = of_reset_control_array_get(dev->of_node, shared, optional, true); - if (IS_ERR(rstc)) { + if (IS_ERR_OR_NULL(rstc)) { devres_free(devres); return rstc; } diff --git a/drivers/reset/reset-brcmstb.c b/drivers/reset/reset-brcmstb.c index a608f445dad6..f213264c8567 100644 --- a/drivers/reset/reset-brcmstb.c +++ b/drivers/reset/reset-brcmstb.c @@ -91,12 +91,6 @@ static int brcmstb_reset_probe(struct platform_device *pdev) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!IS_ALIGNED(res->start, SW_INIT_BANK_SIZE) || - !IS_ALIGNED(resource_size(res), SW_INIT_BANK_SIZE)) { - dev_err(kdev, "incorrect register range\n"); - return -EINVAL; - } - priv->base = devm_ioremap_resource(kdev, res); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c index df2829dd55ad..2ecd8752b088 100644 --- a/drivers/rtc/rtc-mc146818-lib.c +++ b/drivers/rtc/rtc-mc146818-lib.c @@ -172,20 +172,7 @@ int mc146818_set_time(struct rtc_time *time) save_control = CMOS_READ(RTC_CONTROL); CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); save_freq_select = CMOS_READ(RTC_FREQ_SELECT); - -#ifdef CONFIG_X86 - if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD && - boot_cpu_data.x86 == 0x17) || - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { - CMOS_WRITE((save_freq_select & (~RTC_DIV_RESET2)), - RTC_FREQ_SELECT); - save_freq_select &= ~RTC_DIV_RESET2; - } else - CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), - RTC_FREQ_SELECT); -#else - CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), RTC_FREQ_SELECT); -#endif + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); #ifdef CONFIG_MACH_DECSTATION CMOS_WRITE(real_yrs, RTC_DEC_YEAR); diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c index 5249fc99fd5f..9135e2101752 100644 --- a/drivers/rtc/rtc-mt6397.c +++ b/drivers/rtc/rtc-mt6397.c @@ -47,7 +47,7 @@ static irqreturn_t mtk_rtc_irq_handler_thread(int irq, void *data) irqen = irqsta & ~RTC_IRQ_EN_AL; mutex_lock(&rtc->lock); if (regmap_write(rtc->regmap, rtc->addr_base + RTC_IRQ_EN, - irqen) < 0) + irqen) == 0) mtk_rtc_write_trigger(rtc); mutex_unlock(&rtc->lock); @@ -169,12 
+169,12 @@ static int mtk_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) alm->pending = !!(pdn2 & RTC_PDN2_PWRON_ALARM); mutex_unlock(&rtc->lock); - tm->tm_sec = data[RTC_OFFSET_SEC]; - tm->tm_min = data[RTC_OFFSET_MIN]; - tm->tm_hour = data[RTC_OFFSET_HOUR]; - tm->tm_mday = data[RTC_OFFSET_DOM]; - tm->tm_mon = data[RTC_OFFSET_MTH]; - tm->tm_year = data[RTC_OFFSET_YEAR]; + tm->tm_sec = data[RTC_OFFSET_SEC] & RTC_AL_SEC_MASK; + tm->tm_min = data[RTC_OFFSET_MIN] & RTC_AL_MIN_MASK; + tm->tm_hour = data[RTC_OFFSET_HOUR] & RTC_AL_HOU_MASK; + tm->tm_mday = data[RTC_OFFSET_DOM] & RTC_AL_DOM_MASK; + tm->tm_mon = data[RTC_OFFSET_MTH] & RTC_AL_MTH_MASK; + tm->tm_year = data[RTC_OFFSET_YEAR] & RTC_AL_YEA_MASK; tm->tm_year += RTC_MIN_YEAR_OFFSET; tm->tm_mon--; @@ -195,14 +195,25 @@ static int mtk_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) tm->tm_year -= RTC_MIN_YEAR_OFFSET; tm->tm_mon++; - data[RTC_OFFSET_SEC] = tm->tm_sec; - data[RTC_OFFSET_MIN] = tm->tm_min; - data[RTC_OFFSET_HOUR] = tm->tm_hour; - data[RTC_OFFSET_DOM] = tm->tm_mday; - data[RTC_OFFSET_MTH] = tm->tm_mon; - data[RTC_OFFSET_YEAR] = tm->tm_year; - mutex_lock(&rtc->lock); + ret = regmap_bulk_read(rtc->regmap, rtc->addr_base + RTC_AL_SEC, + data, RTC_OFFSET_COUNT); + if (ret < 0) + goto exit; + + data[RTC_OFFSET_SEC] = ((data[RTC_OFFSET_SEC] & ~(RTC_AL_SEC_MASK)) | + (tm->tm_sec & RTC_AL_SEC_MASK)); + data[RTC_OFFSET_MIN] = ((data[RTC_OFFSET_MIN] & ~(RTC_AL_MIN_MASK)) | + (tm->tm_min & RTC_AL_MIN_MASK)); + data[RTC_OFFSET_HOUR] = ((data[RTC_OFFSET_HOUR] & ~(RTC_AL_HOU_MASK)) | + (tm->tm_hour & RTC_AL_HOU_MASK)); + data[RTC_OFFSET_DOM] = ((data[RTC_OFFSET_DOM] & ~(RTC_AL_DOM_MASK)) | + (tm->tm_mday & RTC_AL_DOM_MASK)); + data[RTC_OFFSET_MTH] = ((data[RTC_OFFSET_MTH] & ~(RTC_AL_MTH_MASK)) | + (tm->tm_mon & RTC_AL_MTH_MASK)); + data[RTC_OFFSET_YEAR] = ((data[RTC_OFFSET_YEAR] & ~(RTC_AL_YEA_MASK)) | + (tm->tm_year & RTC_AL_YEA_MASK)); + if (alm->enabled) { ret = regmap_bulk_write(rtc->regmap, rtc->addr_base + RTC_AL_SEC, diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index 8dcd20b34dde..852f5f3b3592 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -379,6 +379,22 @@ static void __init sun50i_h6_rtc_clk_init(struct device_node *node) CLK_OF_DECLARE_DRIVER(sun50i_h6_rtc_clk, "allwinner,sun50i-h6-rtc", sun50i_h6_rtc_clk_init); +/* + * The R40 user manual is self-conflicting on whether the prescaler is + * fixed or configurable. The clock diagram shows it as fixed, but there + * is also a configurable divider in the RTC block. 
+ */ +static const struct sun6i_rtc_clk_data sun8i_r40_rtc_data = { + .rc_osc_rate = 16000000, + .fixed_prescaler = 512, +}; +static void __init sun8i_r40_rtc_clk_init(struct device_node *node) +{ + sun6i_rtc_clk_init(node, &sun8i_r40_rtc_data); +} +CLK_OF_DECLARE_DRIVER(sun8i_r40_rtc_clk, "allwinner,sun8i-r40-rtc", + sun8i_r40_rtc_clk_init); + static const struct sun6i_rtc_clk_data sun8i_v3_rtc_data = { .rc_osc_rate = 32000, .has_out_clk = 1, diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index c94184d080f8..a28b9ff82378 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1128,7 +1128,8 @@ static u32 get_fcx_max_data(struct dasd_device *device) { struct dasd_eckd_private *private = device->private; int fcx_in_css, fcx_in_gneq, fcx_in_features; - int tpm, mdc; + unsigned int mdc; + int tpm; if (dasd_nofcx) return 0; @@ -1142,7 +1143,7 @@ static u32 get_fcx_max_data(struct dasd_device *device) return 0; mdc = ccw_device_get_mdc(device->cdev, 0); - if (mdc < 0) { + if (mdc == 0) { dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n"); return 0; } else { @@ -1153,12 +1154,12 @@ static u32 get_fcx_max_data(struct dasd_device *device) static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm) { struct dasd_eckd_private *private = device->private; - int mdc; + unsigned int mdc; u32 fcx_max_data; if (private->fcx_max_data) { mdc = ccw_device_get_mdc(device->cdev, lpm); - if ((mdc < 0)) { + if (mdc == 0) { dev_warn(&device->cdev->dev, "Detecting the maximum data size for zHPF " "requests failed (rc=%d) for a new path %x\n", @@ -2073,7 +2074,7 @@ out_err2: dasd_free_block(device->block); device->block = NULL; out_err1: - kfree(private->conf_data); + dasd_eckd_clear_conf_data(device); kfree(device->private); device->private = NULL; return rc; @@ -2082,7 +2083,6 @@ out_err1: static void dasd_eckd_uncheck_device(struct dasd_device *device) { struct dasd_eckd_private *private = device->private; - int i; if (!private) return; @@ -2092,21 +2092,7 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device) private->sneq = NULL; private->vdsneq = NULL; private->gneq = NULL; - private->conf_len = 0; - for (i = 0; i < 8; i++) { - kfree(device->path[i].conf_data); - if ((__u8 *)device->path[i].conf_data == - private->conf_data) { - private->conf_data = NULL; - private->conf_len = 0; - } - device->path[i].conf_data = NULL; - device->path[i].cssid = 0; - device->path[i].ssid = 0; - device->path[i].chpid = 0; - } - kfree(private->conf_data); - private->conf_data = NULL; + dasd_eckd_clear_conf_data(device); } static struct dasd_ccw_req * diff --git a/drivers/s390/block/dasd_fba.h b/drivers/s390/block/dasd_fba.h index 8f75df06e893..45ddabec4017 100644 --- a/drivers/s390/block/dasd_fba.h +++ b/drivers/s390/block/dasd_fba.h @@ -2,7 +2,7 @@ /* * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * Coypright IBM Corp. 1999, 2000 + * Copyright IBM Corp. 1999, 2000 * */ diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 1770b99f607e..8d4d69ea5baf 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c @@ -5,7 +5,7 @@ * Carsten Otte <Cotte@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * Coypright IBM Corp. 1999, 2002 + * Copyright IBM Corp. 1999, 2002 * * /proc interface for the dasd driver. 
* diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 65841af15748..ccecf6b9504e 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -635,7 +635,7 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout); * @mask: mask of paths to use * * Return the number of 64K-bytes blocks all paths at least support - * for a transport command. Return values <= 0 indicate failures. + * for a transport command. Return value 0 indicates failure. */ int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask) { diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index a1915061932e..5256e3ce84e5 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -793,8 +793,6 @@ static int ap_device_probe(struct device *dev) drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT; if (!!devres != !!drvres) return -ENODEV; - /* (re-)init queue's state machine */ - ap_queue_reinit_state(to_ap_queue(dev)); } /* Add queue/card to list of active queues/cards */ diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 433b7b64368d..bb35ba4a8d24 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -261,7 +261,7 @@ void ap_queue_prepare_remove(struct ap_queue *aq); void ap_queue_remove(struct ap_queue *aq); void ap_queue_suspend(struct ap_device *ap_dev); void ap_queue_resume(struct ap_device *ap_dev); -void ap_queue_reinit_state(struct ap_queue *aq); +void ap_queue_init_state(struct ap_queue *aq); struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type, int comp_device_type, unsigned int functions); diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index dad2be333d82..37c3bdc3642d 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -638,7 +638,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) aq->ap_dev.device.type = &ap_queue_type; aq->ap_dev.device_type = device_type; aq->qid = qid; - aq->state = AP_STATE_RESET_START; + aq->state = AP_STATE_UNBOUND; aq->interrupt = AP_INTR_DISABLED; spin_lock_init(&aq->lock); INIT_LIST_HEAD(&aq->list); @@ -771,10 +771,11 @@ void ap_queue_remove(struct ap_queue *aq) spin_unlock_bh(&aq->lock); } -void ap_queue_reinit_state(struct ap_queue *aq) +void ap_queue_init_state(struct ap_queue *aq) { spin_lock_bh(&aq->lock); aq->state = AP_STATE_RESET_START; ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); spin_unlock_bh(&aq->lock); } +EXPORT_SYMBOL(ap_queue_init_state); diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c index c1db64a2db21..110fe9d0cb91 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.c +++ b/drivers/s390/crypto/zcrypt_ccamisc.c @@ -1037,8 +1037,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, prepparm = (struct iprepparm *) prepcblk->rpl_parmb; /* do some plausibility checks on the key block */ - if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) || - prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) { + if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) || + prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) { DEBUG_ERR("%s reply with invalid or unknown key block\n", __func__); rc = -EIO; diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index c50f3e86cc74..7cbb384ec535 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c @@ -175,6 +175,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev) zq->queue = aq; zq->online = 1; atomic_set(&zq->load, 0); + 
ap_queue_init_state(aq); ap_queue_init_reply(aq, &zq->reply); aq->request_timeout = CEX2A_CLEANUP_TIME, aq->private = zq; diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c index 35c7c6672713..c78c0d119806 100644 --- a/drivers/s390/crypto/zcrypt_cex2c.c +++ b/drivers/s390/crypto/zcrypt_cex2c.c @@ -220,6 +220,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev) zq->queue = aq; zq->online = 1; atomic_set(&zq->load, 0); + ap_rapq(aq->qid); rc = zcrypt_cex2c_rng_supported(aq); if (rc < 0) { zcrypt_queue_free(zq); @@ -231,6 +232,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev) else zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, MSGTYPE06_VARIANT_NORNG); + ap_queue_init_state(aq); ap_queue_init_reply(aq, &zq->reply); aq->request_timeout = CEX2C_CLEANUP_TIME; aq->private = zq; diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c index 442e3d6162f7..6fabc906114c 100644 --- a/drivers/s390/crypto/zcrypt_cex4.c +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -381,6 +381,7 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev) zq->queue = aq; zq->online = 1; atomic_set(&zq->load, 0); + ap_queue_init_state(aq); ap_queue_init_reply(aq, &zq->reply); aq->request_timeout = CEX4_CLEANUP_TIME, aq->private = zq; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 33a62a6692c0..29facb913671 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -655,17 +655,17 @@ static int qeth_check_idx_response(struct qeth_card *card, unsigned char *buffer) { QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); - if ((buffer[2] & 0xc0) == 0xc0) { + if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) { QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n", buffer[4]); QETH_CARD_TEXT(card, 2, "ckidxres"); QETH_CARD_TEXT(card, 2, " idxterm"); - QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); - if (buffer[4] == 0xf6) { + QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]); + if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT || + buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) { dev_err(&card->gdev->dev, - "The qeth device is not configured " - "for the OSI layer required by z/VM\n"); - return -EPERM; + "The device does not support the configured transport mode\n"); + return -EPROTONOSUPPORT; } return -EIO; } @@ -742,10 +742,10 @@ static void qeth_issue_next_read_cb(struct qeth_card *card, case 0: break; case -EIO: - qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); /* fall through */ default: + qeth_clear_ipacmd_list(card); goto out; } @@ -2482,50 +2482,46 @@ static int qeth_mpc_initialize(struct qeth_card *card) rc = qeth_cm_enable(card); if (rc) { QETH_CARD_TEXT_(card, 2, "2err%d", rc); - goto out_qdio; + return rc; } rc = qeth_cm_setup(card); if (rc) { QETH_CARD_TEXT_(card, 2, "3err%d", rc); - goto out_qdio; + return rc; } rc = qeth_ulp_enable(card); if (rc) { QETH_CARD_TEXT_(card, 2, "4err%d", rc); - goto out_qdio; + return rc; } rc = qeth_ulp_setup(card); if (rc) { QETH_CARD_TEXT_(card, 2, "5err%d", rc); - goto out_qdio; + return rc; } rc = qeth_alloc_qdio_queues(card); if (rc) { QETH_CARD_TEXT_(card, 2, "5err%d", rc); - goto out_qdio; + return rc; } rc = qeth_qdio_establish(card); if (rc) { QETH_CARD_TEXT_(card, 2, "6err%d", rc); qeth_free_qdio_queues(card); - goto out_qdio; + return rc; } rc = qeth_qdio_activate(card); if (rc) { QETH_CARD_TEXT_(card, 2, "7err%d", rc); - goto out_qdio; + return rc; } rc = qeth_dm_act(card); if (rc) { 
QETH_CARD_TEXT_(card, 2, "8err%d", rc); - goto out_qdio; + return rc; } return 0; -out_qdio: - qeth_qdio_clear_card(card, !IS_IQD(card)); - qdio_free(CARD_DDEV(card)); - return rc; } void qeth_print_status_message(struct qeth_card *card) @@ -3429,11 +3425,6 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) goto out; } - if (card->state != CARD_STATE_DOWN) { - rc = -1; - goto out; - } - qeth_free_qdio_queues(card); card->options.cq = cq; rc = 0; @@ -5035,10 +5026,8 @@ retriable: } if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { rc = qeth_query_setdiagass(card); - if (rc < 0) { + if (rc) QETH_CARD_TEXT_(card, 2, "8err%d", rc); - goto out; - } } if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) || diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index c1ecce95094d..458db34239a7 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -899,6 +899,11 @@ extern unsigned char IDX_ACTIVATE_WRITE[]; #define QETH_IDX_ACT_ERR_AUTH 0x1E #define QETH_IDX_ACT_ERR_AUTH_USER 0x20 +#define QETH_IDX_TERMINATE 0xc0 +#define QETH_IDX_TERMINATE_MASK 0xc0 +#define QETH_IDX_TERM_BAD_TRANSPORT 0x41 +#define QETH_IDX_TERM_BAD_TRANSPORT_VM 0xf6 + #define PDU_ENCAPSULATION(buffer) \ (buffer + *(buffer + (*(buffer + 0x0b)) + \ *(buffer + *(buffer + 0x0b) + 0x11) + 0x07)) diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index e81170ab6d9a..7bd86027f559 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -207,7 +207,7 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev, card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; } else if (sysfs_streq(buf, "prio_queueing_vlan")) { if (IS_LAYER3(card)) { - rc = -ENOTSUPP; + rc = -EOPNOTSUPP; goto out; } card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 9086bc04fa6b..47d37e75dda6 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -287,14 +287,15 @@ static void qeth_l2_stop_card(struct qeth_card *card) card->state = CARD_STATE_HARDSETUP; } if (card->state == CARD_STATE_HARDSETUP) { - qeth_qdio_clear_card(card, 0); qeth_drain_output_queues(card); qeth_clear_working_pool_list(card); card->state = CARD_STATE_DOWN; } + qeth_qdio_clear_card(card, 0); flush_workqueue(card->event_wq); card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; + card->info.promisc_mode = 0; } static int qeth_l2_process_inbound_buffer(struct qeth_card *card, @@ -1951,8 +1952,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout) /* check if VNICC is currently enabled */ bool qeth_l2_vnicc_is_in_use(struct qeth_card *card) { - /* if everything is turned off, VNICC is not active */ - if (!card->options.vnicc.cur_chars) + if (!card->options.vnicc.sup_chars) return false; /* default values are only OK if rx_bcast was not enabled by user * or the card is offline. 
@@ -2039,8 +2039,9 @@ static void qeth_l2_vnicc_init(struct qeth_card *card) /* enforce assumed default values and recover settings, if changed */ error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING, timeout); - chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT; - chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE; + /* Change chars, if necessary */ + chars_tmp = card->options.vnicc.wanted_chars ^ + card->options.vnicc.cur_chars; chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE; for_each_set_bit(i, &chars_tmp, chars_len) { vnicc = BIT(i); diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c index f70c7aac2dcc..7fa325cf6f8d 100644 --- a/drivers/s390/net/qeth_l2_sys.c +++ b/drivers/s390/net/qeth_l2_sys.c @@ -262,7 +262,8 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card) return; mutex_lock(&card->sbp_lock); - if (card->options.sbp.role != QETH_SBP_ROLE_NONE) { + if (!card->options.sbp.reflect_promisc && + card->options.sbp.role != QETH_SBP_ROLE_NONE) { /* Conditional to avoid spurious error messages */ qeth_bridgeport_setrole(card, card->options.sbp.role); /* Let the callback function refresh the stored role value. */ diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 27126330a4b0..5508ab89b518 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1307,13 +1307,14 @@ static void qeth_l3_stop_card(struct qeth_card *card) card->state = CARD_STATE_HARDSETUP; } if (card->state == CARD_STATE_HARDSETUP) { - qeth_qdio_clear_card(card, 0); qeth_drain_output_queues(card); qeth_clear_working_pool_list(card); card->state = CARD_STATE_DOWN; } + qeth_qdio_clear_card(card, 0); flush_workqueue(card->event_wq); + card->info.promisc_mode = 0; } static void qeth_l3_set_promisc_mode(struct qeth_card *card) diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index f9067ed6c7d3..e8c848f72c6d 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -242,21 +242,33 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); + int rc = 0; char *tmp; - int rc; if (!IS_IQD(card)) return -EPERM; - if (card->state != CARD_STATE_DOWN) - return -EPERM; - if (card->options.sniffer) - return -EPERM; - if (card->options.cq == QETH_CQ_NOTAVAILABLE) - return -EPERM; + + mutex_lock(&card->conf_mutex); + if (card->state != CARD_STATE_DOWN) { + rc = -EPERM; + goto out; + } + + if (card->options.sniffer) { + rc = -EPERM; + goto out; + } + + if (card->options.cq == QETH_CQ_NOTAVAILABLE) { + rc = -EPERM; + goto out; + } tmp = strsep((char **)&buf, "\n"); - if (strlen(tmp) > 8) - return -EINVAL; + if (strlen(tmp) > 8) { + rc = -EINVAL; + goto out; + } if (card->options.hsuid[0]) /* delete old ip address */ @@ -267,11 +279,13 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, card->options.hsuid[0] = '\0'; memcpy(card->dev->perm_addr, card->options.hsuid, 9); qeth_configure_cq(card, QETH_CQ_DISABLED); - return count; + goto out; } - if (qeth_configure_cq(card, QETH_CQ_ENABLED)) - return -EPERM; + if (qeth_configure_cq(card, QETH_CQ_ENABLED)) { + rc = -EPERM; + goto out; + } snprintf(card->options.hsuid, sizeof(card->options.hsuid), "%-8s", tmp); @@ -280,6 +294,8 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, rc = qeth_l3_modify_hsuid(card, true); +out: + mutex_unlock(&card->conf_mutex); return rc ? 
rc : count; } diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index c4e4b0136f86..4bc794d2f51c 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -121,7 +121,8 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev) "cdev 0x%p, p# %u.\n", cdev, cdev->nports); cxgbi_hbas_remove(cdev); cxgbi_device_portmap_cleanup(cdev); - cxgbi_ppm_release(cdev->cdev2ppm(cdev)); + if (cdev->cdev2ppm) + cxgbi_ppm_release(cdev->cdev2ppm(cdev)); if (cdev->pmap.max_connect) cxgbi_free_big_mem(cdev->pmap.port_csk); kfree(cdev); diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c index 1f55b9e4e74a..1b88a3b53eee 100644 --- a/drivers/scsi/fnic/vnic_dev.c +++ b/drivers/scsi/fnic/vnic_dev.c @@ -688,26 +688,26 @@ int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done) int vnic_dev_hang_notify(struct vnic_dev *vdev) { - u64 a0, a1; + u64 a0 = 0, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait); } int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) { - u64 a0, a1; + u64 a[2] = {}; int wait = 1000; int err, i; for (i = 0; i < ETH_ALEN; i++) mac_addr[i] = 0; - err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait); + err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a[0], &a[1], wait); if (err) return err; for (i = 0; i < ETH_ALEN; i++) - mac_addr[i] = ((u8 *)&a0)[i]; + mac_addr[i] = ((u8 *)&a)[i]; return 0; } @@ -732,30 +732,30 @@ void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) { - u64 a0 = 0, a1 = 0; + u64 a[2] = {}; int wait = 1000; int err; int i; for (i = 0; i < ETH_ALEN; i++) - ((u8 *)&a0)[i] = addr[i]; + ((u8 *)&a)[i] = addr[i]; - err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); + err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait); if (err) pr_err("Can't add addr [%pM], %d\n", addr, err); } void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) { - u64 a0 = 0, a1 = 0; + u64 a[2] = {}; int wait = 1000; int err; int i; for (i = 0; i < ETH_ALEN; i++) - ((u8 *)&a0)[i] = addr[i]; + ((u8 *)&a)[i] = addr[i]; - err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); + err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a[0], &a[1], wait); if (err) pr_err("Can't del addr [%pM], %d\n", addr, err); } diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 2e6a68d9ea4f..a5ecbce4eda2 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -5385,7 +5385,6 @@ static const struct file_operations lpfc_debugfs_ras_log = { .read = lpfc_debugfs_read, .release = lpfc_debugfs_ras_log_release, }; -#endif #undef lpfc_debugfs_op_dumpHBASlim static const struct file_operations lpfc_debugfs_op_dumpHBASlim = { @@ -5557,7 +5556,7 @@ static const struct file_operations lpfc_idiag_op_extAcc = { .write = lpfc_idiag_extacc_write, .release = lpfc_idiag_cmd_release, }; - +#endif /* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command * @phba: Pointer to HBA context object. 
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 6298b1729098..6a04fdb3fbf2 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -5883,7 +5883,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) break; default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "1804 Invalid asynchrous event code: " + "1804 Invalid asynchronous event code: " "x%x\n", bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)); break; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index c82b5792da98..625c046ac4ef 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -8555,7 +8555,7 @@ lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; spin_unlock_irq(&phba->hbalock); - /* wake up worker thread to post asynchronlous mailbox command */ + /* wake up worker thread to post asynchronous mailbox command */ lpfc_worker_wake_up(phba); } @@ -8823,7 +8823,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, return rc; } - /* Now, interrupt mode asynchrous mailbox command */ + /* Now, interrupt mode asynchronous mailbox command */ rc = lpfc_mbox_cmd_check(phba, mboxq); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, @@ -13112,11 +13112,11 @@ lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size) } /** - * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event + * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event * @phba: Pointer to HBA context object. * @cqe: Pointer to mailbox completion queue entry. * - * This routine process a mailbox completion queue entry with asynchrous + * This routine process a mailbox completion queue entry with asynchronous * event. * * Return: true if work posted to worker thread, otherwise false. @@ -13270,7 +13270,7 @@ out_no_mqe_complete: * @cqe: Pointer to mailbox completion queue entry. * * This routine process a mailbox completion queue entry, it invokes the - * proper mailbox complete handling or asynchrous event handling routine + * proper mailbox complete handling or asynchronous event handling routine * according to the MCQE's async bit. * * Return: true if work posted to worker thread, otherwise false. 
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 848fbec7bda6..45fd8dfb7c40 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -5248,7 +5248,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) &ct->chain_buffer_dma); if (!ct->chain_buffer) { ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n"); - _base_release_memory_pools(ioc); goto out; } } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index cea625906440..65ce10c7989c 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2211,8 +2211,10 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer u8 type; int ret = 0; - if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) + if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) { + sdkp->protection_type = 0; return ret; + } type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index f8faf8b3d965..fb41636519ee 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1842,9 +1842,11 @@ static int storvsc_probe(struct hv_device *device, */ host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT); /* + * For non-IDE disks, the host supports multiple channels. * Set the number of HW queues we are supporting. */ - host->nr_hw_queues = num_present_cpus(); + if (!dev_is_ide) + host->nr_hw_queues = num_present_cpus(); /* * Set the error handler work queue. diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig index 833e04a7835c..1778f8c62861 100644 --- a/drivers/soc/Kconfig +++ b/drivers/soc/Kconfig @@ -14,6 +14,7 @@ source "drivers/soc/qcom/Kconfig" source "drivers/soc/renesas/Kconfig" source "drivers/soc/rockchip/Kconfig" source "drivers/soc/samsung/Kconfig" +source "drivers/soc/sifive/Kconfig" source "drivers/soc/sunxi/Kconfig" source "drivers/soc/tegra/Kconfig" source "drivers/soc/ti/Kconfig" diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 2ec355003524..8b49d782a1ab 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -20,6 +20,7 @@ obj-y += qcom/ obj-y += renesas/ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/ obj-$(CONFIG_SOC_SAMSUNG) += samsung/ +obj-$(CONFIG_SOC_SIFIVE) += sifive/ obj-y += sunxi/ obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-y += ti/ diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c index 5823f5b67d16..3f0261d53ad9 100644 --- a/drivers/soc/amlogic/meson-ee-pwrc.c +++ b/drivers/soc/amlogic/meson-ee-pwrc.c @@ -323,6 +323,8 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev, struct meson_ee_pwrc *pwrc, struct meson_ee_pwrc_domain *dom) { + int ret; + dom->pwrc = pwrc; dom->num_rstc = dom->desc.reset_names_count; dom->num_clks = dom->desc.clk_names_count; @@ -368,15 +370,21 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev, * prepare/enable counters won't be in sync. */ if (dom->num_clks && dom->desc.get_power && !dom->desc.get_power(dom)) { - int ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks); + ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks); if (ret) return ret; - pm_genpd_init(&dom->base, &pm_domain_always_on_gov, false); - } else - pm_genpd_init(&dom->base, NULL, - (dom->desc.get_power ? - dom->desc.get_power(dom) : true)); + ret = pm_genpd_init(&dom->base, &pm_domain_always_on_gov, + false); + if (ret) + return ret; + } else { + ret = pm_genpd_init(&dom->base, NULL, + (dom->desc.get_power ? 
+ dom->desc.get_power(dom) : true)); + if (ret) + return ret; + } return 0; } @@ -441,9 +449,7 @@ static int meson_ee_pwrc_probe(struct platform_device *pdev) pwrc->xlate.domains[i] = &dom->base; } - of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate); - - return 0; + return of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate); } static void meson_ee_pwrc_shutdown(struct platform_device *pdev) diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig new file mode 100644 index 000000000000..58cf8c40d08d --- /dev/null +++ b/drivers/soc/sifive/Kconfig @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 + +if SOC_SIFIVE + +config SIFIVE_L2 + bool "Sifive L2 Cache controller" + help + Support for the L2 cache controller on SiFive platforms. + +endif diff --git a/drivers/soc/sifive/Makefile b/drivers/soc/sifive/Makefile new file mode 100644 index 000000000000..b5caff77938f --- /dev/null +++ b/drivers/soc/sifive/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_SIFIVE_L2) += sifive_l2_cache.o diff --git a/arch/riscv/mm/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c index a9ffff3277c7..a5069394cd61 100644 --- a/arch/riscv/mm/sifive_l2_cache.c +++ b/drivers/soc/sifive/sifive_l2_cache.c @@ -9,7 +9,7 @@ #include <linux/interrupt.h> #include <linux/of_irq.h> #include <linux/of_address.h> -#include <asm/sifive_l2_cache.h> +#include <soc/sifive/sifive_l2_cache.h> #define SIFIVE_L2_DIRECCFIX_LOW 0x100 #define SIFIVE_L2_DIRECCFIX_HIGH 0x104 diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c index 378369d9364a..e9ece45d7a33 100644 --- a/drivers/soc/ti/wkup_m3_ipc.c +++ b/drivers/soc/ti/wkup_m3_ipc.c @@ -419,6 +419,8 @@ static void wkup_m3_rproc_boot_thread(struct wkup_m3_ipc *m3_ipc) ret = rproc_boot(m3_ipc->rproc); if (ret) dev_err(dev, "rproc_boot failed\n"); + else + m3_ipc_state = m3_ipc; do_exit(0); } @@ -505,8 +507,6 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev) goto err_put_rproc; } - m3_ipc_state = m3_ipc; - return 0; err_put_rproc: diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index c36587b42e95..82a0ee09cbe1 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -168,16 +168,16 @@ static void cdns_spi_init_hw(struct cdns_spi *xspi) /** * cdns_spi_chipselect - Select or deselect the chip select line * @spi: Pointer to the spi_device structure - * @enable: Select (1) or deselect (0) the chip select line + * @is_high: Select(0) or deselect (1) the chip select line */ -static void cdns_spi_chipselect(struct spi_device *spi, bool enable) +static void cdns_spi_chipselect(struct spi_device *spi, bool is_high) { struct cdns_spi *xspi = spi_master_get_devdata(spi->master); u32 ctrl_reg; ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR); - if (!enable) { + if (is_high) { /* Deselect the slave */ ctrl_reg |= CDNS_SPI_CR_SSCTRL; } else { diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c index d12e149f1a41..fd6b9caffaf0 100644 --- a/drivers/spi/spi-cavium-thunderx.c +++ b/drivers/spi/spi-cavium-thunderx.c @@ -82,6 +82,7 @@ static int thunderx_spi_probe(struct pci_dev *pdev, error: clk_disable_unprepare(p->clk); + pci_release_regions(pdev); spi_master_put(master); return ret; } @@ -96,6 +97,7 @@ static void thunderx_spi_remove(struct pci_dev *pdev) return; clk_disable_unprepare(p->clk); + pci_release_regions(pdev); /* Put everything in a known state. 
*/ writeq(0, p->register_base + OCTEON_SPI_CFG(p)); } diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index a92aa5cd4fbe..5a25da377119 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -129,10 +129,11 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable) struct dw_spi *dws = spi_controller_get_devdata(spi->controller); struct chip_data *chip = spi_get_ctldata(spi); + /* Chip select logic is inverted from spi_set_cs() */ if (chip && chip->cs_control) - chip->cs_control(enable); + chip->cs_control(!enable); - if (enable) + if (!enable) dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select)); else if (dws->cs_override) dw_writel(dws, DW_SPI_SER, 0); @@ -171,9 +172,11 @@ static inline u32 rx_max(struct dw_spi *dws) static void dw_writer(struct dw_spi *dws) { - u32 max = tx_max(dws); + u32 max; u16 txw = 0; + spin_lock(&dws->buf_lock); + max = tx_max(dws); while (max--) { /* Set the tx word if the transfer's original "tx" is not null */ if (dws->tx_end - dws->len) { @@ -185,13 +188,16 @@ static void dw_writer(struct dw_spi *dws) dw_write_io_reg(dws, DW_SPI_DR, txw); dws->tx += dws->n_bytes; } + spin_unlock(&dws->buf_lock); } static void dw_reader(struct dw_spi *dws) { - u32 max = rx_max(dws); + u32 max; u16 rxw; + spin_lock(&dws->buf_lock); + max = rx_max(dws); while (max--) { rxw = dw_read_io_reg(dws, DW_SPI_DR); /* Care rx only if the transfer's original "rx" is not null */ @@ -203,6 +209,7 @@ static void dw_reader(struct dw_spi *dws) } dws->rx += dws->n_bytes; } + spin_unlock(&dws->buf_lock); } static void int_error_stop(struct dw_spi *dws, const char *msg) @@ -275,18 +282,20 @@ static int dw_spi_transfer_one(struct spi_controller *master, { struct dw_spi *dws = spi_controller_get_devdata(master); struct chip_data *chip = spi_get_ctldata(spi); + unsigned long flags; u8 imask = 0; u16 txlevel = 0; u32 cr0; int ret; dws->dma_mapped = 0; - + spin_lock_irqsave(&dws->buf_lock, flags); dws->tx = (void *)transfer->tx_buf; dws->tx_end = dws->tx + transfer->len; dws->rx = transfer->rx_buf; dws->rx_end = dws->rx + transfer->len; dws->len = transfer->len; + spin_unlock_irqrestore(&dws->buf_lock, flags); spi_enable_chip(dws, 0); @@ -470,6 +479,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) dws->type = SSI_MOTO_SPI; dws->dma_inited = 0; dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR); + spin_lock_init(&dws->buf_lock); spi_controller_set_devdata(master, dws); diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h index 38c7de1f0aa9..1bf5713e047d 100644 --- a/drivers/spi/spi-dw.h +++ b/drivers/spi/spi-dw.h @@ -119,6 +119,7 @@ struct dw_spi { size_t len; void *tx; void *tx_end; + spinlock_t buf_lock; void *rx; void *rx_end; int dma_mapped; diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 442cff71a0d2..8428b69c858b 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -185,6 +185,7 @@ struct fsl_dspi { struct spi_transfer *cur_transfer; struct spi_message *cur_msg; struct chip_data *cur_chip; + size_t progress; size_t len; const void *tx; void *rx; @@ -586,21 +587,14 @@ static void dspi_tcfq_write(struct fsl_dspi *dspi) dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT; if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) { - /* Write two TX FIFO entries first, and then the corresponding - * CMD FIFO entry. + /* Write the CMD FIFO entry first, and then the two + * corresponding TX FIFO entries. 
*/ u32 data = dspi_pop_tx(dspi); - if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE) { - /* LSB */ - tx_fifo_write(dspi, data & 0xFFFF); - tx_fifo_write(dspi, data >> 16); - } else { - /* MSB */ - tx_fifo_write(dspi, data >> 16); - tx_fifo_write(dspi, data & 0xFFFF); - } cmd_fifo_write(dspi); + tx_fifo_write(dspi, data & 0xFFFF); + tx_fifo_write(dspi, data >> 16); } else { /* Write one entry to both TX FIFO and CMD FIFO * simultaneously. @@ -658,7 +652,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi) u32 spi_tcr; spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer, - dspi->tx - dspi->bytes_per_word, !dspi->irq); + dspi->progress, !dspi->irq); /* Get transfer counter (in number of SPI transfers). It was * reset to 0 when transfer(s) were started. @@ -667,6 +661,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi) spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr); /* Update total number of bytes that were transferred */ msg->actual_length += spi_tcnt * dspi->bytes_per_word; + dspi->progress += spi_tcnt; trans_mode = dspi->devtype_data->trans_mode; if (trans_mode == DSPI_EOQ_MODE) @@ -679,7 +674,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi) return 0; spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, - dspi->tx, !dspi->irq); + dspi->progress, !dspi->irq); if (trans_mode == DSPI_EOQ_MODE) dspi_eoq_write(dspi); @@ -768,6 +763,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, dspi->rx = transfer->rx_buf; dspi->rx_end = dspi->rx + transfer->len; dspi->len = transfer->len; + dspi->progress = 0; /* Validated transfer specific frame size (defaults applied) */ dspi->bits_per_word = transfer->bits_per_word; if (transfer->bits_per_word <= 8) @@ -789,7 +785,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, SPI_CTARE_DTCP(1)); spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, - dspi->tx, !dspi->irq); + dspi->progress, !dspi->irq); trans_mode = dspi->devtype_data->trans_mode; switch (trans_mode) { diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index 114801a32371..fb4159ad6bf6 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c @@ -611,6 +611,7 @@ static struct spi_master * fsl_spi_probe(struct device *dev, master->setup = fsl_spi_setup; master->cleanup = fsl_spi_cleanup; master->transfer_one_message = fsl_spi_do_one_msg; + master->use_gpio_descriptors = true; mpc8xxx_spi = spi_master_get_devdata(master); mpc8xxx_spi->max_bits_per_word = 32; @@ -727,17 +728,27 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) } } #endif - - pdata->cs_control = fsl_spi_cs_control; + /* + * Handle the case where we have one hardwired (always selected) + * device on the first "chipselect". Else we let the core code + * handle any GPIOs or native chip selects and assign the + * appropriate callback for dealing with the CS lines. This isn't + * supported on the GRLIB variant. 
+ */ + ret = gpiod_count(dev, "cs"); + if (ret <= 0) + pdata->max_chipselect = 1; + else + pdata->cs_control = fsl_spi_cs_control; } ret = of_address_to_resource(np, 0, &mem); if (ret) goto err; - irq = irq_of_parse_and_map(np, 0); - if (!irq) { - ret = -EINVAL; + irq = platform_get_irq(ofdev, 0); + if (irq < 0) { + ret = irq; goto err; } @@ -750,7 +761,6 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) return 0; err: - irq_dispose_mapping(irq); return ret; } diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c index c36bb1bb464e..8c5084a3a617 100644 --- a/drivers/spi/spi-nxp-fspi.c +++ b/drivers/spi/spi-nxp-fspi.c @@ -439,7 +439,7 @@ static bool nxp_fspi_supports_op(struct spi_mem *mem, op->data.nbytes > f->devtype_data->txfifo) return false; - return true; + return spi_mem_default_supports_op(mem, op); } /* Instead of busy looping invoke readl_poll_timeout functionality. */ diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 16b6b2ad4e7c..9071333ebdd8 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1443,6 +1443,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = { { PCI_VDEVICE(INTEL, 0x4b2a), LPSS_BXT_SSP }, { PCI_VDEVICE(INTEL, 0x4b2b), LPSS_BXT_SSP }, { PCI_VDEVICE(INTEL, 0x4b37), LPSS_BXT_SSP }, + /* JSL */ + { PCI_VDEVICE(INTEL, 0x4daa), LPSS_CNL_SSP }, + { PCI_VDEVICE(INTEL, 0x4dab), LPSS_CNL_SSP }, + { PCI_VDEVICE(INTEL, 0x4dfb), LPSS_CNL_SSP }, /* APL */ { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP }, { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP }, diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c index 2ee1feb41681..6678f1cbc566 100644 --- a/drivers/spi/spi-sprd.c +++ b/drivers/spi/spi-sprd.c @@ -678,7 +678,7 @@ static int sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t) if (d->unit != SPI_DELAY_UNIT_SCK) return -EINVAL; - val = readl_relaxed(ss->base + SPRD_SPI_CTL7); + val = readl_relaxed(ss->base + SPRD_SPI_CTL0); val &= ~(SPRD_SPI_SCK_REV | SPRD_SPI_NG_TX | SPRD_SPI_NG_RX); /* Set default chip selection, clock phase and clock polarity */ val |= ss->hw_mode & SPI_CPHA ? 
SPRD_SPI_NG_RX : SPRD_SPI_NG_TX; diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 3cb65371ae3b..66dcb6128539 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -62,6 +62,7 @@ struct ti_qspi { u32 dc; bool mmap_enabled; + int current_cs; }; #define QSPI_PID (0x0) @@ -487,6 +488,7 @@ static void ti_qspi_enable_memory_map(struct spi_device *spi) MEM_CS_EN(spi->chip_select)); } qspi->mmap_enabled = true; + qspi->current_cs = spi->chip_select; } static void ti_qspi_disable_memory_map(struct spi_device *spi) @@ -498,6 +500,7 @@ static void ti_qspi_disable_memory_map(struct spi_device *spi) regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg, MEM_CS_MASK, 0); qspi->mmap_enabled = false; + qspi->current_cs = -1; } static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode, @@ -543,7 +546,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem, mutex_lock(&qspi->list_lock); - if (!qspi->mmap_enabled) + if (!qspi->mmap_enabled || qspi->current_cs != mem->spi->chip_select) ti_qspi_enable_memory_map(mem->spi); ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth, op->addr.nbytes, op->dummy.nbytes); @@ -799,6 +802,7 @@ no_dma: } } qspi->mmap_enabled = false; + qspi->current_cs = -1; ret = devm_spi_register_master(&pdev->dev, master); if (!ret) diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c index 47cde1864630..ce9b30112e26 100644 --- a/drivers/spi/spi-uniphier.c +++ b/drivers/spi/spi-uniphier.c @@ -290,25 +290,32 @@ static void uniphier_spi_recv(struct uniphier_spi_priv *priv) } } -static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv) +static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv, + unsigned int threshold) { - unsigned int fifo_threshold, fill_bytes; u32 val; - fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, - bytes_per_word(priv->bits_per_word)); - fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH); - - fill_bytes = fifo_threshold - (priv->rx_bytes - priv->tx_bytes); - - /* set fifo threshold */ val = readl(priv->base + SSI_FC); val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK); - val |= FIELD_PREP(SSI_FC_TXFTH_MASK, fifo_threshold); - val |= FIELD_PREP(SSI_FC_RXFTH_MASK, fifo_threshold); + val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold); + val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold); writel(val, priv->base + SSI_FC); +} + +static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv) +{ + unsigned int fifo_threshold, fill_words; + unsigned int bpw = bytes_per_word(priv->bits_per_word); + + fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw); + fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH); + + uniphier_spi_set_fifo_threshold(priv, fifo_threshold); + + fill_words = fifo_threshold - + DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw); - while (fill_bytes--) + while (fill_words--) uniphier_spi_send(priv); } diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 5e4c4532f7f3..8994545367a2 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1499,8 +1499,7 @@ static void spi_pump_messages(struct kthread_work *work) * advances its @tx buffer pointer monotonically. * @ctlr: Pointer to the spi_controller structure of the driver * @xfer: Pointer to the transfer being timestamped - * @tx: Pointer to the current word within the xfer->tx_buf that the driver is - * preparing to transmit right now. 
+ * @progress: How many words (not bytes) have been transferred so far * @irqs_off: If true, will disable IRQs and preemption for the duration of the * transfer, for less jitter in time measurement. Only compatible * with PIO drivers. If true, must follow up with @@ -1510,21 +1509,19 @@ static void spi_pump_messages(struct kthread_work *work) */ void spi_take_timestamp_pre(struct spi_controller *ctlr, struct spi_transfer *xfer, - const void *tx, bool irqs_off) + size_t progress, bool irqs_off) { - u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8); - if (!xfer->ptp_sts) return; if (xfer->timestamped_pre) return; - if (tx < (xfer->tx_buf + xfer->ptp_sts_word_pre * bytes_per_word)) + if (progress < xfer->ptp_sts_word_pre) return; /* Capture the resolution of the timestamp */ - xfer->ptp_sts_word_pre = (tx - xfer->tx_buf) / bytes_per_word; + xfer->ptp_sts_word_pre = progress; xfer->timestamped_pre = true; @@ -1546,23 +1543,20 @@ EXPORT_SYMBOL_GPL(spi_take_timestamp_pre); * timestamped. * @ctlr: Pointer to the spi_controller structure of the driver * @xfer: Pointer to the transfer being timestamped - * @tx: Pointer to the current word within the xfer->tx_buf that the driver has - * just transmitted. + * @progress: How many words (not bytes) have been transferred so far * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU. */ void spi_take_timestamp_post(struct spi_controller *ctlr, struct spi_transfer *xfer, - const void *tx, bool irqs_off) + size_t progress, bool irqs_off) { - u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8); - if (!xfer->ptp_sts) return; if (xfer->timestamped_post) return; - if (tx < (xfer->tx_buf + xfer->ptp_sts_word_post * bytes_per_word)) + if (progress < xfer->ptp_sts_word_post) return; ptp_read_system_postts(xfer->ptp_sts); @@ -1573,7 +1567,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr, } /* Capture the resolution of the timestamp */ - xfer->ptp_sts_word_post = (tx - xfer->tx_buf) / bytes_per_word; + xfer->ptp_sts_word_post = progress; xfer->timestamped_post = true; } diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig index 3fffe4d6f327..f180a8e9f58a 100644 --- a/drivers/staging/axis-fifo/Kconfig +++ b/drivers/staging/axis-fifo/Kconfig @@ -4,7 +4,7 @@ # config XIL_AXIS_FIFO tristate "Xilinx AXI-Stream FIFO IP core driver" - depends on OF + depends on OF && HAS_IOMEM help This adds support for the Xilinx AXI-Stream FIFO IP core driver. 
The AXI Streaming FIFO allows memory mapped access to a AXI Streaming diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c index dbff0f7e7cf5..ddc0dc93d08b 100644 --- a/drivers/staging/comedi/drivers/adv_pci1710.c +++ b/drivers/staging/comedi/drivers/adv_pci1710.c @@ -46,8 +46,8 @@ #define PCI171X_RANGE_UNI BIT(4) #define PCI171X_RANGE_GAIN(x) (((x) & 0x7) << 0) #define PCI171X_MUX_REG 0x04 /* W: A/D multiplexor control */ -#define PCI171X_MUX_CHANH(x) (((x) & 0xf) << 8) -#define PCI171X_MUX_CHANL(x) (((x) & 0xf) << 0) +#define PCI171X_MUX_CHANH(x) (((x) & 0xff) << 8) +#define PCI171X_MUX_CHANL(x) (((x) & 0xff) << 0) #define PCI171X_MUX_CHAN(x) (PCI171X_MUX_CHANH(x) | PCI171X_MUX_CHANL(x)) #define PCI171X_STATUS_REG 0x06 /* R: status register */ #define PCI171X_STATUS_IRQ BIT(11) /* 1=IRQ occurred */ diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c index 4bdf44d82879..dc62db1ee1dd 100644 --- a/drivers/staging/comedi/drivers/gsc_hpdi.c +++ b/drivers/staging/comedi/drivers/gsc_hpdi.c @@ -623,6 +623,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev, dma_alloc_coherent(&pcidev->dev, DMA_BUFFER_SIZE, &devpriv->dio_buffer_phys_addr[i], GFP_KERNEL); + if (!devpriv->dio_buffer[i]) { + dev_warn(dev->class_dev, + "failed to allocate DMA buffer\n"); + return -ENOMEM; + } } /* allocate dma descriptors */ devpriv->dma_desc = dma_alloc_coherent(&pcidev->dev, @@ -630,6 +635,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev, NUM_DMA_DESCRIPTORS, &devpriv->dma_desc_phys_addr, GFP_KERNEL); + if (!devpriv->dma_desc) { + dev_warn(dev->class_dev, + "failed to allocate DMA descriptors\n"); + return -ENOMEM; + } if (devpriv->dma_desc_phys_addr & 0xf) { dev_warn(dev->class_dev, " dma descriptors not quad-word aligned (bug)\n"); diff --git a/drivers/staging/comedi/drivers/ni_routes.c b/drivers/staging/comedi/drivers/ni_routes.c index 673d732dcb8f..8f398b30f5bf 100644 --- a/drivers/staging/comedi/drivers/ni_routes.c +++ b/drivers/staging/comedi/drivers/ni_routes.c @@ -72,9 +72,6 @@ static int ni_find_device_routes(const char *device_family, } } - if (!rv) - return -ENODATA; - /* Second, find the set of routes valid for this device. 
*/ for (i = 0; ni_device_routes_list[i]; ++i) { if (memcmp(ni_device_routes_list[i]->device, board_name, @@ -84,12 +81,12 @@ static int ni_find_device_routes(const char *device_family, } } - if (!dr) - return -ENODATA; - tables->route_values = rv; tables->valid_routes = dr; + if (!rv || !dr) + return -ENODATA; + return 0; } @@ -487,6 +484,9 @@ int ni_find_route_source(const u8 src_sel_reg_value, int dest, { int src; + if (!tables->route_values) + return -EINVAL; + dest = B(dest); /* subtract NI names offset */ /* ensure we are not going to under/over run the route value table */ if (dest < 0 || dest >= NI_NUM_NAMES) diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h index 08eaa0bad0de..1c9c3ba4d518 100644 --- a/drivers/staging/media/ipu3/include/intel-ipu3.h +++ b/drivers/staging/media/ipu3/include/intel-ipu3.h @@ -449,7 +449,7 @@ struct ipu3_uapi_awb_fr_config_s { __u16 reserved1; __u32 bayer_sign; __u8 bayer_nf; - __u8 reserved2[3]; + __u8 reserved2[7]; } __attribute__((aligned(32))) __packed; /** diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index a7cac0719b8b..b5d42f411dd8 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -37,6 +37,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = { {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ + {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ {} /* Terminating entry */ diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c index 8d19ae71e7cc..4e651b698617 100644 --- a/drivers/staging/vt6656/baseband.c +++ b/drivers/staging/vt6656/baseband.c @@ -449,8 +449,8 @@ int vnt_vt3184_init(struct vnt_private *priv) memcpy(array, addr, length); - ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0, - MESSAGE_REQUEST_BBREG, length, array); + ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE, + MESSAGE_REQUEST_BBREG, length, array); if (ret) goto end; diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c index 56cd77fd9ea0..7958fc165462 100644 --- a/drivers/staging/vt6656/card.c +++ b/drivers/staging/vt6656/card.c @@ -719,7 +719,7 @@ end: */ int vnt_radio_power_on(struct vnt_private *priv) { - int ret = true; + int ret = 0; vnt_exit_deep_sleep(priv); diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h index 6074ceda78bf..50e1c8918040 100644 --- a/drivers/staging/vt6656/device.h +++ b/drivers/staging/vt6656/device.h @@ -259,6 +259,7 @@ struct vnt_private { u8 mac_hw; /* netdev */ struct usb_device *usb; + struct usb_interface *intf; u64 tsf_time; u8 rx_rate; diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 4ac85ecb0921..9cb924c54571 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -949,7 +949,7 @@ static const struct ieee80211_ops vnt_mac_ops = { int vnt_init(struct vnt_private *priv) { - if (!(vnt_init_registers(priv))) + if (vnt_init_registers(priv)) return -EAGAIN; SET_IEEE80211_PERM_ADDR(priv->hw, priv->permanent_net_addr); @@ -992,6 +992,7 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id) priv = hw->priv; priv->hw = hw; priv->usb = 
udev; + priv->intf = intf; vnt_set_options(priv); diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c index d3304df6bd53..d977d4777e4f 100644 --- a/drivers/staging/vt6656/usbpipe.c +++ b/drivers/staging/vt6656/usbpipe.c @@ -59,7 +59,9 @@ int vnt_control_out(struct vnt_private *priv, u8 request, u16 value, kfree(usb_buffer); - if (ret >= 0 && ret < (int)length) + if (ret == (int)length) + ret = 0; + else ret = -EIO; end_unlock: @@ -74,6 +76,23 @@ int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 data) reg_off, reg, sizeof(u8), &data); } +int vnt_control_out_blocks(struct vnt_private *priv, + u16 block, u8 reg, u16 length, u8 *data) +{ + int ret = 0, i; + + for (i = 0; i < length; i += block) { + u16 len = min_t(int, length - i, block); + + ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, + i, reg, len, data + i); + if (ret) + goto end; + } +end: + return ret; +} + int vnt_control_in(struct vnt_private *priv, u8 request, u16 value, u16 index, u16 length, u8 *buffer) { @@ -103,7 +122,9 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value, kfree(usb_buffer); - if (ret >= 0 && ret < (int)length) + if (ret == (int)length) + ret = 0; + else ret = -EIO; end_unlock: diff --git a/drivers/staging/vt6656/usbpipe.h b/drivers/staging/vt6656/usbpipe.h index 95147ec7b96a..b65d9c01a211 100644 --- a/drivers/staging/vt6656/usbpipe.h +++ b/drivers/staging/vt6656/usbpipe.h @@ -18,6 +18,8 @@ #include "device.h" +#define VNT_REG_BLOCK_SIZE 64 + int vnt_control_out(struct vnt_private *priv, u8 request, u16 value, u16 index, u16 length, u8 *buffer); int vnt_control_in(struct vnt_private *priv, u8 request, u16 value, @@ -26,6 +28,9 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value, int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 ref_off, u8 data); int vnt_control_in_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 *data); +int vnt_control_out_blocks(struct vnt_private *priv, + u16 block, u8 reg, u16 len, u8 *data); + int vnt_start_interrupt_urb(struct vnt_private *priv); int vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb); int vnt_tx_context(struct vnt_private *priv, diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c index 3eb2f11a5de1..2c5250ca2801 100644 --- a/drivers/staging/vt6656/wcmd.c +++ b/drivers/staging/vt6656/wcmd.c @@ -99,6 +99,7 @@ void vnt_run_command(struct work_struct *work) if (vnt_init(priv)) { /* If fail all ends TODO retry */ dev_err(&priv->usb->dev, "failed to start\n"); + usb_set_intfdata(priv->intf, NULL); ieee80211_free_hw(priv->hw); return; } diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c index df2640a79f02..b13d7341f8bb 100644 --- a/drivers/staging/wfx/data_tx.c +++ b/drivers/staging/wfx/data_tx.c @@ -16,7 +16,7 @@ #include "traces.h" #include "hif_tx_mib.h" -#define WFX_INVALID_RATE_ID (0xFF) +#define WFX_INVALID_RATE_ID 15 #define WFX_LINK_ID_NO_ASSOC 15 #define WFX_LINK_ID_GC_TIMEOUT ((unsigned long)(10 * HZ)) @@ -184,7 +184,7 @@ static int wfx_tx_policy_get(struct wfx_vif *wvif, */ entry = list_entry(cache->free.prev, struct tx_policy, link); memcpy(entry->rates, wanted.rates, sizeof(entry->rates)); - entry->uploaded = 0; + entry->uploaded = false; entry->usage_count = 0; idx = entry - cache->cache; } @@ -202,6 +202,8 @@ static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx) int usage, locked; struct tx_policy_cache *cache = &wvif->tx_policy_cache; + if (idx == WFX_INVALID_RATE_ID) + return; 
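	/*
	 * NOTE (reviewer annotation, not part of the patch above):
	 * wfx_tx_policy_get() can return WFX_INVALID_RATE_ID when no cache
	 * entry is usable (see the dev_warn() added in wfx_tx_get_rate_id()
	 * further down), so the early return just above keeps this put path
	 * from indexing cache->cache[] with that sentinel value.
	 */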
spin_lock_bh(&cache->lock); locked = list_empty(&cache->free); usage = wfx_tx_policy_release(cache, &cache->cache[idx]); @@ -239,7 +241,7 @@ static int wfx_tx_policy_upload(struct wfx_vif *wvif) dst->terminate = 1; dst->count_init = 1; memcpy(&dst->rates, src->rates, sizeof(src->rates)); - src->uploaded = 1; + src->uploaded = true; arg->num_tx_rate_policies++; } } @@ -249,7 +251,7 @@ static int wfx_tx_policy_upload(struct wfx_vif *wvif) return 0; } -static void wfx_tx_policy_upload_work(struct work_struct *work) +void wfx_tx_policy_upload_work(struct work_struct *work) { struct wfx_vif *wvif = container_of(work, struct wfx_vif, tx_policy_upload_work); @@ -270,7 +272,6 @@ void wfx_tx_policy_init(struct wfx_vif *wvif) spin_lock_init(&cache->lock); INIT_LIST_HEAD(&cache->used); INIT_LIST_HEAD(&cache->free); - INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work); for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i) list_add(&cache->cache[i].link, &cache->free); @@ -523,9 +524,9 @@ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates) for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) { if (rates[i + 1].idx == rates[i].idx && rates[i].idx != -1) { - rates[i].count = - max_t(int, rates[i].count, - rates[i + 1].count); + rates[i].count += rates[i + 1].count; + if (rates[i].count > 15) + rates[i].count = 15; rates[i + 1].idx = -1; rates[i + 1].count = 0; @@ -537,6 +538,17 @@ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates) } } } while (!finished); + // Ensure that MCS0 or 1Mbps is present at the end of the retry list + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { + if (rates[i].idx == 0) + break; + if (rates[i].idx == -1) { + rates[i].idx = 0; + rates[i].count = 8; // == hw->max_rate_tries + rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS; + break; + } + } // All retries use long GI for (i = 1; i < IEEE80211_TX_MAX_RATES; i++) rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI; @@ -550,7 +562,8 @@ static u8 wfx_tx_get_rate_id(struct wfx_vif *wvif, rate_id = wfx_tx_policy_get(wvif, tx_info->driver_rates, &tx_policy_renew); - WARN(rate_id == WFX_INVALID_RATE_ID, "unable to get a valid Tx policy"); + if (rate_id == WFX_INVALID_RATE_ID) + dev_warn(wvif->wdev->dev, "unable to get a valid Tx policy"); if (tx_policy_renew) { /* FIXME: It's not so optimal to stop TX queues every now and @@ -735,7 +748,9 @@ void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg) rate = &tx_info->status.rates[i]; if (rate->idx < 0) break; - if (tx_count < rate->count && arg->status && arg->ack_failures) + if (tx_count < rate->count && + arg->status == HIF_STATUS_RETRY_EXCEEDED && + arg->ack_failures) dev_dbg(wvif->wdev->dev, "all retries were not consumed: %d != %d\n", rate->count, tx_count); if (tx_count <= rate->count && tx_count && diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h index 29faa5640516..0fc388db62e0 100644 --- a/drivers/staging/wfx/data_tx.h +++ b/drivers/staging/wfx/data_tx.h @@ -39,9 +39,9 @@ struct wfx_link_entry { struct tx_policy { struct list_head link; + int usage_count; u8 rates[12]; - u8 usage_count; - u8 uploaded; + bool uploaded; }; struct tx_policy_cache { @@ -61,6 +61,7 @@ struct wfx_tx_priv { } __packed; void wfx_tx_policy_init(struct wfx_vif *wvif); +void wfx_tx_policy_upload_work(struct work_struct *work); void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb); diff --git a/drivers/staging/wfx/hif_tx_mib.h b/drivers/staging/wfx/hif_tx_mib.h index 
bb091e395ff5..9be74881c56c 100644 --- a/drivers/staging/wfx/hif_tx_mib.h +++ b/drivers/staging/wfx/hif_tx_mib.h @@ -147,7 +147,6 @@ static inline int hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required) } if (!required) val.unpmf_allowed = 1; - cpu_to_le32s((u32 *) &val); return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_PROTECTED_MGMT_POLICY, &val, sizeof(val)); diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c index 986a2ef678b9..3b47b6c21ea1 100644 --- a/drivers/staging/wfx/main.c +++ b/drivers/staging/wfx/main.c @@ -289,7 +289,7 @@ struct wfx_dev *wfx_init_common(struct device *dev, hw->sta_data_size = sizeof(struct wfx_sta_priv); hw->queues = 4; hw->max_rates = 8; - hw->max_rate_tries = 15; + hw->max_rate_tries = 8; hw->extra_tx_headroom = sizeof(struct hif_sl_msg_hdr) + sizeof(struct hif_msg) + sizeof(struct hif_req_tx) diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c index c7ee90888f69..680fed31cefb 100644 --- a/drivers/staging/wfx/queue.c +++ b/drivers/staging/wfx/queue.c @@ -422,6 +422,7 @@ static bool hif_handle_tx_data(struct wfx_vif *wvif, struct sk_buff *skb, break; case do_wep: wfx_tx_lock(wvif->wdev); + WARN_ON(wvif->wep_pending_skb); wvif->wep_default_key_id = tx_priv->hw_key->keyidx; wvif->wep_pending_skb = skb; if (!schedule_work(&wvif->wep_key_work)) diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c index 29848a202ab4..471dd15b227f 100644 --- a/drivers/staging/wfx/sta.c +++ b/drivers/staging/wfx/sta.c @@ -592,6 +592,7 @@ static void wfx_do_unjoin(struct wfx_vif *wvif) wfx_tx_flush(wvif->wdev); hif_keep_alive_period(wvif, 0); hif_reset(wvif, false); + wfx_tx_policy_init(wvif); hif_set_output_power(wvif, wvif->wdev->output_power * 10); wvif->dtim_period = 0; hif_set_macaddr(wvif, wvif->vif->addr); @@ -880,8 +881,10 @@ static int wfx_update_beaconing(struct wfx_vif *wvif) if (wvif->state != WFX_STATE_AP || wvif->beacon_int != conf->beacon_int) { wfx_tx_lock_flush(wvif->wdev); - if (wvif->state != WFX_STATE_PASSIVE) + if (wvif->state != WFX_STATE_PASSIVE) { hif_reset(wvif, false); + wfx_tx_policy_init(wvif); + } wvif->state = WFX_STATE_PASSIVE; wfx_start_ap(wvif); wfx_tx_unlock(wvif->wdev); @@ -1567,6 +1570,7 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) INIT_WORK(&wvif->set_cts_work, wfx_set_cts_work); INIT_WORK(&wvif->unjoin_work, wfx_unjoin_work); + INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work); mutex_unlock(&wdev->conf_mutex); hif_set_macaddr(wvif, vif->addr); diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 6949ea8bc387..51ffd5c002de 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -646,7 +646,9 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio, } bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio)); - bip_set_seed(bip, bio->bi_iter.bi_sector); + /* virtual start sector must be in integrity interval units */ + bip_set_seed(bip, bio->bi_iter.bi_sector >> + (bi->interval_exp - SECTOR_SHIFT)); pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size, (unsigned long long)bip->bip_iter.bi_sector); diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c index 0332a5301d61..d767eebf30bd 100644 --- a/drivers/tee/optee/shm_pool.c +++ b/drivers/tee/optee/shm_pool.c @@ -28,9 +28,22 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, shm->size = PAGE_SIZE << order; if (shm->flags & TEE_SHM_DMA_BUF) { + unsigned int 
nr_pages = 1 << order, i; + struct page **pages; + + pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL); + if (!pages) + return -ENOMEM; + + for (i = 0; i < nr_pages; i++) { + pages[i] = page; + page++; + } + shm->flags |= TEE_SHM_REGISTER; - rc = optee_shm_register(shm->ctx, shm, &page, 1 << order, + rc = optee_shm_register(shm->ctx, shm, pages, nr_pages, (unsigned long)shm->kaddr); + kfree(pages); } return rc; diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index 015e7d201598..0e7cf5236932 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -110,6 +110,9 @@ static int tsens_register(struct tsens_priv *priv) irq = platform_get_irq_byname(pdev, "uplow"); if (irq < 0) { ret = irq; + /* For old DTs with no IRQ defined */ + if (irq == -ENXIO) + ret = 0; goto err_put_device; } diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c index 226adeec2aed..ce5309d00280 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c @@ -663,6 +663,12 @@ static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl, return AE_OK; } +static const struct acpi_device_id serdev_acpi_devices_blacklist[] = { + { "INT3511", 0 }, + { "INT3512", 0 }, + { }, +}; + static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level, void *data, void **return_value) { @@ -675,6 +681,10 @@ static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level, if (acpi_device_enumerated(adev)) return AE_OK; + /* Skip if black listed */ + if (!acpi_match_device_ids(adev, serdev_acpi_devices_blacklist)) + return AE_OK; + if (acpi_serdev_check_resources(ctrl, adev)) return AE_OK; diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index a8dc8af83f39..1ba9bc667e13 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -2270,27 +2270,6 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, mode |= ATMEL_US_USMODE_NORMAL; } - /* set the mode, clock divisor, parity, stop bits and data size */ - atmel_uart_writel(port, ATMEL_US_MR, mode); - - /* - * when switching the mode, set the RTS line state according to the - * new mode, otherwise keep the former state - */ - if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) { - unsigned int rts_state; - - if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { - /* let the hardware control the RTS line */ - rts_state = ATMEL_US_RTSDIS; - } else { - /* force RTS line to low level */ - rts_state = ATMEL_US_RTSEN; - } - - atmel_uart_writel(port, ATMEL_US_CR, rts_state); - } - /* * Set the baud rate: * Fractional baudrate allows to setup output frequency more @@ -2317,6 +2296,28 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) atmel_uart_writel(port, ATMEL_US_BRGR, quot); + + /* set the mode, clock divisor, parity, stop bits and data size */ + atmel_uart_writel(port, ATMEL_US_MR, mode); + + /* + * when switching the mode, set the RTS line state according to the + * new mode, otherwise keep the former state + */ + if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) { + unsigned int rts_state; + + if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { + /* let the hardware control the RTS line */ + rts_state = ATMEL_US_RTSDIS; + } else { + /* force RTS line to low level */ + rts_state = ATMEL_US_RTSEN; + } + + atmel_uart_writel(port, ATMEL_US_CR, rts_state); + } + atmel_uart_writel(port, 
ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); atmel_port->tx_stopped = false; diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 1cbae0768b1f..f6c45a796433 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -1580,6 +1580,7 @@ static void __msm_console_write(struct uart_port *port, const char *s, int num_newlines = 0; bool replaced = false; void __iomem *tf; + int locked = 1; if (is_uartdm) tf = port->membase + UARTDM_TF; @@ -1592,7 +1593,13 @@ static void __msm_console_write(struct uart_port *port, const char *s, num_newlines++; count += num_newlines; - spin_lock(&port->lock); + if (port->sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock(&port->lock); + else + spin_lock(&port->lock); + if (is_uartdm) msm_reset_dm_count(port, count); @@ -1628,7 +1635,9 @@ static void __msm_console_write(struct uart_port *port, const char *s, iowrite32_rep(tf, buf, 1); i += num_chars; } - spin_unlock(&port->lock); + + if (locked) + spin_unlock(&port->lock); } static void msm_console_write(struct console *co, const char *s, diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index b0a6eb106edb..7c2782785736 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -2834,6 +2834,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport) if (uport->cons && uport->dev) of_console_check(uport->dev->of_node, uport->cons->name, uport->line); + tty_port_link_device(port, drv->tty_driver, uport->line); uart_configure_port(drv, state, uport); port->console = uart_console(uport); diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c index 31df23502562..f60a59d9bf27 100644 --- a/drivers/tty/serial/sprd_serial.c +++ b/drivers/tty/serial/sprd_serial.c @@ -679,6 +679,9 @@ static irqreturn_t sprd_handle_irq(int irq, void *dev_id) if (ims & SPRD_IMSR_TIMEOUT) serial_out(port, SPRD_ICLR, SPRD_ICLR_TIMEOUT); + if (ims & SPRD_IMSR_BREAK_DETECT) + serial_out(port, SPRD_ICLR, SPRD_IMSR_BREAK_DETECT); + if (ims & (SPRD_IMSR_RX_FIFO_FULL | SPRD_IMSR_BREAK_DETECT | SPRD_IMSR_TIMEOUT)) sprd_rx(port); diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c index 4c1e75509303..02f6ca2cb1ba 100644 --- a/drivers/usb/cdns3/gadget.c +++ b/drivers/usb/cdns3/gadget.c @@ -1375,13 +1375,10 @@ static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev, */ static irqreturn_t cdns3_device_irq_handler(int irq, void *data) { - struct cdns3_device *priv_dev; - struct cdns3 *cdns = data; + struct cdns3_device *priv_dev = data; irqreturn_t ret = IRQ_NONE; u32 reg; - priv_dev = cdns->gadget_dev; - /* check USB device interrupt */ reg = readl(&priv_dev->regs->usb_ists); if (reg) { @@ -1419,14 +1416,12 @@ static irqreturn_t cdns3_device_irq_handler(int irq, void *data) */ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data) { - struct cdns3_device *priv_dev; - struct cdns3 *cdns = data; + struct cdns3_device *priv_dev = data; irqreturn_t ret = IRQ_NONE; unsigned long flags; int bit; u32 reg; - priv_dev = cdns->gadget_dev; spin_lock_irqsave(&priv_dev->lock, flags); reg = readl(&priv_dev->regs->usb_ists); @@ -2539,7 +2534,7 @@ void cdns3_gadget_exit(struct cdns3 *cdns) priv_dev = cdns->gadget_dev; - devm_free_irq(cdns->dev, cdns->dev_irq, cdns); + devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev); pm_runtime_mark_last_busy(cdns->dev); 
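	/*
	 * NOTE (reviewer annotation, not part of the patch above):
	 * free_irq()/devm_free_irq() match a handler by its dev_id cookie,
	 * so the devm_free_irq() call above has to pass the same pointer
	 * that __cdns3_gadget_init() below now hands to
	 * devm_request_threaded_irq(), i.e. cdns->gadget_dev (priv_dev)
	 * rather than the cdns structure itself.
	 */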
pm_runtime_put_autosuspend(cdns->dev); @@ -2710,7 +2705,8 @@ static int __cdns3_gadget_init(struct cdns3 *cdns) ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq, cdns3_device_irq_handler, cdns3_device_thread_irq_handler, - IRQF_SHARED, dev_name(cdns->dev), cdns); + IRQF_SHARED, dev_name(cdns->dev), + cdns->gadget_dev); if (ret) goto err0; diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index b45ceb91c735..48e4a5ca1835 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c @@ -26,6 +26,7 @@ static int (*orig_bus_suspend)(struct usb_hcd *hcd); struct ehci_ci_priv { struct regulator *reg_vbus; + bool enabled; }; static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable) @@ -37,7 +38,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable) int ret = 0; int port = HCS_N_PORTS(ehci->hcs_params); - if (priv->reg_vbus) { + if (priv->reg_vbus && enable != priv->enabled) { if (port > 1) { dev_warn(dev, "Not support multi-port regulator control\n"); @@ -53,6 +54,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable) enable ? "enable" : "disable", ret); return ret; } + priv->enabled = enable; } if (enable && (ci->platdata->phy_mode == USBPHY_INTERFACE_MODE_HSIC)) { diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 5f40117e68e7..26bc05e48d8a 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -203,9 +203,58 @@ static const unsigned short super_speed_maxpacket_maxes[4] = { [USB_ENDPOINT_XFER_INT] = 1024, }; -static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, - int asnum, struct usb_host_interface *ifp, int num_ep, - unsigned char *buffer, int size) +static bool endpoint_is_duplicate(struct usb_endpoint_descriptor *e1, + struct usb_endpoint_descriptor *e2) +{ + if (e1->bEndpointAddress == e2->bEndpointAddress) + return true; + + if (usb_endpoint_xfer_control(e1) || usb_endpoint_xfer_control(e2)) { + if (usb_endpoint_num(e1) == usb_endpoint_num(e2)) + return true; + } + + return false; +} + +/* + * Check for duplicate endpoint addresses in other interfaces and in the + * altsetting currently being parsed. 
+ */ +static bool config_endpoint_is_duplicate(struct usb_host_config *config, + int inum, int asnum, struct usb_endpoint_descriptor *d) +{ + struct usb_endpoint_descriptor *epd; + struct usb_interface_cache *intfc; + struct usb_host_interface *alt; + int i, j, k; + + for (i = 0; i < config->desc.bNumInterfaces; ++i) { + intfc = config->intf_cache[i]; + + for (j = 0; j < intfc->num_altsetting; ++j) { + alt = &intfc->altsetting[j]; + + if (alt->desc.bInterfaceNumber == inum && + alt->desc.bAlternateSetting != asnum) + continue; + + for (k = 0; k < alt->desc.bNumEndpoints; ++k) { + epd = &alt->endpoint[k].desc; + + if (endpoint_is_duplicate(epd, d)) + return true; + } + } + } + + return false; +} + +static int usb_parse_endpoint(struct device *ddev, int cfgno, + struct usb_host_config *config, int inum, int asnum, + struct usb_host_interface *ifp, int num_ep, + unsigned char *buffer, int size) { unsigned char *buffer0 = buffer; struct usb_endpoint_descriptor *d; @@ -242,13 +291,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, goto skip_to_next_endpoint_or_interface_descriptor; /* Check for duplicate endpoint addresses */ - for (i = 0; i < ifp->desc.bNumEndpoints; ++i) { - if (ifp->endpoint[i].desc.bEndpointAddress == - d->bEndpointAddress) { - dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", - cfgno, inum, asnum, d->bEndpointAddress); - goto skip_to_next_endpoint_or_interface_descriptor; - } + if (config_endpoint_is_duplicate(config, inum, asnum, d)) { + dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", + cfgno, inum, asnum, d->bEndpointAddress); + goto skip_to_next_endpoint_or_interface_descriptor; } endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; @@ -346,12 +392,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, endpoint->desc.wMaxPacketSize = cpu_to_le16(8); } - /* Validate the wMaxPacketSize field */ + /* + * Validate the wMaxPacketSize field. + * Some devices have isochronous endpoints in altsetting 0; + * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0 + * (see the end of section 5.6.3), so don't warn about them. + */ maxp = usb_endpoint_maxp(&endpoint->desc); - if (maxp == 0) { - dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has wMaxPacketSize 0, skipping\n", + if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) { + dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n", cfgno, inum, asnum, d->bEndpointAddress); - goto skip_to_next_endpoint_or_interface_descriptor; } /* Find the highest legal maxpacket size for this endpoint */ @@ -522,8 +572,8 @@ static int usb_parse_interface(struct device *ddev, int cfgno, if (((struct usb_descriptor_header *) buffer)->bDescriptorType == USB_DT_INTERFACE) break; - retval = usb_parse_endpoint(ddev, cfgno, inum, asnum, alt, - num_ep, buffer, size); + retval = usb_parse_endpoint(ddev, cfgno, config, inum, asnum, + alt, num_ep, buffer, size); if (retval < 0) return retval; ++n; diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index f229ad6952c0..3405b146edc9 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1192,6 +1192,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) * PORT_OVER_CURRENT is not. So check for any of them. 
*/ if (udev || (portstatus & USB_PORT_STAT_CONNECTION) || + (portchange & USB_PORT_STAT_C_CONNECTION) || (portstatus & USB_PORT_STAT_OVERCURRENT) || (portchange & USB_PORT_STAT_C_OVERCURRENT)) set_bit(port1, hub->change_bits); @@ -2692,7 +2693,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub) #define SET_ADDRESS_TRIES 2 #define GET_DESCRIPTOR_TRIES 2 #define SET_CONFIG_TRIES (2 * (use_both_schemes + 1)) -#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)scheme) +#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)(scheme)) #define HUB_ROOT_RESET_TIME 60 /* times are in msec */ #define HUB_SHORT_RESET_TIME 10 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 0c960a97ea02..154f3f3e8cff 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2467,6 +2467,13 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep, static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req) { + /* + * For OUT direction, host may send less than the setup + * length. Return true for all OUT requests. + */ + if (!req->direction) + return true; + return req->request.actual == req->request.length; } diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index ae70ce29d5e4..797d6ace8994 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig @@ -445,6 +445,7 @@ config USB_TEGRA_XUDC tristate "NVIDIA Tegra Superspeed USB 3.0 Device Controller" depends on ARCH_TEGRA || COMPILE_TEST depends on PHY_TEGRA_XUSB + select USB_ROLE_SWITCH help Enables NVIDIA Tegra USB 3.0 device mode controller driver. diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index aa2f77f1506d..8a5c9b3ebe1e 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -27,6 +27,10 @@ /*-------------------------------------------------------------------------*/ +/* PID Codes that are used here, from EHCI specification, Table 3-16. */ +#define PID_CODE_IN 1 +#define PID_CODE_SETUP 2 + /* fill a qtd, returning how much of the buffer we were able to queue up */ static int @@ -190,7 +194,7 @@ static int qtd_copy_status ( int status = -EINPROGRESS; /* count IN/OUT bytes, not SETUP (even short packets) */ - if (likely (QTD_PID (token) != 2)) + if (likely(QTD_PID(token) != PID_CODE_SETUP)) urb->actual_length += length - QTD_LENGTH (token); /* don't modify error codes */ @@ -206,6 +210,13 @@ static int qtd_copy_status ( if (token & QTD_STS_BABBLE) { /* FIXME "must" disable babbling device's port too */ status = -EOVERFLOW; + /* + * When MMF is active and PID Code is IN, queue is halted. + * EHCI Specification, Table 4-13. 
+ */ + } else if ((token & QTD_STS_MMF) && + (QTD_PID(token) == PID_CODE_IN)) { + status = -EPROTO; /* CERR nonzero + halt --> stall */ } else if (QTD_CERR(token)) { status = -EPIPE; diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c index 38183ac438c6..1371b0c249ec 100644 --- a/drivers/usb/host/ohci-da8xx.c +++ b/drivers/usb/host/ohci-da8xx.c @@ -415,13 +415,17 @@ static int ohci_da8xx_probe(struct platform_device *pdev) } da8xx_ohci->oc_gpio = devm_gpiod_get_optional(dev, "oc", GPIOD_IN); - if (IS_ERR(da8xx_ohci->oc_gpio)) + if (IS_ERR(da8xx_ohci->oc_gpio)) { + error = PTR_ERR(da8xx_ohci->oc_gpio); goto err; + } if (da8xx_ohci->oc_gpio) { oc_irq = gpiod_to_irq(da8xx_ohci->oc_gpio); - if (oc_irq < 0) + if (oc_irq < 0) { + error = oc_irq; goto err; + } error = devm_request_threaded_irq(dev, oc_irq, NULL, ohci_da8xx_oc_thread, IRQF_TRIGGER_RISING | diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 2907fe4d78dd..4917c5b033fa 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -519,7 +519,6 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) retval = xhci_resume(xhci, hibernated); return retval; } -#endif /* CONFIG_PM */ static void xhci_pci_shutdown(struct usb_hcd *hcd) { @@ -532,6 +531,7 @@ static void xhci_pci_shutdown(struct usb_hcd *hcd) if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) pci_set_power_state(pdev, PCI_D3hot); } +#endif /* CONFIG_PM */ /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c index 5261f8dfedec..e3b8c84ccdb8 100644 --- a/drivers/usb/musb/jz4740.c +++ b/drivers/usb/musb/jz4740.c @@ -75,14 +75,17 @@ static struct musb_hdrc_platform_data jz4740_musb_platform_data = { static int jz4740_musb_init(struct musb *musb) { struct device *dev = musb->controller->parent; + int err; if (dev->of_node) musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0); else musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2); if (IS_ERR(musb->xceiv)) { - dev_err(dev, "No transceiver configured\n"); - return PTR_ERR(musb->xceiv); + err = PTR_ERR(musb->xceiv); + if (err != -EPROBE_DEFER) + dev_err(dev, "No transceiver configured: %d", err); + return err; } /* Silicon does not implement ConfigData register. 
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 15cca912c53e..5ebf30bd61bd 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1840,6 +1840,9 @@ ATTRIBUTE_GROUPS(musb); #define MUSB_QUIRK_B_INVALID_VBUS_91 (MUSB_DEVCTL_BDEVICE | \ (2 << MUSB_DEVCTL_VBUS_SHIFT) | \ MUSB_DEVCTL_SESSION) +#define MUSB_QUIRK_B_DISCONNECT_99 (MUSB_DEVCTL_BDEVICE | \ + (3 << MUSB_DEVCTL_VBUS_SHIFT) | \ + MUSB_DEVCTL_SESSION) #define MUSB_QUIRK_A_DISCONNECT_19 ((3 << MUSB_DEVCTL_VBUS_SHIFT) | \ MUSB_DEVCTL_SESSION) @@ -1862,6 +1865,11 @@ static void musb_pm_runtime_check_session(struct musb *musb) s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV | MUSB_DEVCTL_HR; switch (devctl & ~s) { + case MUSB_QUIRK_B_DISCONNECT_99: + musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n"); + schedule_delayed_work(&musb->irq_work, + msecs_to_jiffies(1000)); + break; case MUSB_QUIRK_B_INVALID_VBUS_91: if (musb->quirk_retries && !musb->flush_irq_work) { musb_dbg(musb, @@ -2310,6 +2318,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) musb_disable_interrupts(musb); musb_writeb(musb->mregs, MUSB_DEVCTL, 0); + /* MUSB_POWER_SOFTCONN might be already set, JZ4740 does this. */ + musb_writeb(musb->mregs, MUSB_POWER, 0); + /* Init IRQ workqueue before request_irq */ INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work); INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset); diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c index 5fc6825745f2..2d3751d885b4 100644 --- a/drivers/usb/musb/musbhsdma.c +++ b/drivers/usb/musb/musbhsdma.c @@ -425,7 +425,7 @@ struct dma_controller *musbhs_dma_controller_create(struct musb *musb, controller->controller.channel_abort = dma_channel_abort; if (request_irq(irq, dma_controller_irq, 0, - dev_name(musb->controller), &controller->controller)) { + dev_name(musb->controller), controller)) { dev_err(dev, "request_irq %d failed!\n", irq); musb_dma_controller_destroy(&controller->controller); diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index df582fe855f0..d3f420f3a083 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -642,9 +642,13 @@ static int ch341_tiocmget(struct tty_struct *tty) static int ch341_reset_resume(struct usb_serial *serial) { struct usb_serial_port *port = serial->port[0]; - struct ch341_private *priv = usb_get_serial_port_data(port); + struct ch341_private *priv; int ret; + priv = usb_get_serial_port_data(port); + if (!priv) + return 0; + /* reconfigure ch341 serial port after bus-reset */ ch341_configure(serial->dev, priv); diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index 9690a5f4b9d6..5737add6a2a4 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -716,7 +716,7 @@ static void edge_interrupt_callback(struct urb *urb) if (txCredits) { port = edge_serial->serial->port[portNumber]; edge_port = usb_get_serial_port_data(port); - if (edge_port->open) { + if (edge_port && edge_port->open) { spin_lock_irqsave(&edge_port->ep_lock, flags); edge_port->txCredits += txCredits; @@ -1725,7 +1725,8 @@ static void edge_break(struct tty_struct *tty, int break_state) static void process_rcvd_data(struct edgeport_serial *edge_serial, unsigned char *buffer, __u16 bufferLength) { - struct device *dev = &edge_serial->serial->dev->dev; + struct usb_serial *serial = edge_serial->serial; + struct device *dev = &serial->dev->dev; struct usb_serial_port *port; struct 
edgeport_port *edge_port; __u16 lastBufferLength; @@ -1821,11 +1822,10 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial, /* spit this data back into the tty driver if this port is open */ - if (rxLen) { - port = edge_serial->serial->port[ - edge_serial->rxPort]; + if (rxLen && edge_serial->rxPort < serial->num_ports) { + port = serial->port[edge_serial->rxPort]; edge_port = usb_get_serial_port_data(port); - if (edge_port->open) { + if (edge_port && edge_port->open) { dev_dbg(dev, "%s - Sending %d bytes to TTY for port %d\n", __func__, rxLen, edge_serial->rxPort); @@ -1833,8 +1833,8 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial, rxLen); edge_port->port->icount.rx += rxLen; } - buffer += rxLen; } + buffer += rxLen; break; case EXPECT_HDR3: /* Expect 3rd byte of status header */ @@ -1869,6 +1869,8 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial, __u8 code = edge_serial->rxStatusCode; /* switch the port pointer to the one being currently talked about */ + if (edge_serial->rxPort >= edge_serial->serial->num_ports) + return; port = edge_serial->serial->port[edge_serial->rxPort]; edge_port = usb_get_serial_port_data(port); if (edge_port == NULL) { diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index e66a59ef43a1..aa3dbce22cfb 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -1058,6 +1058,8 @@ static void usa49_glocont_callback(struct urb *urb) for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; p_priv = usb_get_serial_port_data(port); + if (!p_priv) + continue; if (p_priv->resend_cont) { dev_dbg(&port->dev, "%s - sending setup\n", __func__); @@ -1459,6 +1461,8 @@ static void usa67_glocont_callback(struct urb *urb) for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; p_priv = usb_get_serial_port_data(port); + if (!p_priv) + continue; if (p_priv->resend_cont) { dev_dbg(&port->dev, "%s - sending setup\n", __func__); diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c index cb7aac9cd9e7..ed2b4e6dca38 100644 --- a/drivers/usb/serial/opticon.c +++ b/drivers/usb/serial/opticon.c @@ -113,7 +113,7 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype, retval = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), requesttype, USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, - 0, 0, buffer, 1, 0); + 0, 0, buffer, 1, USB_CTRL_SET_TIMEOUT); kfree(buffer); if (retval < 0) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index e9491d400a24..084cc2fff3ae 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -248,6 +248,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_BG96 0x0296 #define QUECTEL_PRODUCT_EP06 0x0306 #define QUECTEL_PRODUCT_EM12 0x0512 +#define QUECTEL_PRODUCT_RM500Q 0x0800 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6001 0x6001 @@ -567,6 +568,9 @@ static void option_instat_callback(struct urb *urb); /* Interface must have two endpoints */ #define NUMEP2 BIT(16) +/* Device needs ZLP */ +#define ZLP BIT(17) + static const struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, @@ -1101,6 +1105,11 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 
0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10), + .driver_info = ZLP }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), @@ -1172,6 +1181,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */ .driver_info = NCTRL(0) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff), /* Telit ME910G1 */ + .driver_info = NCTRL(0) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), @@ -1196,6 +1207,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */ .driver_info = NCTRL(0) }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ + .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = RSVD(1) }, @@ -2097,6 +2110,9 @@ static int option_attach(struct usb_serial *serial) if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber))) data->use_send_setup = 1; + if (device_flags & ZLP) + data->use_zlp = 1; + spin_lock_init(&data->susp_lock); usb_set_serial_data(serial, data); diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c index a62981ca7a73..f93b81a297d6 100644 --- a/drivers/usb/serial/quatech2.c +++ b/drivers/usb/serial/quatech2.c @@ -841,7 +841,10 @@ static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch) u8 newMSR = (u8) *ch; unsigned long flags; + /* May be called from qt2_process_read_urb() for an unbound port. */ port_priv = usb_get_serial_port_data(port); + if (!port_priv) + return; spin_lock_irqsave(&port_priv->lock, flags); port_priv->shadowMSR = newMSR; @@ -869,7 +872,10 @@ static void qt2_update_lsr(struct usb_serial_port *port, unsigned char *ch) unsigned long flags; u8 newLSR = (u8) *ch; + /* May be called from qt2_process_read_urb() for an unbound port. 
*/ port_priv = usb_get_serial_port_data(port); + if (!port_priv) + return; if (newLSR & UART_LSR_BI) newLSR &= (u8) (UART_LSR_OE | UART_LSR_BI); diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index edbbb13d6de6..bd23a7cb1be2 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -86,6 +86,8 @@ DEVICE(moto_modem, MOTO_IDS); #define MOTOROLA_TETRA_IDS() \ { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \ { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \ + { USB_DEVICE(0x0cad, 0x9013) }, /* MTP3xxx */ \ + { USB_DEVICE(0x0cad, 0x9015) }, /* MTP85xx */ \ { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */ DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 8f066bb55d7d..dc7a65b9ec98 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -1317,6 +1317,9 @@ static int usb_serial_register(struct usb_serial_driver *driver) return -EINVAL; } + /* Prevent individual ports from being unbound. */ + driver->driver.suppress_bind_attrs = true; + usb_serial_operations_init(driver); /* Add this device to our list of devices */ diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h index 1c120eaf4091..934e9361cf6b 100644 --- a/drivers/usb/serial/usb-wwan.h +++ b/drivers/usb/serial/usb-wwan.h @@ -38,6 +38,7 @@ struct usb_wwan_intf_private { spinlock_t susp_lock; unsigned int suspended:1; unsigned int use_send_setup:1; + unsigned int use_zlp:1; int in_flight; unsigned int open_ports; void *private; diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index 7e855c87e4f7..13be21aad2f4 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c @@ -461,6 +461,7 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port, void (*callback) (struct urb *)) { struct usb_serial *serial = port->serial; + struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial); struct urb *urb; urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */ @@ -471,6 +472,9 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port, usb_sndbulkpipe(serial->dev, endpoint) | dir, buf, len, callback, ctx); + if (intfdata->use_zlp && dir == USB_DIR_OUT) + urb->transfer_flags |= URB_ZERO_PACKET; + return urb; } diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig index 72481bbb2af3..5b986d6c801d 100644 --- a/drivers/usb/typec/tcpm/Kconfig +++ b/drivers/usb/typec/tcpm/Kconfig @@ -32,6 +32,7 @@ endif # TYPEC_TCPCI config TYPEC_FUSB302 tristate "Fairchild FUSB302 Type-C chip driver" depends on I2C + depends on EXTCON || !EXTCON help The Fairchild FUSB302 Type-C chip driver that works with Type-C Port Controller Manager to provide USB PD and USB diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c index c1f7073a56de..8b4ff9fff340 100644 --- a/drivers/usb/typec/tcpm/tcpci.c +++ b/drivers/usb/typec/tcpm/tcpci.c @@ -432,20 +432,30 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci) if (status & TCPC_ALERT_RX_STATUS) { struct pd_message msg; - unsigned int cnt; + unsigned int cnt, payload_cnt; u16 header; regmap_read(tcpci->regmap, TCPC_RX_BYTE_CNT, &cnt); + /* + * 'cnt' corresponds to READABLE_BYTE_COUNT in section 4.4.14 + * of the TCPCI spec [Rev 2.0 Ver 1.0 October 2017] and is + * defined in table 4-36 as one greater than the number of + * bytes received. And that number includes the header. 
So: + */ + if (cnt > 3) + payload_cnt = cnt - (1 + sizeof(msg.header)); + else + payload_cnt = 0; tcpci_read16(tcpci, TCPC_RX_HDR, &header); msg.header = cpu_to_le16(header); - if (WARN_ON(cnt > sizeof(msg.payload))) - cnt = sizeof(msg.payload); + if (WARN_ON(payload_cnt > sizeof(msg.payload))) + payload_cnt = sizeof(msg.payload); - if (cnt > 0) + if (payload_cnt > 0) regmap_raw_read(tcpci->regmap, TCPC_RX_DATA, - &msg.payload, cnt); + &msg.payload, payload_cnt); /* Read complete, clear RX status alert bit */ tcpci_write16(tcpci, TCPC_ALERT, TCPC_ALERT_RX_STATUS); diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h index 8569bbd3762f..831c9470bdc1 100644 --- a/drivers/usb/typec/ucsi/ucsi.h +++ b/drivers/usb/typec/ucsi/ucsi.h @@ -94,15 +94,15 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num); #define UCSI_ENABLE_NTFY_CMD_COMPLETE BIT(16) #define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE BIT(17) #define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE BIT(18) -#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(19) -#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(20) -#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(21) -#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(22) -#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(23) -#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(24) -#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(25) -#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(26) -#define UCSI_ENABLE_NTFY_ERROR BIT(27) +#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(21) +#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(22) +#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(23) +#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(24) +#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(25) +#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(27) +#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(28) +#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(30) +#define UCSI_ENABLE_NTFY_ERROR BIT(31) #define UCSI_ENABLE_NTFY_ALL 0xdbe70000 /* SET_UOR command bits */ diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index 6532d68e8808..e4b96674c405 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c @@ -727,6 +727,9 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb) copy -= recv; ret += recv; + + if (!copy) + break; } if (ret != size) diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c index 33f8972ba842..00fc98741c5d 100644 --- a/drivers/usb/usbip/vhci_rx.c +++ b/drivers/usb/usbip/vhci_rx.c @@ -77,16 +77,21 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, usbip_pack_pdu(pdu, urb, USBIP_RET_SUBMIT, 0); /* recv transfer buffer */ - if (usbip_recv_xbuff(ud, urb) < 0) - return; + if (usbip_recv_xbuff(ud, urb) < 0) { + urb->status = -EPROTO; + goto error; + } /* recv iso_packet_descriptor */ - if (usbip_recv_iso(ud, urb) < 0) - return; + if (usbip_recv_iso(ud, urb) < 0) { + urb->status = -EPROTO; + goto error; + } /* restore the padding in iso packets */ usbip_pad_iso(ud, urb); +error: if (usbip_dbg_flag_vhci_rx) usbip_dump_urb(urb); diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 1679e0dc869b..cec868f8db3f 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -687,6 +687,7 @@ config MAX63XX_WATCHDOG config MAX77620_WATCHDOG tristate "Maxim Max77620 Watchdog Timer" depends on MFD_MAX77620 || COMPILE_TEST + select WATCHDOG_CORE help This is the driver for the Max77620 watchdog timer. 
Say 'Y' here to enable the watchdog timer support for @@ -1444,6 +1445,7 @@ config SMSC37B787_WDT config TQMX86_WDT tristate "TQ-Systems TQMX86 Watchdog Timer" depends on X86 + select WATCHDOG_CORE help This is the driver for the hardware watchdog timer in the TQMX86 IO controller found on some of their ComExpress Modules. diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c index 0a87c6f4bab2..11b9e7c6b7f5 100644 --- a/drivers/watchdog/imx7ulp_wdt.c +++ b/drivers/watchdog/imx7ulp_wdt.c @@ -112,7 +112,7 @@ static int imx7ulp_wdt_restart(struct watchdog_device *wdog, { struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog); - imx7ulp_wdt_enable(wdt->base, true); + imx7ulp_wdt_enable(wdog, true); imx7ulp_wdt_set_timeout(&wdt->wdd, 1); /* wait for wdog to fire */ diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c index 1cccf8eb1c5d..8e6dfe76f9c9 100644 --- a/drivers/watchdog/orion_wdt.c +++ b/drivers/watchdog/orion_wdt.c @@ -602,7 +602,7 @@ static int orion_wdt_probe(struct platform_device *pdev) set_bit(WDOG_HW_RUNNING, &dev->wdt.status); /* Request the IRQ only after the watchdog is disabled */ - irq = platform_get_irq(pdev, 0); + irq = platform_get_irq_optional(pdev, 0); if (irq > 0) { /* * Not all supported platforms specify an interrupt for the @@ -617,7 +617,7 @@ static int orion_wdt_probe(struct platform_device *pdev) } /* Optional 2nd interrupt for pretimeout */ - irq = platform_get_irq(pdev, 1); + irq = platform_get_irq_optional(pdev, 1); if (irq > 0) { orion_wdt_info.options |= WDIOF_PRETIMEOUT; ret = devm_request_irq(&pdev->dev, irq, orion_wdt_pre_irq, diff --git a/drivers/watchdog/rn5t618_wdt.c b/drivers/watchdog/rn5t618_wdt.c index 234876047431..6e524c8e26a8 100644 --- a/drivers/watchdog/rn5t618_wdt.c +++ b/drivers/watchdog/rn5t618_wdt.c @@ -188,6 +188,7 @@ static struct platform_driver rn5t618_wdt_driver = { module_platform_driver(rn5t618_wdt_driver); +MODULE_ALIAS("platform:rn5t618-wdt"); MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>"); MODULE_DESCRIPTION("RN5T618 watchdog driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c index fdf533fe0bb2..56a4a4030ca9 100644 --- a/drivers/watchdog/w83627hf_wdt.c +++ b/drivers/watchdog/w83627hf_wdt.c @@ -420,7 +420,7 @@ static int wdt_find(int addr) cr_wdt_csr = NCT6102D_WDT_CSR; break; case NCT6116_ID: - ret = nct6102; + ret = nct6116; cr_wdt_timeout = NCT6102D_WDT_TIMEOUT; cr_wdt_control = NCT6102D_WDT_CONTROL; cr_wdt_csr = NCT6102D_WDT_CSR; diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 49b381e104ef..7b36b51cdb9f 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -664,7 +664,6 @@ static int grow_gnttab_list(unsigned int more_frames) unsigned int nr_glist_frames, new_nr_glist_frames; unsigned int grefs_per_frame; - BUG_ON(gnttab_interface == NULL); grefs_per_frame = gnttab_interface->grefs_per_grant_frame; new_nr_grant_frames = nr_grant_frames + more_frames; @@ -1160,7 +1159,6 @@ EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync); static unsigned int nr_status_frames(unsigned int nr_grant_frames) { - BUG_ON(gnttab_interface == NULL); return gnttab_frames(nr_grant_frames, SPP); } @@ -1388,7 +1386,6 @@ static int gnttab_expand(unsigned int req_entries) int rc; unsigned int cur, extra; - BUG_ON(gnttab_interface == NULL); cur = nr_grant_frames; extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) / gnttab_interface->grefs_per_grant_frame); @@ -1423,7 +1420,6 @@ int 
gnttab_init(void) /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor. */ - BUG_ON(gnttab_interface == NULL); max_nr_glist_frames = (max_nr_grant_frames * gnttab_interface->grefs_per_grant_frame / RPP); diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h index d75a2385b37c..5f5b8a7d5b80 100644 --- a/drivers/xen/xenbus/xenbus.h +++ b/drivers/xen/xenbus/xenbus.h @@ -116,8 +116,6 @@ int xenbus_probe_devices(struct xen_bus_type *bus); void xenbus_dev_changed(const char *node, struct xen_bus_type *bus); -void xenbus_dev_shutdown(struct device *_dev); - int xenbus_dev_suspend(struct device *dev); int xenbus_dev_resume(struct device *dev); int xenbus_dev_cancel(struct device *dev); diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index c21be6e9d38a..378486b79f96 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -255,7 +255,6 @@ fail_put: module_put(drv->driver.owner); fail: xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); - xenbus_switch_state(dev, XenbusStateClosed); return err; } EXPORT_SYMBOL_GPL(xenbus_dev_probe); @@ -276,34 +275,20 @@ int xenbus_dev_remove(struct device *_dev) free_otherend_details(dev); - xenbus_switch_state(dev, XenbusStateClosed); + /* + * If the toolstack has forced the device state to closing then set + * the state to closed now to allow it to be cleaned up. + * Similarly, if the driver does not support re-bind, set the + * closed. + */ + if (!drv->allow_rebind || + xenbus_read_driver_state(dev->nodename) == XenbusStateClosing) + xenbus_switch_state(dev, XenbusStateClosed); + return 0; } EXPORT_SYMBOL_GPL(xenbus_dev_remove); -void xenbus_dev_shutdown(struct device *_dev) -{ - struct xenbus_device *dev = to_xenbus_device(_dev); - unsigned long timeout = 5*HZ; - - DPRINTK("%s", dev->nodename); - - get_device(&dev->dev); - if (dev->state != XenbusStateConnected) { - pr_info("%s: %s: %s != Connected, skipping\n", - __func__, dev->nodename, xenbus_strstate(dev->state)); - goto out; - } - xenbus_switch_state(dev, XenbusStateClosing); - timeout = wait_for_completion_timeout(&dev->down, timeout); - if (!timeout) - pr_info("%s: %s timeout closing device\n", - __func__, dev->nodename); - out: - put_device(&dev->dev); -} -EXPORT_SYMBOL_GPL(xenbus_dev_shutdown); - int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus, struct module *owner, const char *mod_name) diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c index b0bed4faf44c..14876faff3b0 100644 --- a/drivers/xen/xenbus/xenbus_probe_backend.c +++ b/drivers/xen/xenbus/xenbus_probe_backend.c @@ -198,7 +198,6 @@ static struct xen_bus_type xenbus_backend = { .uevent = xenbus_uevent_backend, .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, - .shutdown = xenbus_dev_shutdown, .dev_groups = xenbus_dev_groups, }, }; diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c index a7d90a719cea..8a1650bbe18f 100644 --- a/drivers/xen/xenbus/xenbus_probe_frontend.c +++ b/drivers/xen/xenbus/xenbus_probe_frontend.c @@ -126,6 +126,28 @@ static int xenbus_frontend_dev_probe(struct device *dev) return xenbus_dev_probe(dev); } +static void xenbus_frontend_dev_shutdown(struct device *_dev) +{ + struct xenbus_device *dev = to_xenbus_device(_dev); + unsigned long timeout = 5*HZ; + + DPRINTK("%s", dev->nodename); + + get_device(&dev->dev); + if 
(dev->state != XenbusStateConnected) { + pr_info("%s: %s: %s != Connected, skipping\n", + __func__, dev->nodename, xenbus_strstate(dev->state)); + goto out; + } + xenbus_switch_state(dev, XenbusStateClosing); + timeout = wait_for_completion_timeout(&dev->down, timeout); + if (!timeout) + pr_info("%s: %s timeout closing device\n", + __func__, dev->nodename); + out: + put_device(&dev->dev); +} + static const struct dev_pm_ops xenbus_pm_ops = { .suspend = xenbus_dev_suspend, .resume = xenbus_frontend_dev_resume, @@ -146,7 +168,7 @@ static struct xen_bus_type xenbus_frontend = { .uevent = xenbus_uevent_frontend, .probe = xenbus_frontend_dev_probe, .remove = xenbus_dev_remove, - .shutdown = xenbus_dev_shutdown, + .shutdown = xenbus_frontend_dev_shutdown, .dev_groups = xenbus_dev_groups, .pm = &xenbus_pm_ops, diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 497f979018c2..5c794f4b051a 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -908,6 +908,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct afs_vnode *dvnode = AFS_FS_I(dir); + struct afs_fid fid = {}; struct inode *inode; struct dentry *d; struct key *key; @@ -951,21 +952,18 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, afs_stat_v(dvnode, n_lookup); inode = afs_do_lookup(dir, dentry, key); key_put(key); - if (inode == ERR_PTR(-ENOENT)) { + if (inode == ERR_PTR(-ENOENT)) inode = afs_try_auto_mntpt(dentry, dir); - } else { - dentry->d_fsdata = - (void *)(unsigned long)dvnode->status.data_version; - } + + if (!IS_ERR_OR_NULL(inode)) + fid = AFS_FS_I(inode)->fid; + d = d_splice_alias(inode, dentry); if (!IS_ERR_OR_NULL(d)) { d->d_fsdata = dentry->d_fsdata; - trace_afs_lookup(dvnode, &d->d_name, - inode ? AFS_FS_I(inode) : NULL); + trace_afs_lookup(dvnode, &d->d_name, &fid); } else { - trace_afs_lookup(dvnode, &dentry->d_name, - IS_ERR_OR_NULL(inode) ? 
NULL - : AFS_FS_I(inode)); + trace_afs_lookup(dvnode, &dentry->d_name, &fid); } return d; } diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index ee834ef7beb4..43e1660f450f 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -447,7 +447,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, if (blkcg_css) { bio->bi_opf |= REQ_CGROUP_PUNT; - bio_associate_blkg_from_css(bio, blkcg_css); + kthread_associate_blkcg(blkcg_css); } refcount_set(&cb->pending_bios, 1); @@ -491,6 +491,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, bio->bi_opf = REQ_OP_WRITE | write_flags; bio->bi_private = cb; bio->bi_end_io = end_compressed_bio_write; + if (blkcg_css) + bio->bi_opf |= REQ_CGROUP_PUNT; bio_add_page(bio, page, PAGE_SIZE, 0); } if (bytes_left < PAGE_SIZE) { @@ -517,6 +519,9 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, bio_endio(bio); } + if (blkcg_css) + kthread_associate_blkcg(NULL); + return 0; } diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 5b6e86aaf2e1..24658b5a5787 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -379,7 +379,7 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, for (node = rb_first(tm_root); node; node = next) { next = rb_next(node); tm = rb_entry(node, struct tree_mod_elem, node); - if (tm->seq > min_seq) + if (tm->seq >= min_seq) continue; rb_erase(node, tm_root); kfree(tm); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b2e8fd8a8e59..54efb21c2727 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2787,7 +2787,7 @@ struct btrfs_inode_extref *btrfs_find_name_in_ext_backref( /* file-item.c */ struct btrfs_dio_private; int btrfs_del_csums(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, u64 bytenr, u64 len); + struct btrfs_root *root, u64 bytenr, u64 len); blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst); blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 153f71a5bba9..274318e9114e 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1869,8 +1869,8 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans, btrfs_pin_extent(fs_info, head->bytenr, head->num_bytes, 1); if (head->is_data) { - ret = btrfs_del_csums(trans, fs_info, head->bytenr, - head->num_bytes); + ret = btrfs_del_csums(trans, fs_info->csum_root, + head->bytenr, head->num_bytes); } } @@ -3175,7 +3175,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, btrfs_release_path(path); if (is_data) { - ret = btrfs_del_csums(trans, info, bytenr, num_bytes); + ret = btrfs_del_csums(trans, info->csum_root, bytenr, + num_bytes); if (ret) { btrfs_abort_transaction(trans, ret); goto out; @@ -3799,6 +3800,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info, u64 flags, int delalloc) { int ret = 0; + int cache_block_group_error = 0; struct btrfs_free_cluster *last_ptr = NULL; struct btrfs_block_group *block_group = NULL; struct find_free_extent_ctl ffe_ctl = {0}; @@ -3958,7 +3960,20 @@ have_block_group: if (unlikely(!ffe_ctl.cached)) { ffe_ctl.have_caching_bg = true; ret = btrfs_cache_block_group(block_group, 0); - BUG_ON(ret < 0); + + /* + * If we get ENOMEM here or something else we want to + * try other block groups, because it may not be fatal. 
+ * However if we can't find anything else we need to + * save our return here so that we return the actual + * error that caused problems, not ENOSPC. + */ + if (ret < 0) { + if (!cache_block_group_error) + cache_block_group_error = ret; + ret = 0; + goto loop; + } ret = 0; } @@ -4045,7 +4060,7 @@ loop: if (ret > 0) goto search; - if (ret == -ENOSPC) { + if (ret == -ENOSPC && !cache_block_group_error) { /* * Use ffe_ctl->total_free_space as fallback if we can't find * any contiguous hole. @@ -4056,6 +4071,8 @@ loop: space_info->max_extent_size = ffe_ctl.max_extent_size; spin_unlock(&space_info->lock); ins->offset = ffe_ctl.max_extent_size; + } else if (ret == -ENOSPC) { + ret = cache_block_group_error; } return ret; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index eb8bd0258360..2f4802f405a2 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -5074,12 +5074,14 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, return eb; eb = alloc_dummy_extent_buffer(fs_info, start); if (!eb) - return NULL; + return ERR_PTR(-ENOMEM); eb->fs_info = fs_info; again: ret = radix_tree_preload(GFP_NOFS); - if (ret) + if (ret) { + exists = ERR_PTR(ret); goto free_eb; + } spin_lock(&fs_info->buffer_lock); ret = radix_tree_insert(&fs_info->buffer_radix, start >> PAGE_SHIFT, eb); diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 3270a40b0777..b1bfdc5c1387 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -590,9 +590,9 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info, * range of bytes. */ int btrfs_del_csums(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, u64 bytenr, u64 len) + struct btrfs_root *root, u64 bytenr, u64 len) { - struct btrfs_root *root = fs_info->csum_root; + struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_path *path; struct btrfs_key key; u64 end_byte = bytenr + len; @@ -602,6 +602,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); int blocksize_bits = fs_info->sb->s_blocksize_bits; + ASSERT(root == fs_info->csum_root || + root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); + path = btrfs_alloc_path(); if (!path) return -ENOMEM; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 0cb43b682789..8d47c76b7bd1 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2599,8 +2599,8 @@ int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path, } } - if (clone_info) { - u64 clone_len = drop_end - cur_offset; + if (clone_info && drop_end > clone_info->file_offset) { + u64 clone_len = drop_end - clone_info->file_offset; ret = btrfs_insert_clone_extent(trans, inode, path, clone_info, clone_len); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 56032c518b26..c70baafb2a39 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1479,10 +1479,10 @@ next_slot: disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); /* - * If extent we got ends before our range starts, skip - * to next extent + * If the extent we got ends before our current offset, + * skip to the next extent. 
*/ - if (extent_end <= start) { + if (extent_end <= cur_offset) { path->slots[0]++; goto next_slot; } @@ -4238,18 +4238,30 @@ out: } static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, - struct inode *dir, u64 objectid, - const char *name, int name_len) + struct inode *dir, struct dentry *dentry) { struct btrfs_root *root = BTRFS_I(dir)->root; + struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_dir_item *di; struct btrfs_key key; + const char *name = dentry->d_name.name; + int name_len = dentry->d_name.len; u64 index; int ret; + u64 objectid; u64 dir_ino = btrfs_ino(BTRFS_I(dir)); + if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) { + objectid = inode->root->root_key.objectid; + } else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { + objectid = inode->location.objectid; + } else { + WARN_ON(1); + return -EINVAL; + } + path = btrfs_alloc_path(); if (!path) return -ENOMEM; @@ -4271,13 +4283,16 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, } btrfs_release_path(path); - ret = btrfs_del_root_ref(trans, objectid, root->root_key.objectid, - dir_ino, &index, name, name_len); - if (ret < 0) { - if (ret != -ENOENT) { - btrfs_abort_transaction(trans, ret); - goto out; - } + /* + * This is a placeholder inode for a subvolume we didn't have a + * reference to at the time of the snapshot creation. In the meantime + * we could have renamed the real subvol link into our snapshot, so + * depending on btrfs_del_root_ref to return -ENOENT here is incorret. + * Instead simply lookup the dir_index_item for this entry so we can + * remove it. Otherwise we know we have a ref to the root and we can + * call btrfs_del_root_ref, and it _shouldn't_ fail. + */ + if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { di = btrfs_search_dir_index_item(root, path, dir_ino, name, name_len); if (IS_ERR_OR_NULL(di)) { @@ -4292,8 +4307,16 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); index = key.offset; + btrfs_release_path(path); + } else { + ret = btrfs_del_root_ref(trans, objectid, + root->root_key.objectid, dir_ino, + &index, name, name_len); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto out; + } } - btrfs_release_path(path); ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index); if (ret) { @@ -4487,8 +4510,7 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry) btrfs_record_snapshot_destroy(trans, BTRFS_I(dir)); - ret = btrfs_unlink_subvol(trans, dir, dest->root_key.objectid, - dentry->d_name.name, dentry->d_name.len); + ret = btrfs_unlink_subvol(trans, dir, dentry); if (ret) { err = ret; btrfs_abort_transaction(trans, ret); @@ -4583,10 +4605,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) return PTR_ERR(trans); if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { - err = btrfs_unlink_subvol(trans, dir, - BTRFS_I(inode)->location.objectid, - dentry->d_name.name, - dentry->d_name.len); + err = btrfs_unlink_subvol(trans, dir, dentry); goto out; } @@ -5728,7 +5747,6 @@ static void inode_tree_add(struct inode *inode) static void inode_tree_del(struct inode *inode) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; int empty = 0; @@ -5741,7 +5759,6 @@ static void inode_tree_del(struct inode *inode) spin_unlock(&root->inode_lock); if (empty && 
btrfs_root_refs(&root->root_item) == 0) { - synchronize_srcu(&fs_info->subvol_srcu); spin_lock(&root->inode_lock); empty = RB_EMPTY_ROOT(&root->inode_tree); spin_unlock(&root->inode_lock); @@ -9538,7 +9555,6 @@ static int btrfs_rename_exchange(struct inode *old_dir, u64 new_ino = btrfs_ino(BTRFS_I(new_inode)); u64 old_idx = 0; u64 new_idx = 0; - u64 root_objectid; int ret; bool root_log_pinned = false; bool dest_log_pinned = false; @@ -9556,9 +9572,8 @@ static int btrfs_rename_exchange(struct inode *old_dir, btrfs_init_log_ctx(&ctx_dest, new_inode); /* close the race window with snapshot create/destroy ioctl */ - if (old_ino == BTRFS_FIRST_FREE_OBJECTID) - down_read(&fs_info->subvol_sem); - if (new_ino == BTRFS_FIRST_FREE_OBJECTID) + if (old_ino == BTRFS_FIRST_FREE_OBJECTID || + new_ino == BTRFS_FIRST_FREE_OBJECTID) down_read(&fs_info->subvol_sem); /* @@ -9645,10 +9660,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, /* src is a subvolume */ if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { - root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; - ret = btrfs_unlink_subvol(trans, old_dir, root_objectid, - old_dentry->d_name.name, - old_dentry->d_name.len); + ret = btrfs_unlink_subvol(trans, old_dir, old_dentry); } else { /* src is an inode */ ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir), BTRFS_I(old_dentry->d_inode), @@ -9664,10 +9676,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, /* dest is a subvolume */ if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { - root_objectid = BTRFS_I(new_inode)->root->root_key.objectid; - ret = btrfs_unlink_subvol(trans, new_dir, root_objectid, - new_dentry->d_name.name, - new_dentry->d_name.len); + ret = btrfs_unlink_subvol(trans, new_dir, new_dentry); } else { /* dest is an inode */ ret = __btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir), BTRFS_I(new_dentry->d_inode), @@ -9792,9 +9801,8 @@ out_fail: ret = ret ? 
ret : ret2; } out_notrans: - if (new_ino == BTRFS_FIRST_FREE_OBJECTID) - up_read(&fs_info->subvol_sem); - if (old_ino == BTRFS_FIRST_FREE_OBJECTID) + if (new_ino == BTRFS_FIRST_FREE_OBJECTID || + old_ino == BTRFS_FIRST_FREE_OBJECTID) up_read(&fs_info->subvol_sem); ASSERT(list_empty(&ctx_root.list)); @@ -9866,7 +9874,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_inode = d_inode(new_dentry); struct inode *old_inode = d_inode(old_dentry); u64 index = 0; - u64 root_objectid; int ret; u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); bool log_pinned = false; @@ -9974,10 +9981,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, BTRFS_I(old_inode), 1); if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { - root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; - ret = btrfs_unlink_subvol(trans, old_dir, root_objectid, - old_dentry->d_name.name, - old_dentry->d_name.len); + ret = btrfs_unlink_subvol(trans, old_dir, old_dentry); } else { ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir), BTRFS_I(d_inode(old_dentry)), @@ -9996,10 +10000,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, new_inode->i_ctime = current_time(new_inode); if (unlikely(btrfs_ino(BTRFS_I(new_inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { - root_objectid = BTRFS_I(new_inode)->location.objectid; - ret = btrfs_unlink_subvol(trans, new_dir, root_objectid, - new_dentry->d_name.name, - new_dentry->d_name.len); + ret = btrfs_unlink_subvol(trans, new_dir, new_dentry); BUG_ON(new_inode->i_nlink == 0); } else { ret = btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir), diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index a1ee0b775e65..12ae31e1813e 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -704,11 +704,17 @@ static noinline int create_subvol(struct inode *dir, btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2); ret = btrfs_update_inode(trans, root, dir); - BUG_ON(ret); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto fail; + } ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid, btrfs_ino(BTRFS_I(dir)), index, name, namelen); - BUG_ON(ret); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto fail; + } ret = btrfs_uuid_tree_add(trans, root_item->uuid, BTRFS_UUID_KEY_SUBVOL, objectid); @@ -3720,24 +3726,18 @@ process_slot: ret = 0; if (last_dest_end < destoff + len) { - struct btrfs_clone_extent_info clone_info = { 0 }; /* - * We have an implicit hole (NO_HOLES feature is enabled) that - * fully or partially overlaps our cloning range at its end. + * We have an implicit hole that fully or partially overlaps our + * cloning range at its end. This means that we either have the + * NO_HOLES feature enabled or the implicit hole happened due to + * mixing buffered and direct IO writes against this file. */ btrfs_release_path(path); path->leave_spinning = 0; - /* - * We are dealing with a hole and our clone_info already has a - * disk_offset of 0, we only need to fill the data length and - * file offset. 
- */ - clone_info.data_len = destoff + len - last_dest_end; - clone_info.file_offset = last_dest_end; ret = btrfs_punch_hole_range(inode, path, last_dest_end, destoff + len - 1, - &clone_info, &trans); + NULL, &trans); if (ret) goto out; @@ -4252,7 +4252,19 @@ static long btrfs_ioctl_scrub(struct file *file, void __user *arg) &sa->progress, sa->flags & BTRFS_SCRUB_READONLY, 0); - if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa))) + /* + * Copy scrub args to user space even if btrfs_scrub_dev() returned an + * error. This is important as it allows user space to know how much + * progress scrub has done. For example, if scrub is canceled we get + * -ECANCELED from btrfs_scrub_dev() and return that error back to user + * space. Later user space can inspect the progress from the structure + * btrfs_ioctl_scrub_args and resume scrub from where it left off + * previously (btrfs-progs does this). + * If we fail to copy the btrfs_ioctl_scrub_args structure to user space + * then return -EFAULT to signal the structure was not copied or it may + * be corrupt and unreliable due to a partial copy. + */ + if (copy_to_user(arg, sa, sizeof(*sa))) ret = -EFAULT; if (!(sa->flags & BTRFS_SCRUB_READONLY)) diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 93aeb2e539a4..39fc8c3d3a75 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -2423,8 +2423,12 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 nr_old_roots = 0; int ret = 0; + /* + * If quotas get disabled meanwhile, the resouces need to be freed and + * we can't just exit here. + */ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) - return 0; + goto out_free; if (new_roots) { if (!maybe_fs_roots(new_roots)) @@ -3232,12 +3236,12 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)) { btrfs_warn(fs_info, - "qgroup rescan init failed, qgroup is not enabled"); + "qgroup rescan init failed, qgroup rescan is not queued"); ret = -EINVAL; } else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) { btrfs_warn(fs_info, - "qgroup rescan init failed, qgroup rescan is not queued"); + "qgroup rescan init failed, qgroup is not enabled"); ret = -EINVAL; } diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index d897a8e5e430..da5abd62db22 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -517,6 +517,34 @@ static int update_backref_cache(struct btrfs_trans_handle *trans, return 1; } +static bool reloc_root_is_dead(struct btrfs_root *root) +{ + /* + * Pair with set_bit/clear_bit in clean_dirty_subvols and + * btrfs_update_reloc_root. We need to see the updated bit before + * trying to access reloc_root + */ + smp_rmb(); + if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state)) + return true; + return false; +} + +/* + * Check if this subvolume tree has valid reloc tree. + * + * Reloc tree after swap is considered dead, thus not considered as valid. + * This is enough for most callers, as they don't distinguish dead reloc root + * from no reloc root. But should_ignore_root() below is a special case. 
+ */ +static bool have_reloc_root(struct btrfs_root *root) +{ + if (reloc_root_is_dead(root)) + return false; + if (!root->reloc_root) + return false; + return true; +} static int should_ignore_root(struct btrfs_root *root) { @@ -525,6 +553,10 @@ static int should_ignore_root(struct btrfs_root *root) if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) return 0; + /* This root has been merged with its reloc tree, we can ignore it */ + if (reloc_root_is_dead(root)) + return 1; + reloc_root = root->reloc_root; if (!reloc_root) return 0; @@ -1439,7 +1471,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, * The subvolume has reloc tree but the swap is finished, no need to * create/update the dead reloc tree */ - if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state)) + if (reloc_root_is_dead(root)) return 0; if (root->reloc_root) { @@ -1478,8 +1510,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, struct btrfs_root_item *root_item; int ret; - if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state) || - !root->reloc_root) + if (!have_reloc_root(root)) goto out; reloc_root = root->reloc_root; @@ -1489,6 +1520,11 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, if (fs_info->reloc_ctl->merge_reloc_tree && btrfs_root_refs(root_item) == 0) { set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state); + /* + * Mark the tree as dead before we change reloc_root so + * have_reloc_root will not touch it from now on. + */ + smp_wmb(); __del_reloc_root(reloc_root); } @@ -2201,6 +2237,11 @@ static int clean_dirty_subvols(struct reloc_control *rc) if (ret2 < 0 && !ret) ret = ret2; } + /* + * Need barrier to ensure clear_bit() only happens after + * root->reloc_root = NULL. Pairs with have_reloc_root. + */ + smp_wmb(); clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state); btrfs_put_fs_root(root); } else { @@ -4552,6 +4593,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) fs_root = read_fs_root(fs_info, reloc_root->root_key.offset); if (IS_ERR(fs_root)) { err = PTR_ERR(fs_root); + list_add_tail(&reloc_root->root_list, &reloc_roots); goto out_free; } @@ -4717,7 +4759,7 @@ void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending, struct btrfs_root *root = pending->root; struct reloc_control *rc = root->fs_info->reloc_ctl; - if (!root->reloc_root || !rc) + if (!rc || !have_reloc_root(root)) return; if (!rc->merge_reloc_tree) @@ -4751,7 +4793,7 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, struct reloc_control *rc = root->fs_info->reloc_ctl; int ret; - if (!root->reloc_root || !rc) + if (!rc || !have_reloc_root(root)) return 0; rc = root->fs_info->reloc_ctl; diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 3b17b647d002..612411c74550 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -376,11 +376,13 @@ again: leaf = path->nodes[0]; ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); - - WARN_ON(btrfs_root_ref_dirid(leaf, ref) != dirid); - WARN_ON(btrfs_root_ref_name_len(leaf, ref) != name_len); ptr = (unsigned long)(ref + 1); - WARN_ON(memcmp_extent_buffer(leaf, name, ptr, name_len)); + if ((btrfs_root_ref_dirid(leaf, ref) != dirid) || + (btrfs_root_ref_name_len(leaf, ref) != name_len) || + memcmp_extent_buffer(leaf, name, ptr, name_len)) { + err = -ENOENT; + goto out; + } *sequence = btrfs_root_ref_sequence(leaf, ref); ret = btrfs_del_item(trans, tree_root, path); diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index ae2db5eb1549..091e5bc8c7ea 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ 
-7084,12 +7084,6 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg) spin_unlock(&send_root->root_item_lock); /* - * This is done when we lookup the root, it should already be complete - * by the time we get here. - */ - WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE); - - /* * Userspace tools do the checks and warn the user if it's * not RO. */ diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c index 1a846bf6e197..914eea5ba6a7 100644 --- a/fs/btrfs/tests/free-space-tree-tests.c +++ b/fs/btrfs/tests/free-space-tree-tests.c @@ -452,9 +452,9 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize, root->fs_info->tree_root = root; root->node = alloc_test_extent_buffer(root->fs_info, nodesize); - if (!root->node) { + if (IS_ERR(root->node)) { test_std_err(TEST_ALLOC_EXTENT_BUFFER); - ret = -ENOMEM; + ret = PTR_ERR(root->node); goto out; } btrfs_set_header_level(root->node, 0); diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c index 09aaca1efd62..ac035a6fa003 100644 --- a/fs/btrfs/tests/qgroup-tests.c +++ b/fs/btrfs/tests/qgroup-tests.c @@ -484,9 +484,9 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize) * *cough*backref walking code*cough* */ root->node = alloc_test_extent_buffer(root->fs_info, nodesize); - if (!root->node) { + if (IS_ERR(root->node)) { test_err("couldn't allocate dummy buffer"); - ret = -ENOMEM; + ret = PTR_ERR(root->node); goto out; } btrfs_set_header_level(root->node, 0); diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 493d4d9e0f79..97f3520b8d98 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -227,7 +227,7 @@ static int check_extent_data_item(struct extent_buffer *leaf, */ if (item_size < BTRFS_FILE_EXTENT_INLINE_DATA_START) { file_extent_err(leaf, slot, - "invalid item size, have %u expect [%lu, %u)", + "invalid item size, have %u expect [%zu, %u)", item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START, SZ_4K); return -EUCLEAN; @@ -332,7 +332,7 @@ static int check_extent_data_item(struct extent_buffer *leaf, } static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key, - int slot) + int slot, struct btrfs_key *prev_key) { struct btrfs_fs_info *fs_info = leaf->fs_info; u32 sectorsize = fs_info->sectorsize; @@ -356,6 +356,20 @@ static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key, btrfs_item_size_nr(leaf, slot), csumsize); return -EUCLEAN; } + if (slot > 0 && prev_key->type == BTRFS_EXTENT_CSUM_KEY) { + u64 prev_csum_end; + u32 prev_item_size; + + prev_item_size = btrfs_item_size_nr(leaf, slot - 1); + prev_csum_end = (prev_item_size / csumsize) * sectorsize; + prev_csum_end += prev_key->offset; + if (prev_csum_end > key->offset) { + generic_err(leaf, slot - 1, +"csum end range (%llu) goes beyond the start range (%llu) of the next csum item", + prev_csum_end, key->offset); + return -EUCLEAN; + } + } return 0; } @@ -1355,7 +1369,7 @@ static int check_leaf_item(struct extent_buffer *leaf, ret = check_extent_data_item(leaf, key, slot, prev_key); break; case BTRFS_EXTENT_CSUM_KEY: - ret = check_csum_item(leaf, key, slot); + ret = check_csum_item(leaf, key, slot, prev_key); break; case BTRFS_DIR_ITEM_KEY: case BTRFS_DIR_INDEX_KEY: diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 6f757361db53..d3f115909ff0 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -808,7 +808,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, 
struct btrfs_ordered_sum, list); if (!ret) - ret = btrfs_del_csums(trans, fs_info, + ret = btrfs_del_csums(trans, + fs_info->csum_root, sums->bytenr, sums->len); if (!ret) @@ -3909,6 +3910,28 @@ static int log_inode_item(struct btrfs_trans_handle *trans, return 0; } +static int log_csums(struct btrfs_trans_handle *trans, + struct btrfs_root *log_root, + struct btrfs_ordered_sum *sums) +{ + int ret; + + /* + * Due to extent cloning, we might have logged a csum item that covers a + * subrange of a cloned extent, and later we can end up logging a csum + * item for a larger subrange of the same extent or the entire range. + * This would leave csum items in the log tree that cover the same range + * and break the searches for checksums in the log tree, resulting in + * some checksums missing in the fs/subvolume tree. So just delete (or + * trim and adjust) any existing csum items in the log for this range. + */ + ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len); + if (ret) + return ret; + + return btrfs_csum_file_blocks(trans, log_root, sums); +} + static noinline int copy_items(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_path *dst_path, @@ -4054,7 +4077,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, struct btrfs_ordered_sum, list); if (!ret) - ret = btrfs_csum_file_blocks(trans, log, sums); + ret = log_csums(trans, log, sums); list_del(&sums->list); kfree(sums); } @@ -4274,7 +4297,7 @@ static int log_extent_csums(struct btrfs_trans_handle *trans, struct btrfs_ordered_sum, list); if (!ret) - ret = btrfs_csum_file_blocks(trans, log_root, sums); + ret = log_csums(trans, log_root, sums); list_del(&sums->list); kfree(sums); } @@ -6294,9 +6317,28 @@ again: wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key); if (IS_ERR(wc.replay_dest)) { ret = PTR_ERR(wc.replay_dest); + + /* + * We didn't find the subvol, likely because it was + * deleted. This is ok, simply skip this log and go to + * the next one. + * + * We need to exclude the root because we can't have + * other log replays overwriting this log as we'll read + * it back in a few more times. This will keep our + * block from being modified, and we'll just bail for + * each subsequent pass. 
+ */ + if (ret == -ENOENT) + ret = btrfs_pin_extent_for_log_replay(fs_info, + log->node->start, + log->node->len); free_extent_buffer(log->node); free_extent_buffer(log->commit_root); kfree(log); + + if (!ret) + goto next; btrfs_handle_fs_error(fs_info, ret, "Couldn't read target root for tree log recovery."); goto error; @@ -6328,7 +6370,6 @@ again: &root->highest_objectid); } - key.offset = found_key.offset - 1; wc.replay_dest->log_root = NULL; free_extent_buffer(log->node); free_extent_buffer(log->commit_root); @@ -6336,9 +6377,10 @@ again: if (ret) goto error; - +next: if (found_key.offset == 0) break; + key.offset = found_key.offset - 1; } btrfs_release_path(path); diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c index 91caab63bdf5..76b84f2397b1 100644 --- a/fs/btrfs/uuid-tree.c +++ b/fs/btrfs/uuid-tree.c @@ -324,6 +324,8 @@ again_search_slot: } if (ret < 0 && ret != -ENOENT) goto out; + key.offset++; + goto again_search_slot; } item_size -= sizeof(subid_le); offset += sizeof(subid_le); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d8e5560db285..9b78e720c697 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -61,7 +61,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { [BTRFS_RAID_RAID1C3] = { .sub_stripes = 1, .dev_stripes = 1, - .devs_max = 0, + .devs_max = 3, .devs_min = 3, .tolerated_failures = 2, .devs_increment = 3, @@ -73,7 +73,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { [BTRFS_RAID_RAID1C4] = { .sub_stripes = 1, .dev_stripes = 1, - .devs_max = 0, + .devs_max = 4, .devs_min = 4, .tolerated_failures = 3, .devs_increment = 4, @@ -3881,7 +3881,11 @@ int btrfs_balance(struct btrfs_fs_info *fs_info, } } - num_devices = btrfs_num_devices(fs_info); + /* + * rw_devices will not change at the moment, device add/delete/replace + * are excluded by EXCL_OP + */ + num_devices = fs_info->fs_devices->rw_devices; /* * SINGLE profile on-disk has no profile bit, but in-memory we have a diff --git a/fs/buffer.c b/fs/buffer.c index d8c7242426bb..18a87ec8a465 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -3031,11 +3031,9 @@ static void end_bio_bh_io_sync(struct bio *bio) * errors, this only handles the "we need to be able to * do IO at the final sector" case. */ -void guard_bio_eod(int op, struct bio *bio) +void guard_bio_eod(struct bio *bio) { sector_t maxsector; - struct bio_vec *bvec = bio_last_bvec_all(bio); - unsigned truncated_bytes; struct hd_struct *part; rcu_read_lock(); @@ -3061,28 +3059,7 @@ void guard_bio_eod(int op, struct bio *bio) if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) return; - /* Uhhuh. We've got a bio that straddles the device size! */ - truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9); - - /* - * The bio contains more than one segment which spans EOD, just return - * and let IO layer turn it into an EIO - */ - if (truncated_bytes > bvec->bv_len) - return; - - /* Truncate the bio.. 
*/ - bio->bi_iter.bi_size -= truncated_bytes; - bvec->bv_len -= truncated_bytes; - - /* ..and clear the end of the buffer for reads */ - if (op == REQ_OP_READ) { - struct bio_vec bv; - - mp_bvec_last_segment(bvec, &bv); - zero_user(bv.bv_page, bv.bv_offset + bv.bv_len, - truncated_bytes); - } + bio_truncate(bio, maxsector << 9); } static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, @@ -3118,15 +3095,15 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, bio->bi_end_io = end_bio_bh_io_sync; bio->bi_private = bh; - /* Take care of bh's that straddle the end of the device */ - guard_bio_eod(op, bio); - if (buffer_meta(bh)) op_flags |= REQ_META; if (buffer_prio(bh)) op_flags |= REQ_PRIO; bio_set_op_attrs(bio, op, op_flags); + /* Take care of bh's that straddle the end of the device */ + guard_bio_eod(bio); + if (wbc) { wbc_init_bio(wbc, bio); wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size); diff --git a/fs/char_dev.c b/fs/char_dev.c index 00dfe17871ac..c5e6eff5a381 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -352,7 +352,7 @@ static struct kobject *cdev_get(struct cdev *p) if (owner && !try_module_get(owner)) return NULL; - kobj = kobject_get(&p->kobj); + kobj = kobject_get_unless_zero(&p->kobj); if (!kobj) module_put(owner); return kobj; diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index ce9bac756c2a..40705e862451 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -1693,6 +1693,7 @@ struct cifs_fattr { struct timespec64 cf_atime; struct timespec64 cf_mtime; struct timespec64 cf_ctime; + u32 cf_cifstag; }; static inline void free_dfs_info_param(struct dfs_info3_param *param) diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 3925a7bfc74d..d17587c2c4ab 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -139,6 +139,28 @@ retry: dput(dentry); } +static bool reparse_file_needs_reval(const struct cifs_fattr *fattr) +{ + if (!(fattr->cf_cifsattrs & ATTR_REPARSE)) + return false; + /* + * The DFS tags should be only intepreted by server side as per + * MS-FSCC 2.1.2.1, but let's include them anyway. + * + * Besides, if cf_cifstag is unset (0), then we still need it to be + * revalidated to know exactly what reparse point it is. + */ + switch (fattr->cf_cifstag) { + case IO_REPARSE_TAG_DFS: + case IO_REPARSE_TAG_DFSR: + case IO_REPARSE_TAG_SYMLINK: + case IO_REPARSE_TAG_NFS: + case 0: + return true; + } + return false; +} + static void cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb) { @@ -158,7 +180,7 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb) * is a symbolic link, DFS referral or a reparse point with a direct * access like junctions, deduplicated files, NFS symlinks. 
*/ - if (fattr->cf_cifsattrs & ATTR_REPARSE) + if (reparse_file_needs_reval(fattr)) fattr->cf_flags |= CIFS_FATTR_NEED_REVAL; /* non-unix readdir doesn't provide nlink */ @@ -194,19 +216,37 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb) } } +static void __dir_info_to_fattr(struct cifs_fattr *fattr, const void *info) +{ + const FILE_DIRECTORY_INFO *fi = info; + + memset(fattr, 0, sizeof(*fattr)); + fattr->cf_cifsattrs = le32_to_cpu(fi->ExtFileAttributes); + fattr->cf_eof = le64_to_cpu(fi->EndOfFile); + fattr->cf_bytes = le64_to_cpu(fi->AllocationSize); + fattr->cf_createtime = le64_to_cpu(fi->CreationTime); + fattr->cf_atime = cifs_NTtimeToUnix(fi->LastAccessTime); + fattr->cf_ctime = cifs_NTtimeToUnix(fi->ChangeTime); + fattr->cf_mtime = cifs_NTtimeToUnix(fi->LastWriteTime); +} + void cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info, struct cifs_sb_info *cifs_sb) { - memset(fattr, 0, sizeof(*fattr)); - fattr->cf_cifsattrs = le32_to_cpu(info->ExtFileAttributes); - fattr->cf_eof = le64_to_cpu(info->EndOfFile); - fattr->cf_bytes = le64_to_cpu(info->AllocationSize); - fattr->cf_createtime = le64_to_cpu(info->CreationTime); - fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime); - fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime); - fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime); + __dir_info_to_fattr(fattr, info); + cifs_fill_common_info(fattr, cifs_sb); +} +static void cifs_fulldir_info_to_fattr(struct cifs_fattr *fattr, + SEARCH_ID_FULL_DIR_INFO *info, + struct cifs_sb_info *cifs_sb) +{ + __dir_info_to_fattr(fattr, info); + + /* See MS-FSCC 2.4.18 FileIdFullDirectoryInformation */ + if (fattr->cf_cifsattrs & ATTR_REPARSE) + fattr->cf_cifstag = le32_to_cpu(info->EaSize); cifs_fill_common_info(fattr, cifs_sb); } @@ -755,6 +795,11 @@ static int cifs_filldir(char *find_entry, struct file *file, (FIND_FILE_STANDARD_INFO *)find_entry, cifs_sb); break; + case SMB_FIND_FILE_ID_FULL_DIR_INFO: + cifs_fulldir_info_to_fattr(&fattr, + (SEARCH_ID_FULL_DIR_INFO *)find_entry, + cifs_sb); + break; default: cifs_dir_info_to_fattr(&fattr, (FILE_DIRECTORY_INFO *)find_entry, diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index 8b0b512c5792..afe1f03aabe3 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c @@ -67,7 +67,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, goto out; - if (oparms->tcon->use_resilient) { + if (oparms->tcon->use_resilient) { /* default timeout is 0, servers pick default (120 seconds) */ nr_ioctl_req.Timeout = cpu_to_le32(oparms->tcon->handle_timeout); diff --git a/fs/direct-io.c b/fs/direct-io.c index 0ec4f270139f..00b4d15bb811 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -39,6 +39,8 @@ #include <linux/atomic.h> #include <linux/prefetch.h> +#include "internal.h" + /* * How many user pages to map in one call to get_user_pages(). 
This determines * the size of a structure in the slab cache diff --git a/fs/drop_caches.c b/fs/drop_caches.c index d31b6c72b476..dc1a1d5d825b 100644 --- a/fs/drop_caches.c +++ b/fs/drop_caches.c @@ -35,11 +35,11 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused) spin_unlock(&inode->i_lock); spin_unlock(&sb->s_inode_list_lock); - cond_resched(); invalidate_mapping_pages(inode->i_mapping, 0, -1); iput(toput_inode); toput_inode = inode; + cond_resched(); spin_lock(&sb->s_inode_list_lock); } spin_unlock(&sb->s_inode_list_lock); diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index d4d4fdfac1a6..1ee04e76bbe0 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c @@ -133,10 +133,13 @@ static void debug_print_tree(struct ext4_sb_info *sbi) { struct rb_node *node; struct ext4_system_zone *entry; + struct ext4_system_blocks *system_blks; int first = 1; printk(KERN_INFO "System zones: "); - node = rb_first(&sbi->system_blks->root); + rcu_read_lock(); + system_blks = rcu_dereference(sbi->system_blks); + node = rb_first(&system_blks->root); while (node) { entry = rb_entry(node, struct ext4_system_zone, node); printk(KERN_CONT "%s%llu-%llu", first ? "" : ", ", @@ -144,6 +147,7 @@ static void debug_print_tree(struct ext4_sb_info *sbi) first = 0; node = rb_next(node); } + rcu_read_unlock(); printk(KERN_CONT "\n"); } diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index 9fdd2b269d61..9f00fc0bf21d 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -72,6 +72,7 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, const char *error_msg = NULL; const int rlen = ext4_rec_len_from_disk(de->rec_len, dir->i_sb->s_blocksize); + const int next_offset = ((char *) de - buf) + rlen; if (unlikely(rlen < EXT4_DIR_REC_LEN(1))) error_msg = "rec_len is smaller than minimal"; @@ -79,8 +80,11 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, error_msg = "rec_len % 4 != 0"; else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len))) error_msg = "rec_len is too small for name_len"; - else if (unlikely(((char *) de - buf) + rlen > size)) + else if (unlikely(next_offset > size)) error_msg = "directory entry overrun"; + else if (unlikely(next_offset > size - EXT4_DIR_REC_LEN(1) && + next_offset != size)) + error_msg = "directory entry too close to block end"; else if (unlikely(le32_to_cpu(de->inode) > le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))) error_msg = "inode out of bounds"; diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index dc333e8e51e8..8ca4a23129aa 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -921,8 +921,8 @@ repeat_in_this_group: if (!handle) { BUG_ON(nblocks <= 0); handle = __ext4_journal_start_sb(dir->i_sb, line_no, - handle_type, nblocks, - 0, 0); + handle_type, nblocks, 0, + ext4_trans_default_revoke_credits(sb)); if (IS_ERR(handle)) { err = PTR_ERR(handle); ext4_std_error(sb, err); diff --git a/fs/ext4/inode-test.c b/fs/ext4/inode-test.c index 92a9da1774aa..bbce1c328d85 100644 --- a/fs/ext4/inode-test.c +++ b/fs/ext4/inode-test.c @@ -25,7 +25,7 @@ * For constructing the negative timestamp lower bound value. * binary: 10000000 00000000 00000000 00000000 */ -#define LOWER_MSB_1 (-0x80000000L) +#define LOWER_MSB_1 (-(UPPER_MSB_0) - 1L) /* avoid overflow */ /* * For constructing the negative timestamp upper bound value. 
* binary: 11111111 11111111 11111111 11111111 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 28f28de0c1b6..629a25d999f0 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -5692,7 +5692,7 @@ int ext4_expand_extra_isize(struct inode *inode, error = ext4_journal_get_write_access(handle, iloc->bh); if (error) { brelse(iloc->bh); - goto out_stop; + goto out_unlock; } error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc, @@ -5702,8 +5702,8 @@ int ext4_expand_extra_isize(struct inode *inode, if (!error) error = rc; +out_unlock: ext4_write_unlock_xattr(inode, &no_expand); -out_stop: ext4_journal_stop(handle); return error; } diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index a856997d87b5..1cb42d940784 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -2164,7 +2164,9 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, struct buffer_head *bh = NULL; struct ext4_dir_entry_2 *de; struct super_block *sb; +#ifdef CONFIG_UNICODE struct ext4_sb_info *sbi; +#endif struct ext4_filename fname; int retval; int dx_fallback=0; @@ -2176,12 +2178,12 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, csum_size = sizeof(struct ext4_dir_entry_tail); sb = dir->i_sb; - sbi = EXT4_SB(sb); blocksize = sb->s_blocksize; if (!dentry->d_name.len) return -EINVAL; #ifdef CONFIG_UNICODE + sbi = EXT4_SB(sb); if (ext4_has_strict_mode(sbi) && IS_CASEFOLDED(dir) && sbi->s_encoding && utf8_validate(sbi->s_encoding, &dentry->d_name)) return -EINVAL; @@ -2822,7 +2824,7 @@ bool ext4_empty_dir(struct inode *inode) { unsigned int offset; struct buffer_head *bh; - struct ext4_dir_entry_2 *de, *de1; + struct ext4_dir_entry_2 *de; struct super_block *sb; if (ext4_has_inline_data(inode)) { @@ -2847,19 +2849,25 @@ bool ext4_empty_dir(struct inode *inode) return true; de = (struct ext4_dir_entry_2 *) bh->b_data; - de1 = ext4_next_entry(de, sb->s_blocksize); - if (le32_to_cpu(de->inode) != inode->i_ino || - le32_to_cpu(de1->inode) == 0 || - strcmp(".", de->name) || strcmp("..", de1->name)) { - ext4_warning_inode(inode, "directory missing '.' 
and/or '..'"); + if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, + 0) || + le32_to_cpu(de->inode) != inode->i_ino || strcmp(".", de->name)) { + ext4_warning_inode(inode, "directory missing '.'"); + brelse(bh); + return true; + } + offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); + de = ext4_next_entry(de, sb->s_blocksize); + if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, + offset) || + le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) { + ext4_warning_inode(inode, "directory missing '..'"); brelse(bh); return true; } - offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) + - ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize); - de = ext4_next_entry(de1, sb->s_blocksize); + offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); while (offset < inode->i_size) { - if ((void *) de >= (void *) (bh->b_data+sb->s_blocksize)) { + if (!(offset & (sb->s_blocksize - 1))) { unsigned int lblock; brelse(bh); lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb); @@ -2870,12 +2878,11 @@ bool ext4_empty_dir(struct inode *inode) } if (IS_ERR(bh)) return true; - de = (struct ext4_dir_entry_2 *) bh->b_data; } + de = (struct ext4_dir_entry_2 *) (bh->b_data + + (offset & (sb->s_blocksize - 1))); if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, offset)) { - de = (struct ext4_dir_entry_2 *)(bh->b_data + - sb->s_blocksize); offset = (offset | (sb->s_blocksize - 1)) + 1; continue; } @@ -2884,7 +2891,6 @@ bool ext4_empty_dir(struct inode *inode) return false; } offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); - de = ext4_next_entry(de, sb->s_blocksize); } brelse(bh); return true; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 1d82b56d9b11..2937a8873fe1 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1900,6 +1900,13 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, } sbi->s_commit_interval = HZ * arg; } else if (token == Opt_debug_want_extra_isize) { + if ((arg & 1) || + (arg < 4) || + (arg > (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE))) { + ext4_msg(sb, KERN_ERR, + "Invalid want_extra_isize %d", arg); + return -1; + } sbi->s_want_extra_isize = arg; } else if (token == Opt_max_batch_time) { sbi->s_max_batch_time = arg; @@ -3554,40 +3561,6 @@ int ext4_calculate_overhead(struct super_block *sb) return 0; } -static void ext4_clamp_want_extra_isize(struct super_block *sb) -{ - struct ext4_sb_info *sbi = EXT4_SB(sb); - struct ext4_super_block *es = sbi->s_es; - unsigned def_extra_isize = sizeof(struct ext4_inode) - - EXT4_GOOD_OLD_INODE_SIZE; - - if (sbi->s_inode_size == EXT4_GOOD_OLD_INODE_SIZE) { - sbi->s_want_extra_isize = 0; - return; - } - if (sbi->s_want_extra_isize < 4) { - sbi->s_want_extra_isize = def_extra_isize; - if (ext4_has_feature_extra_isize(sb)) { - if (sbi->s_want_extra_isize < - le16_to_cpu(es->s_want_extra_isize)) - sbi->s_want_extra_isize = - le16_to_cpu(es->s_want_extra_isize); - if (sbi->s_want_extra_isize < - le16_to_cpu(es->s_min_extra_isize)) - sbi->s_want_extra_isize = - le16_to_cpu(es->s_min_extra_isize); - } - } - /* Check if enough inode space is available */ - if ((sbi->s_want_extra_isize > sbi->s_inode_size) || - (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize > - sbi->s_inode_size)) { - sbi->s_want_extra_isize = def_extra_isize; - ext4_msg(sb, KERN_INFO, - "required extra inode space not available"); - } -} - static void ext4_set_resv_clusters(struct super_block *sb) { ext4_fsblk_t resv_clusters; @@ -3795,6 +3768,68 @@ static int 
ext4_fill_super(struct super_block *sb, void *data, int silent) */ sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; + if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) { + sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE; + sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO; + } else { + sbi->s_inode_size = le16_to_cpu(es->s_inode_size); + sbi->s_first_ino = le32_to_cpu(es->s_first_ino); + if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) { + ext4_msg(sb, KERN_ERR, "invalid first ino: %u", + sbi->s_first_ino); + goto failed_mount; + } + if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || + (!is_power_of_2(sbi->s_inode_size)) || + (sbi->s_inode_size > blocksize)) { + ext4_msg(sb, KERN_ERR, + "unsupported inode size: %d", + sbi->s_inode_size); + goto failed_mount; + } + /* + * i_atime_extra is the last extra field available for + * [acm]times in struct ext4_inode. Checking for that + * field should suffice to ensure we have extra space + * for all three. + */ + if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) + + sizeof(((struct ext4_inode *)0)->i_atime_extra)) { + sb->s_time_gran = 1; + sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX; + } else { + sb->s_time_gran = NSEC_PER_SEC; + sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX; + } + sb->s_time_min = EXT4_TIMESTAMP_MIN; + } + if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) { + sbi->s_want_extra_isize = sizeof(struct ext4_inode) - + EXT4_GOOD_OLD_INODE_SIZE; + if (ext4_has_feature_extra_isize(sb)) { + unsigned v, max = (sbi->s_inode_size - + EXT4_GOOD_OLD_INODE_SIZE); + + v = le16_to_cpu(es->s_want_extra_isize); + if (v > max) { + ext4_msg(sb, KERN_ERR, + "bad s_want_extra_isize: %d", v); + goto failed_mount; + } + if (sbi->s_want_extra_isize < v) + sbi->s_want_extra_isize = v; + + v = le16_to_cpu(es->s_min_extra_isize); + if (v > max) { + ext4_msg(sb, KERN_ERR, + "bad s_min_extra_isize: %d", v); + goto failed_mount; + } + if (sbi->s_want_extra_isize < v) + sbi->s_want_extra_isize = v; + } + } + if (sbi->s_es->s_mount_opts[0]) { char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts, sizeof(sbi->s_es->s_mount_opts), @@ -4033,42 +4068,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) has_huge_files); sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files); - if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) { - sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE; - sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO; - } else { - sbi->s_inode_size = le16_to_cpu(es->s_inode_size); - sbi->s_first_ino = le32_to_cpu(es->s_first_ino); - if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) { - ext4_msg(sb, KERN_ERR, "invalid first ino: %u", - sbi->s_first_ino); - goto failed_mount; - } - if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || - (!is_power_of_2(sbi->s_inode_size)) || - (sbi->s_inode_size > blocksize)) { - ext4_msg(sb, KERN_ERR, - "unsupported inode size: %d", - sbi->s_inode_size); - goto failed_mount; - } - /* - * i_atime_extra is the last extra field available for [acm]times in - * struct ext4_inode. Checking for that field should suffice to ensure - * we have extra space for all three. 
- */ - if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) + - sizeof(((struct ext4_inode *)0)->i_atime_extra)) { - sb->s_time_gran = 1; - sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX; - } else { - sb->s_time_gran = NSEC_PER_SEC; - sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX; - } - - sb->s_time_min = EXT4_TIMESTAMP_MIN; - } - sbi->s_desc_size = le16_to_cpu(es->s_desc_size); if (ext4_has_feature_64bit(sb)) { if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT || @@ -4517,8 +4516,6 @@ no_journal: } else if (ret) goto failed_mount4a; - ext4_clamp_want_extra_isize(sb); - ext4_set_resv_clusters(sb); err = ext4_setup_system_zone(sb); @@ -5306,8 +5303,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; } - ext4_clamp_want_extra_isize(sb); - if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^ test_opt(sb, JOURNAL_CHECKSUM)) { ext4_msg(sb, KERN_ERR, "changing journal_checksum " diff --git a/fs/file.c b/fs/file.c index 2f4fcf985079..3da91a112bab 100644 --- a/fs/file.c +++ b/fs/file.c @@ -960,7 +960,7 @@ SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd) return ksys_dup3(oldfd, newfd, 0); } -SYSCALL_DEFINE1(dup, unsigned int, fildes) +int ksys_dup(unsigned int fildes) { int ret = -EBADF; struct file *file = fget_raw(fildes); @@ -975,6 +975,11 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes) return ret; } +SYSCALL_DEFINE1(dup, unsigned int, fildes) +{ + return ksys_dup(fildes); +} + int f_dupfd(unsigned int from, struct file *file, unsigned flags) { int err; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index a63d779eac10..ce715380143c 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -882,6 +882,7 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file) struct fuse_args_pages *ap = &ia->ap; loff_t pos = page_offset(ap->pages[0]); size_t count = ap->num_pages << PAGE_SHIFT; + ssize_t res; int err; ap->args.out_pages = true; @@ -896,7 +897,8 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file) if (!err) return; } else { - err = fuse_simple_request(fc, &ap->args); + res = fuse_simple_request(fc, &ap->args); + err = res < 0 ? 
res : 0; } fuse_readpages_end(fc, &ap->args, err); } diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index d5c2a3158610..a66e425884d1 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -1498,8 +1498,10 @@ static int __init init_hugetlbfs_fs(void) /* other hstates are optional */ i = 0; for_each_hstate(h) { - if (i == default_hstate_idx) + if (i == default_hstate_idx) { + i++; continue; + } mnt = mount_one_hugetlbfs(h); if (IS_ERR(mnt)) diff --git a/fs/inode.c b/fs/inode.c index fef457a42882..96d62d97694e 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -676,6 +676,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty) struct inode *inode, *next; LIST_HEAD(dispose); +again: spin_lock(&sb->s_inode_list_lock); list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { spin_lock(&inode->i_lock); @@ -698,6 +699,12 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty) inode_lru_list_del(inode); spin_unlock(&inode->i_lock); list_add(&inode->i_lru, &dispose); + if (need_resched()) { + spin_unlock(&sb->s_inode_list_lock); + cond_resched(); + dispose_list(&dispose); + goto again; + } } spin_unlock(&sb->s_inode_list_lock); diff --git a/fs/internal.h b/fs/internal.h index 4a7da1df573d..e3fa69544b66 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -38,7 +38,7 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait) /* * buffer.c */ -extern void guard_bio_eod(int rw, struct bio *bio); +extern void guard_bio_eod(struct bio *bio); extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len, get_block_t *get_block, struct iomap *iomap); diff --git a/fs/io-wq.c b/fs/io-wq.c index 90c4978781fb..5147d2213b01 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -92,7 +92,6 @@ struct io_wqe { struct io_wqe_acct acct[2]; struct hlist_nulls_head free_list; - struct hlist_nulls_head busy_list; struct list_head all_list; struct io_wq *wq; @@ -327,7 +326,6 @@ static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker, if (worker->flags & IO_WORKER_F_FREE) { worker->flags &= ~IO_WORKER_F_FREE; hlist_nulls_del_init_rcu(&worker->nulls_node); - hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->busy_list); } /* @@ -365,7 +363,6 @@ static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker) { if (!(worker->flags & IO_WORKER_F_FREE)) { worker->flags |= IO_WORKER_F_FREE; - hlist_nulls_del_init_rcu(&worker->nulls_node); hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); } @@ -432,6 +429,8 @@ next: if (signal_pending(current)) flush_signals(current); + cond_resched(); + spin_lock_irq(&worker->lock); worker->cur_work = work; spin_unlock_irq(&worker->lock); @@ -446,10 +445,14 @@ next: task_unlock(current); } if ((work->flags & IO_WQ_WORK_NEEDS_USER) && !worker->mm && - wq->mm && mmget_not_zero(wq->mm)) { - use_mm(wq->mm); - set_fs(USER_DS); - worker->mm = wq->mm; + wq->mm) { + if (mmget_not_zero(wq->mm)) { + use_mm(wq->mm); + set_fs(USER_DS); + worker->mm = wq->mm; + } else { + work->flags |= IO_WQ_WORK_CANCEL; + } } if (!worker->creds) worker->creds = override_creds(wq->creds); @@ -798,10 +801,6 @@ void io_wq_cancel_all(struct io_wq *wq) set_bit(IO_WQ_BIT_CANCEL, &wq->state); - /* - * Browse both lists, as there's a gap between handing work off - * to a worker and the worker putting itself on the busy_list - */ rcu_read_lock(); for_each_node(node) { struct io_wqe *wqe = wq->wqes[node]; @@ -948,7 +947,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe, /* * Now check if a free (going busy) 
or busy worker has the work * currently running. If we find it there, we'll return CANCEL_RUNNING - * as an indication that we attempte to signal cancellation. The + * as an indication that we attempt to signal cancellation. The * completion will run normally in this case. */ rcu_read_lock(); @@ -1049,7 +1048,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) spin_lock_init(&wqe->lock); INIT_WQ_LIST(&wqe->work_list); INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0); - INIT_HLIST_NULLS_HEAD(&wqe->busy_list, 1); INIT_LIST_HEAD(&wqe->all_list); } diff --git a/fs/io-wq.h b/fs/io-wq.h index fb993b2bd0ef..3f5e356de980 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -120,6 +120,10 @@ static inline void io_wq_worker_sleeping(struct task_struct *tsk) static inline void io_wq_worker_running(struct task_struct *tsk) { } -#endif /* CONFIG_IO_WQ */ +#endif -#endif /* INTERNAL_IO_WQ_H */ +static inline bool io_wq_current_is_worker(void) +{ + return in_task() && (current->flags & PF_IO_WORKER); +} +#endif diff --git a/fs/io_uring.c b/fs/io_uring.c index 9b1833fedc5c..187dd94fd6b1 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -289,7 +289,10 @@ struct io_ring_ctx { */ struct io_poll_iocb { struct file *file; - struct wait_queue_head *head; + union { + struct wait_queue_head *head; + u64 addr; + }; __poll_t events; bool done; bool canceled; @@ -304,6 +307,51 @@ struct io_timeout_data { u32 seq_offset; }; +struct io_accept { + struct file *file; + struct sockaddr __user *addr; + int __user *addr_len; + int flags; +}; + +struct io_sync { + struct file *file; + loff_t len; + loff_t off; + int flags; +}; + +struct io_cancel { + struct file *file; + u64 addr; +}; + +struct io_timeout { + struct file *file; + u64 addr; + int flags; + unsigned count; +}; + +struct io_rw { + /* NOTE: kiocb has the file as the first member, so don't do it here */ + struct kiocb kiocb; + u64 addr; + u64 len; +}; + +struct io_connect { + struct file *file; + struct sockaddr __user *addr; + int addr_len; +}; + +struct io_sr_msg { + struct file *file; + struct user_msghdr __user *msg; + int msg_flags; +}; + struct io_async_connect { struct sockaddr_storage address; }; @@ -323,7 +371,6 @@ struct io_async_rw { }; struct io_async_ctx { - struct io_uring_sqe sqe; union { struct io_async_rw rw; struct io_async_msghdr msg; @@ -341,17 +388,23 @@ struct io_async_ctx { struct io_kiocb { union { struct file *file; - struct kiocb rw; + struct io_rw rw; struct io_poll_iocb poll; + struct io_accept accept; + struct io_sync sync; + struct io_cancel cancel; + struct io_timeout timeout; + struct io_connect connect; + struct io_sr_msg sr_msg; }; - const struct io_uring_sqe *sqe; struct io_async_ctx *io; struct file *ring_file; int ring_fd; bool has_user; bool in_async; bool needs_fixed_file; + u8 opcode; struct io_ring_ctx *ctx; union { @@ -564,12 +617,10 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx) } } -static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe) +static inline bool io_req_needs_user(struct io_kiocb *req) { - u8 opcode = READ_ONCE(sqe->opcode); - - return !(opcode == IORING_OP_READ_FIXED || - opcode == IORING_OP_WRITE_FIXED); + return !(req->opcode == IORING_OP_READ_FIXED || + req->opcode == IORING_OP_WRITE_FIXED); } static inline bool io_prep_async_work(struct io_kiocb *req, @@ -577,33 +628,31 @@ static inline bool io_prep_async_work(struct io_kiocb *req, { bool do_hashed = false; - if (req->sqe) { - switch (req->sqe->opcode) { - case IORING_OP_WRITEV: - case IORING_OP_WRITE_FIXED: - /* only 
regular files should be hashed for writes */ - if (req->flags & REQ_F_ISREG) - do_hashed = true; - /* fall-through */ - case IORING_OP_READV: - case IORING_OP_READ_FIXED: - case IORING_OP_SENDMSG: - case IORING_OP_RECVMSG: - case IORING_OP_ACCEPT: - case IORING_OP_POLL_ADD: - case IORING_OP_CONNECT: - /* - * We know REQ_F_ISREG is not set on some of these - * opcodes, but this enables us to keep the check in - * just one place. - */ - if (!(req->flags & REQ_F_ISREG)) - req->work.flags |= IO_WQ_WORK_UNBOUND; - break; - } - if (io_sqe_needs_user(req->sqe)) - req->work.flags |= IO_WQ_WORK_NEEDS_USER; + switch (req->opcode) { + case IORING_OP_WRITEV: + case IORING_OP_WRITE_FIXED: + /* only regular files should be hashed for writes */ + if (req->flags & REQ_F_ISREG) + do_hashed = true; + /* fall-through */ + case IORING_OP_READV: + case IORING_OP_READ_FIXED: + case IORING_OP_SENDMSG: + case IORING_OP_RECVMSG: + case IORING_OP_ACCEPT: + case IORING_OP_POLL_ADD: + case IORING_OP_CONNECT: + /* + * We know REQ_F_ISREG is not set on some of these + * opcodes, but this enables us to keep the check in + * just one place. + */ + if (!(req->flags & REQ_F_ISREG)) + req->work.flags |= IO_WQ_WORK_UNBOUND; + break; } + if (io_req_needs_user(req)) + req->work.flags |= IO_WQ_WORK_NEEDS_USER; *link = io_prep_linked_timeout(req); return do_hashed; @@ -972,7 +1021,7 @@ static void io_fail_links(struct io_kiocb *req) trace_io_uring_fail_link(req, link); if ((req->flags & REQ_F_LINK_TIMEOUT) && - link->sqe->opcode == IORING_OP_LINK_TIMEOUT) { + link->opcode == IORING_OP_LINK_TIMEOUT) { io_link_cancel_timeout(link); } else { io_cqring_fill_event(link, -ECANCELED); @@ -1148,7 +1197,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, ret = 0; list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) { - struct kiocb *kiocb = &req->rw; + struct kiocb *kiocb = &req->rw.kiocb; /* * Move completed entries to our local list. If we find a @@ -1178,7 +1227,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, } /* - * Poll for a mininum of 'min' events. Note that if min == 0 we consider that a + * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a * non-spinning poll check - we'll still enter the driver poll loop, but only * as a non-spinning completion check. 
*/ @@ -1303,7 +1352,7 @@ static inline void req_set_fail_links(struct io_kiocb *req) static void io_complete_rw_common(struct kiocb *kiocb, long res) { - struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw); + struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); if (kiocb->ki_flags & IOCB_WRITE) kiocb_end_write(req); @@ -1315,7 +1364,7 @@ static void io_complete_rw_common(struct kiocb *kiocb, long res) static void io_complete_rw(struct kiocb *kiocb, long res, long res2) { - struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw); + struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); io_complete_rw_common(kiocb, res); io_put_req(req); @@ -1323,7 +1372,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res, long res2) static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res) { - struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw); + struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); struct io_kiocb *nxt = NULL; io_complete_rw_common(kiocb, res); @@ -1334,7 +1383,7 @@ static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res) static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) { - struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw); + struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); if (kiocb->ki_flags & IOCB_WRITE) kiocb_end_write(req); @@ -1368,7 +1417,7 @@ static void io_iopoll_req_issued(struct io_kiocb *req) list_req = list_first_entry(&ctx->poll_list, struct io_kiocb, list); - if (list_req->rw.ki_filp != req->rw.ki_filp) + if (list_req->file != req->file) ctx->poll_multi_file = true; } @@ -1439,11 +1488,11 @@ static bool io_file_supports_async(struct file *file) return false; } -static int io_prep_rw(struct io_kiocb *req, bool force_nonblock) +static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, + bool force_nonblock) { - const struct io_uring_sqe *sqe = req->sqe; struct io_ring_ctx *ctx = req->ctx; - struct kiocb *kiocb = &req->rw; + struct kiocb *kiocb = &req->rw.kiocb; unsigned ioprio; int ret; @@ -1492,6 +1541,12 @@ static int io_prep_rw(struct io_kiocb *req, bool force_nonblock) return -EINVAL; kiocb->ki_complete = io_complete_rw; } + + req->rw.addr = READ_ONCE(sqe->addr); + req->rw.len = READ_ONCE(sqe->len); + /* we own ->private, reuse it for the buffer index */ + req->rw.kiocb.private = (void *) (unsigned long) + READ_ONCE(sqe->buf_index); return 0; } @@ -1525,11 +1580,11 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt, io_rw_done(kiocb, ret); } -static ssize_t io_import_fixed(struct io_ring_ctx *ctx, int rw, - const struct io_uring_sqe *sqe, +static ssize_t io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter) { - size_t len = READ_ONCE(sqe->len); + struct io_ring_ctx *ctx = req->ctx; + size_t len = req->rw.len; struct io_mapped_ubuf *imu; unsigned index, buf_index; size_t offset; @@ -1539,13 +1594,13 @@ static ssize_t io_import_fixed(struct io_ring_ctx *ctx, int rw, if (unlikely(!ctx->user_bufs)) return -EFAULT; - buf_index = READ_ONCE(sqe->buf_index); + buf_index = (unsigned long) req->rw.kiocb.private; if (unlikely(buf_index >= ctx->nr_user_bufs)) return -EFAULT; index = array_index_nospec(buf_index, ctx->nr_user_bufs); imu = &ctx->user_bufs[index]; - buf_addr = READ_ONCE(sqe->addr); + buf_addr = req->rw.addr; /* overflow */ if (buf_addr + len < buf_addr) @@ -1602,25 +1657,20 @@ static ssize_t io_import_fixed(struct io_ring_ctx 
*ctx, int rw, static ssize_t io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, struct iov_iter *iter) { - const struct io_uring_sqe *sqe = req->sqe; - void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr)); - size_t sqe_len = READ_ONCE(sqe->len); + void __user *buf = u64_to_user_ptr(req->rw.addr); + size_t sqe_len = req->rw.len; u8 opcode; - /* - * We're reading ->opcode for the second time, but the first read - * doesn't care whether it's _FIXED or not, so it doesn't matter - * whether ->opcode changes concurrently. The first read does care - * about whether it is a READ or a WRITE, so we don't trust this read - * for that purpose and instead let the caller pass in the read/write - * flag. - */ - opcode = READ_ONCE(sqe->opcode); + opcode = req->opcode; if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) { *iovec = NULL; - return io_import_fixed(req->ctx, rw, sqe, iter); + return io_import_fixed(req, rw, iter); } + /* buffer index only valid with fixed read/write */ + if (req->rw.kiocb.private) + return -EINVAL; + if (req->io) { struct io_async_rw *iorw = &req->io->rw; @@ -1701,7 +1751,7 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb, return ret; } -static void io_req_map_io(struct io_kiocb *req, ssize_t io_size, +static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size, struct iovec *iovec, struct iovec *fast_iov, struct iov_iter *iter) { @@ -1715,57 +1765,85 @@ static void io_req_map_io(struct io_kiocb *req, ssize_t io_size, } } -static int io_setup_async_io(struct io_kiocb *req, ssize_t io_size, +static int io_alloc_async_ctx(struct io_kiocb *req) +{ + req->io = kmalloc(sizeof(*req->io), GFP_KERNEL); + return req->io == NULL; +} + +static void io_rw_async(struct io_wq_work **workptr) +{ + struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); + struct iovec *iov = NULL; + + if (req->io->rw.iov != req->io->rw.fast_iov) + iov = req->io->rw.iov; + io_wq_submit_work(workptr); + kfree(iov); +} + +static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size, struct iovec *iovec, struct iovec *fast_iov, struct iov_iter *iter) { - req->io = kmalloc(sizeof(*req->io), GFP_KERNEL); - if (req->io) { - io_req_map_io(req, io_size, iovec, fast_iov, iter); - memcpy(&req->io->sqe, req->sqe, sizeof(req->io->sqe)); - req->sqe = &req->io->sqe; + if (req->opcode == IORING_OP_READ_FIXED || + req->opcode == IORING_OP_WRITE_FIXED) return 0; - } + if (!req->io && io_alloc_async_ctx(req)) + return -ENOMEM; - return -ENOMEM; + io_req_map_rw(req, io_size, iovec, fast_iov, iter); + req->work.func = io_rw_async; + return 0; } -static int io_read_prep(struct io_kiocb *req, struct iovec **iovec, - struct iov_iter *iter, bool force_nonblock) +static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, + bool force_nonblock) { + struct io_async_ctx *io; + struct iov_iter iter; ssize_t ret; - ret = io_prep_rw(req, force_nonblock); + ret = io_prep_rw(req, sqe, force_nonblock); if (ret) return ret; if (unlikely(!(req->file->f_mode & FMODE_READ))) return -EBADF; - return io_import_iovec(READ, req, iovec, iter); + if (!req->io) + return 0; + + io = req->io; + io->rw.iov = io->rw.fast_iov; + req->io = NULL; + ret = io_import_iovec(READ, req, &io->rw.iov, &iter); + req->io = io; + if (ret < 0) + return ret; + + io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter); + return 0; } static int io_read(struct io_kiocb *req, struct io_kiocb **nxt, bool force_nonblock) { struct iovec inline_vecs[UIO_FASTIOV], 
*iovec = inline_vecs; - struct kiocb *kiocb = &req->rw; + struct kiocb *kiocb = &req->rw.kiocb; struct iov_iter iter; - struct file *file; size_t iov_count; ssize_t io_size, ret; - if (!req->io) { - ret = io_read_prep(req, &iovec, &iter, force_nonblock); - if (ret < 0) - return ret; - } else { - ret = io_import_iovec(READ, req, &iovec, &iter); - if (ret < 0) - return ret; - } + ret = io_import_iovec(READ, req, &iovec, &iter); + if (ret < 0) + return ret; - file = req->file; + /* Ensure we clear previously set non-block flag */ + if (!force_nonblock) + req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT; + + req->result = 0; io_size = ret; if (req->flags & REQ_F_LINK) req->result = io_size; @@ -1774,39 +1852,27 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt, * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so * we know to async punt it even if it was opened O_NONBLOCK */ - if (force_nonblock && !io_file_supports_async(file)) { + if (force_nonblock && !io_file_supports_async(req->file)) { req->flags |= REQ_F_MUST_PUNT; goto copy_iov; } iov_count = iov_iter_count(&iter); - ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count); + ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count); if (!ret) { ssize_t ret2; - if (file->f_op->read_iter) - ret2 = call_read_iter(file, kiocb, &iter); + if (req->file->f_op->read_iter) + ret2 = call_read_iter(req->file, kiocb, &iter); else - ret2 = loop_rw_iter(READ, file, kiocb, &iter); + ret2 = loop_rw_iter(READ, req->file, kiocb, &iter); - /* - * In case of a short read, punt to async. This can happen - * if we have data partially cached. Alternatively we can - * return the short read, in which case the application will - * need to issue another SQE and wait for it. That SQE will - * need async punt anyway, so it's more efficient to do it - * here. 
- */ - if (force_nonblock && !(req->flags & REQ_F_NOWAIT) && - (req->flags & REQ_F_ISREG) && - ret2 > 0 && ret2 < io_size) - ret2 = -EAGAIN; /* Catch -EAGAIN return for forced non-blocking submission */ if (!force_nonblock || ret2 != -EAGAIN) { kiocb_done(kiocb, ret2, nxt, req->in_async); } else { copy_iov: - ret = io_setup_async_io(req, io_size, iovec, + ret = io_setup_async_rw(req, io_size, iovec, inline_vecs, &iter); if (ret) goto out_free; @@ -1814,46 +1880,58 @@ copy_iov: } } out_free: - kfree(iovec); + if (!io_wq_current_is_worker()) + kfree(iovec); return ret; } -static int io_write_prep(struct io_kiocb *req, struct iovec **iovec, - struct iov_iter *iter, bool force_nonblock) +static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, + bool force_nonblock) { + struct io_async_ctx *io; + struct iov_iter iter; ssize_t ret; - ret = io_prep_rw(req, force_nonblock); + ret = io_prep_rw(req, sqe, force_nonblock); if (ret) return ret; if (unlikely(!(req->file->f_mode & FMODE_WRITE))) return -EBADF; - return io_import_iovec(WRITE, req, iovec, iter); + if (!req->io) + return 0; + + io = req->io; + io->rw.iov = io->rw.fast_iov; + req->io = NULL; + ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter); + req->io = io; + if (ret < 0) + return ret; + + io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter); + return 0; } static int io_write(struct io_kiocb *req, struct io_kiocb **nxt, bool force_nonblock) { struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; - struct kiocb *kiocb = &req->rw; + struct kiocb *kiocb = &req->rw.kiocb; struct iov_iter iter; - struct file *file; size_t iov_count; ssize_t ret, io_size; - if (!req->io) { - ret = io_write_prep(req, &iovec, &iter, force_nonblock); - if (ret < 0) - return ret; - } else { - ret = io_import_iovec(WRITE, req, &iovec, &iter); - if (ret < 0) - return ret; - } + ret = io_import_iovec(WRITE, req, &iovec, &iter); + if (ret < 0) + return ret; - file = kiocb->ki_filp; + /* Ensure we clear previously set non-block flag */ + if (!force_nonblock) + req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT; + + req->result = 0; io_size = ret; if (req->flags & REQ_F_LINK) req->result = io_size; @@ -1873,7 +1951,7 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt, goto copy_iov; iov_count = iov_iter_count(&iter); - ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count); + ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count); if (!ret) { ssize_t ret2; @@ -1885,22 +1963,22 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt, * we return to userspace. 
*/ if (req->flags & REQ_F_ISREG) { - __sb_start_write(file_inode(file)->i_sb, + __sb_start_write(file_inode(req->file)->i_sb, SB_FREEZE_WRITE, true); - __sb_writers_release(file_inode(file)->i_sb, + __sb_writers_release(file_inode(req->file)->i_sb, SB_FREEZE_WRITE); } kiocb->ki_flags |= IOCB_WRITE; - if (file->f_op->write_iter) - ret2 = call_write_iter(file, kiocb, &iter); + if (req->file->f_op->write_iter) + ret2 = call_write_iter(req->file, kiocb, &iter); else - ret2 = loop_rw_iter(WRITE, file, kiocb, &iter); + ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter); if (!force_nonblock || ret2 != -EAGAIN) { kiocb_done(kiocb, ret2, nxt, req->in_async); } else { copy_iov: - ret = io_setup_async_io(req, io_size, iovec, + ret = io_setup_async_rw(req, io_size, iovec, inline_vecs, &iter); if (ret) goto out_free; @@ -1908,7 +1986,8 @@ copy_iov: } } out_free: - kfree(iovec); + if (!io_wq_current_is_worker()) + kfree(iovec); return ret; } @@ -1939,45 +2018,92 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index)) return -EINVAL; + req->sync.flags = READ_ONCE(sqe->fsync_flags); + if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC)) + return -EINVAL; + + req->sync.off = READ_ONCE(sqe->off); + req->sync.len = READ_ONCE(sqe->len); return 0; } -static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **nxt, bool force_nonblock) +static bool io_req_cancelled(struct io_kiocb *req) { - loff_t sqe_off = READ_ONCE(sqe->off); - loff_t sqe_len = READ_ONCE(sqe->len); - loff_t end = sqe_off + sqe_len; - unsigned fsync_flags; - int ret; + if (req->work.flags & IO_WQ_WORK_CANCEL) { + req_set_fail_links(req); + io_cqring_add_event(req, -ECANCELED); + io_put_req(req); + return true; + } - fsync_flags = READ_ONCE(sqe->fsync_flags); - if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC)) - return -EINVAL; + return false; +} - ret = io_prep_fsync(req, sqe); - if (ret) - return ret; +static void io_link_work_cb(struct io_wq_work **workptr) +{ + struct io_wq_work *work = *workptr; + struct io_kiocb *link = work->data; - /* fsync always requires a blocking context */ - if (force_nonblock) - return -EAGAIN; + io_queue_linked_timeout(link); + work->func = io_wq_submit_work; +} - ret = vfs_fsync_range(req->rw.ki_filp, sqe_off, - end > 0 ? end : LLONG_MAX, - fsync_flags & IORING_FSYNC_DATASYNC); +static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt) +{ + struct io_kiocb *link; + + io_prep_async_work(nxt, &link); + *workptr = &nxt->work; + if (link) { + nxt->work.flags |= IO_WQ_WORK_CB; + nxt->work.func = io_link_work_cb; + nxt->work.data = link; + } +} + +static void io_fsync_finish(struct io_wq_work **workptr) +{ + struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); + loff_t end = req->sync.off + req->sync.len; + struct io_kiocb *nxt = NULL; + int ret; + + if (io_req_cancelled(req)) + return; + ret = vfs_fsync_range(req->file, req->sync.off, + end > 0 ? 
end : LLONG_MAX, + req->sync.flags & IORING_FSYNC_DATASYNC); if (ret < 0) req_set_fail_links(req); io_cqring_add_event(req, ret); - io_put_req_find_next(req, nxt); + io_put_req_find_next(req, &nxt); + if (nxt) + io_wq_assign_next(workptr, nxt); +} + +static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt, + bool force_nonblock) +{ + struct io_wq_work *work, *old_work; + + /* fsync always requires a blocking context */ + if (force_nonblock) { + io_put_req(req); + req->work.func = io_fsync_finish; + return -EAGAIN; + } + + work = old_work = &req->work; + io_fsync_finish(&work); + if (work && work != old_work) + *nxt = container_of(work, struct io_kiocb, work); return 0; } static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_ring_ctx *ctx = req->ctx; - int ret = 0; if (!req->file) return -EBADF; @@ -1987,60 +2113,88 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index)) return -EINVAL; - return ret; + req->sync.off = READ_ONCE(sqe->off); + req->sync.len = READ_ONCE(sqe->len); + req->sync.flags = READ_ONCE(sqe->sync_range_flags); + return 0; } -static int io_sync_file_range(struct io_kiocb *req, - const struct io_uring_sqe *sqe, - struct io_kiocb **nxt, - bool force_nonblock) +static void io_sync_file_range_finish(struct io_wq_work **workptr) { - loff_t sqe_off; - loff_t sqe_len; - unsigned flags; + struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); + struct io_kiocb *nxt = NULL; int ret; - ret = io_prep_sfr(req, sqe); - if (ret) - return ret; + if (io_req_cancelled(req)) + return; + + ret = sync_file_range(req->file, req->sync.off, req->sync.len, + req->sync.flags); + if (ret < 0) + req_set_fail_links(req); + io_cqring_add_event(req, ret); + io_put_req_find_next(req, &nxt); + if (nxt) + io_wq_assign_next(workptr, nxt); +} + +static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt, + bool force_nonblock) +{ + struct io_wq_work *work, *old_work; /* sync_file_range always requires a blocking context */ - if (force_nonblock) + if (force_nonblock) { + io_put_req(req); + req->work.func = io_sync_file_range_finish; return -EAGAIN; + } - sqe_off = READ_ONCE(sqe->off); - sqe_len = READ_ONCE(sqe->len); - flags = READ_ONCE(sqe->sync_range_flags); + work = old_work = &req->work; + io_sync_file_range_finish(&work); + if (work && work != old_work) + *nxt = container_of(work, struct io_kiocb, work); + return 0; +} - ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags); +#if defined(CONFIG_NET) +static void io_sendrecv_async(struct io_wq_work **workptr) +{ + struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); + struct iovec *iov = NULL; - if (ret < 0) - req_set_fail_links(req); - io_cqring_add_event(req, ret); - io_put_req_find_next(req, nxt); - return 0; + if (req->io->rw.iov != req->io->rw.fast_iov) + iov = req->io->msg.iov; + io_wq_submit_work(workptr); + kfree(iov); } +#endif -static int io_sendmsg_prep(struct io_kiocb *req, struct io_async_ctx *io) +static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { #if defined(CONFIG_NET) - const struct io_uring_sqe *sqe = req->sqe; - struct user_msghdr __user *msg; - unsigned flags; + struct io_sr_msg *sr = &req->sr_msg; + struct io_async_ctx *io = req->io; + + sr->msg_flags = READ_ONCE(sqe->msg_flags); + sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr)); + + if (!io) + return 0; - flags = READ_ONCE(sqe->msg_flags); - msg = (struct user_msghdr 
__user *)(unsigned long) READ_ONCE(sqe->addr); io->msg.iov = io->msg.fast_iov; - return sendmsg_copy_msghdr(&io->msg.msg, msg, flags, &io->msg.iov); + return sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags, + &io->msg.iov); #else - return 0; + return -EOPNOTSUPP; #endif } -static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **nxt, bool force_nonblock) +static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt, + bool force_nonblock) { #if defined(CONFIG_NET) + struct io_async_msghdr *kmsg = NULL; struct socket *sock; int ret; @@ -2049,46 +2203,52 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, sock = sock_from_file(req->file, &ret); if (sock) { - struct io_async_ctx io, *copy; + struct io_async_ctx io; struct sockaddr_storage addr; - struct msghdr *kmsg; unsigned flags; - flags = READ_ONCE(sqe->msg_flags); - if (flags & MSG_DONTWAIT) - req->flags |= REQ_F_NOWAIT; - else if (force_nonblock) - flags |= MSG_DONTWAIT; - if (req->io) { - kmsg = &req->io->msg.msg; - kmsg->msg_name = &addr; + kmsg = &req->io->msg; + kmsg->msg.msg_name = &addr; + /* if iov is set, it's allocated already */ + if (!kmsg->iov) + kmsg->iov = kmsg->fast_iov; + kmsg->msg.msg_iter.iov = kmsg->iov; } else { - kmsg = &io.msg.msg; - kmsg->msg_name = &addr; - ret = io_sendmsg_prep(req, &io); + struct io_sr_msg *sr = &req->sr_msg; + + kmsg = &io.msg; + kmsg->msg.msg_name = &addr; + + io.msg.iov = io.msg.fast_iov; + ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg, + sr->msg_flags, &io.msg.iov); if (ret) - goto out; + return ret; } - ret = __sys_sendmsg_sock(sock, kmsg, flags); + flags = req->sr_msg.msg_flags; + if (flags & MSG_DONTWAIT) + req->flags |= REQ_F_NOWAIT; + else if (force_nonblock) + flags |= MSG_DONTWAIT; + + ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags); if (force_nonblock && ret == -EAGAIN) { - copy = kmalloc(sizeof(*copy), GFP_KERNEL); - if (!copy) { - ret = -ENOMEM; - goto out; - } - memcpy(©->msg, &io.msg, sizeof(copy->msg)); - req->io = copy; - memcpy(&req->io->sqe, req->sqe, sizeof(*req->sqe)); - req->sqe = &req->io->sqe; - return ret; + if (req->io) + return -EAGAIN; + if (io_alloc_async_ctx(req)) + return -ENOMEM; + memcpy(&req->io->msg, &io.msg, sizeof(io.msg)); + req->work.func = io_sendrecv_async; + return -EAGAIN; } if (ret == -ERESTARTSYS) ret = -EINTR; } -out: + if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov) + kfree(kmsg->iov); io_cqring_add_event(req, ret); if (ret < 0) req_set_fail_links(req); @@ -2099,27 +2259,32 @@ out: #endif } -static int io_recvmsg_prep(struct io_kiocb *req, struct io_async_ctx *io) +static int io_recvmsg_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) { #if defined(CONFIG_NET) - const struct io_uring_sqe *sqe = req->sqe; - struct user_msghdr __user *msg; - unsigned flags; + struct io_sr_msg *sr = &req->sr_msg; + struct io_async_ctx *io = req->io; + + sr->msg_flags = READ_ONCE(sqe->msg_flags); + sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr)); + + if (!io) + return 0; - flags = READ_ONCE(sqe->msg_flags); - msg = (struct user_msghdr __user *)(unsigned long) READ_ONCE(sqe->addr); io->msg.iov = io->msg.fast_iov; - return recvmsg_copy_msghdr(&io->msg.msg, msg, flags, &io->msg.uaddr, - &io->msg.iov); + return recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags, + &io->msg.uaddr, &io->msg.iov); #else - return 0; + return -EOPNOTSUPP; #endif } -static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **nxt, bool 
force_nonblock) +static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt, + bool force_nonblock) { #if defined(CONFIG_NET) + struct io_async_msghdr *kmsg = NULL; struct socket *sock; int ret; @@ -2128,49 +2293,54 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, sock = sock_from_file(req->file, &ret); if (sock) { - struct user_msghdr __user *msg; - struct io_async_ctx io, *copy; + struct io_async_ctx io; struct sockaddr_storage addr; - struct msghdr *kmsg; unsigned flags; - flags = READ_ONCE(sqe->msg_flags); - if (flags & MSG_DONTWAIT) - req->flags |= REQ_F_NOWAIT; - else if (force_nonblock) - flags |= MSG_DONTWAIT; - - msg = (struct user_msghdr __user *) (unsigned long) - READ_ONCE(sqe->addr); if (req->io) { - kmsg = &req->io->msg.msg; - kmsg->msg_name = &addr; + kmsg = &req->io->msg; + kmsg->msg.msg_name = &addr; + /* if iov is set, it's allocated already */ + if (!kmsg->iov) + kmsg->iov = kmsg->fast_iov; + kmsg->msg.msg_iter.iov = kmsg->iov; } else { - kmsg = &io.msg.msg; - kmsg->msg_name = &addr; - ret = io_recvmsg_prep(req, &io); + struct io_sr_msg *sr = &req->sr_msg; + + kmsg = &io.msg; + kmsg->msg.msg_name = &addr; + + io.msg.iov = io.msg.fast_iov; + ret = recvmsg_copy_msghdr(&io.msg.msg, sr->msg, + sr->msg_flags, &io.msg.uaddr, + &io.msg.iov); if (ret) - goto out; + return ret; } - ret = __sys_recvmsg_sock(sock, kmsg, msg, io.msg.uaddr, flags); + flags = req->sr_msg.msg_flags; + if (flags & MSG_DONTWAIT) + req->flags |= REQ_F_NOWAIT; + else if (force_nonblock) + flags |= MSG_DONTWAIT; + + ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg, + kmsg->uaddr, flags); if (force_nonblock && ret == -EAGAIN) { - copy = kmalloc(sizeof(*copy), GFP_KERNEL); - if (!copy) { - ret = -ENOMEM; - goto out; - } - memcpy(copy, &io, sizeof(*copy)); - req->io = copy; - memcpy(&req->io->sqe, req->sqe, sizeof(*req->sqe)); - req->sqe = &req->io->sqe; - return ret; + if (req->io) + return -EAGAIN; + if (io_alloc_async_ctx(req)) + return -ENOMEM; + memcpy(&req->io->msg, &io.msg, sizeof(io.msg)); + req->work.func = io_sendrecv_async; + return -EAGAIN; } if (ret == -ERESTARTSYS) ret = -EINTR; } -out: + if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov) + kfree(kmsg->iov); io_cqring_add_event(req, ret); if (ret < 0) req_set_fail_links(req); @@ -2181,30 +2351,38 @@ out: #endif } -static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **nxt, bool force_nonblock) +static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { #if defined(CONFIG_NET) - struct sockaddr __user *addr; - int __user *addr_len; - unsigned file_flags; - int flags, ret; + struct io_accept *accept = &req->accept; if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) return -EINVAL; if (sqe->ioprio || sqe->len || sqe->buf_index) return -EINVAL; - addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr); - addr_len = (int __user *) (unsigned long) READ_ONCE(sqe->addr2); - flags = READ_ONCE(sqe->accept_flags); - file_flags = force_nonblock ? 
O_NONBLOCK : 0; + accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr)); + accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2)); + accept->flags = READ_ONCE(sqe->accept_flags); + return 0; +#else + return -EOPNOTSUPP; +#endif +} - ret = __sys_accept4_file(req->file, file_flags, addr, addr_len, flags); - if (ret == -EAGAIN && force_nonblock) { - req->work.flags |= IO_WQ_WORK_NEEDS_FILES; +#if defined(CONFIG_NET) +static int __io_accept(struct io_kiocb *req, struct io_kiocb **nxt, + bool force_nonblock) +{ + struct io_accept *accept = &req->accept; + unsigned file_flags; + int ret; + + file_flags = force_nonblock ? O_NONBLOCK : 0; + ret = __sys_accept4_file(req->file, file_flags, accept->addr, + accept->addr_len, accept->flags); + if (ret == -EAGAIN && force_nonblock) return -EAGAIN; - } if (ret == -ERESTARTSYS) ret = -EINTR; if (ret < 0) @@ -2212,63 +2390,95 @@ static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe, io_cqring_add_event(req, ret); io_put_req_find_next(req, nxt); return 0; -#else - return -EOPNOTSUPP; -#endif } -static int io_connect_prep(struct io_kiocb *req, struct io_async_ctx *io) +static void io_accept_finish(struct io_wq_work **workptr) +{ + struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); + struct io_kiocb *nxt = NULL; + + if (io_req_cancelled(req)) + return; + __io_accept(req, &nxt, false); + if (nxt) + io_wq_assign_next(workptr, nxt); +} +#endif + +static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt, + bool force_nonblock) { #if defined(CONFIG_NET) - const struct io_uring_sqe *sqe = req->sqe; - struct sockaddr __user *addr; - int addr_len; + int ret; - addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr); - addr_len = READ_ONCE(sqe->addr2); - return move_addr_to_kernel(addr, addr_len, &io->connect.address); -#else + ret = __io_accept(req, nxt, force_nonblock); + if (ret == -EAGAIN && force_nonblock) { + req->work.func = io_accept_finish; + req->work.flags |= IO_WQ_WORK_NEEDS_FILES; + io_put_req(req); + return -EAGAIN; + } return 0; +#else + return -EOPNOTSUPP; #endif } -static int io_connect(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **nxt, bool force_nonblock) +static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { #if defined(CONFIG_NET) - struct io_async_ctx __io, *io; - unsigned file_flags; - int addr_len, ret; + struct io_connect *conn = &req->connect; + struct io_async_ctx *io = req->io; if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) return -EINVAL; if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags) return -EINVAL; - addr_len = READ_ONCE(sqe->addr2); - file_flags = force_nonblock ? O_NONBLOCK : 0; + conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr)); + conn->addr_len = READ_ONCE(sqe->addr2); + + if (!io) + return 0; + + return move_addr_to_kernel(conn->addr, conn->addr_len, + &io->connect.address); +#else + return -EOPNOTSUPP; +#endif +} + +static int io_connect(struct io_kiocb *req, struct io_kiocb **nxt, + bool force_nonblock) +{ +#if defined(CONFIG_NET) + struct io_async_ctx __io, *io; + unsigned file_flags; + int ret; if (req->io) { io = req->io; } else { - ret = io_connect_prep(req, &__io); + ret = move_addr_to_kernel(req->connect.addr, + req->connect.addr_len, + &__io.connect.address); if (ret) goto out; io = &__io; } - ret = __sys_connect_file(req->file, &io->connect.address, addr_len, - file_flags); + file_flags = force_nonblock ? 
O_NONBLOCK : 0; + + ret = __sys_connect_file(req->file, &io->connect.address, + req->connect.addr_len, file_flags); if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) { - io = kmalloc(sizeof(*io), GFP_KERNEL); - if (!io) { + if (req->io) + return -EAGAIN; + if (io_alloc_async_ctx(req)) { ret = -ENOMEM; goto out; } - memcpy(&io->connect, &__io.connect, sizeof(io->connect)); - req->io = io; - memcpy(&io->sqe, req->sqe, sizeof(*req->sqe)); - req->sqe = &io->sqe; + memcpy(&req->io->connect, &__io.connect, sizeof(__io.connect)); return -EAGAIN; } if (ret == -ERESTARTSYS) @@ -2331,23 +2541,32 @@ static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr) return -ENOENT; } +static int io_poll_remove_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index || + sqe->poll_events) + return -EINVAL; + + req->poll.addr = READ_ONCE(sqe->addr); + return 0; +} + /* * Find a running poll command that matches one specified in sqe->addr, * and remove it if found. */ -static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int io_poll_remove(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; + u64 addr; int ret; - if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) - return -EINVAL; - if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index || - sqe->poll_events) - return -EINVAL; - + addr = req->poll.addr; spin_lock_irq(&ctx->completion_lock); - ret = io_poll_cancel(ctx, READ_ONCE(sqe->addr)); + ret = io_poll_cancel(ctx, addr); spin_unlock_irq(&ctx->completion_lock); io_cqring_add_event(req, ret); @@ -2413,7 +2632,7 @@ static void io_poll_complete_work(struct io_wq_work **workptr) req_set_fail_links(req); io_put_req_find_next(req, &nxt); if (nxt) - *workptr = &nxt->work; + io_wq_assign_next(workptr, nxt); } static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, @@ -2482,14 +2701,9 @@ static void io_poll_req_insert(struct io_kiocb *req) hlist_add_head(&req->hash_node, list); } -static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **nxt) +static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_poll_iocb *poll = &req->poll; - struct io_ring_ctx *ctx = req->ctx; - struct io_poll_table ipt; - bool cancel = false; - __poll_t mask; u16 events; if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) @@ -2499,10 +2713,20 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (!poll->file) return -EBADF; - req->io = NULL; - INIT_IO_WORK(&req->work, io_poll_complete_work); events = READ_ONCE(sqe->poll_events); poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP; + return 0; +} + +static int io_poll_add(struct io_kiocb *req, struct io_kiocb **nxt) +{ + struct io_poll_iocb *poll = &req->poll; + struct io_ring_ctx *ctx = req->ctx; + struct io_poll_table ipt; + bool cancel = false; + __poll_t mask; + + INIT_IO_WORK(&req->work, io_poll_complete_work); INIT_HLIST_NODE(&req->hash_node); poll->head = NULL; @@ -2573,7 +2797,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer) /* * Adjust the reqs sequence before the current one because it - * will consume a slot in the cq_ring and the the cq_tail + * will consume a slot in the cq_ring and the cq_tail * pointer will be increased, otherwise other timeout reqs may * return in advance without waiting for enough wait_nr. 
*/ @@ -2619,26 +2843,32 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data) return 0; } +static int io_timeout_remove_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len) + return -EINVAL; + + req->timeout.addr = READ_ONCE(sqe->addr); + req->timeout.flags = READ_ONCE(sqe->timeout_flags); + if (req->timeout.flags) + return -EINVAL; + + return 0; +} + /* * Remove or update an existing timeout command */ -static int io_timeout_remove(struct io_kiocb *req, - const struct io_uring_sqe *sqe) +static int io_timeout_remove(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; - unsigned flags; int ret; - if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) - return -EINVAL; - if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len) - return -EINVAL; - flags = READ_ONCE(sqe->timeout_flags); - if (flags) - return -EINVAL; - spin_lock_irq(&ctx->completion_lock); - ret = io_timeout_cancel(ctx, READ_ONCE(sqe->addr)); + ret = io_timeout_cancel(ctx, req->timeout.addr); io_cqring_fill_event(req, ret); io_commit_cqring(ctx); @@ -2650,10 +2880,9 @@ static int io_timeout_remove(struct io_kiocb *req, return 0; } -static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io, +static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, bool is_timeout_link) { - const struct io_uring_sqe *sqe = req->sqe; struct io_timeout_data *data; unsigned flags; @@ -2667,7 +2896,12 @@ static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io, if (flags & ~IORING_TIMEOUT_ABS) return -EINVAL; - data = &io->timeout; + req->timeout.count = READ_ONCE(sqe->off); + + if (!req->io && io_alloc_async_ctx(req)) + return -ENOMEM; + + data = &req->io->timeout; data->req = req; req->flags |= REQ_F_TIMEOUT; @@ -2680,32 +2914,17 @@ static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io, data->mode = HRTIMER_MODE_REL; hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode); - req->io = io; return 0; } -static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int io_timeout(struct io_kiocb *req) { unsigned count; struct io_ring_ctx *ctx = req->ctx; struct io_timeout_data *data; - struct io_async_ctx *io; struct list_head *entry; unsigned span = 0; - io = req->io; - if (!io) { - int ret; - - io = kmalloc(sizeof(*io), GFP_KERNEL); - if (!io) - return -ENOMEM; - ret = io_timeout_prep(req, io, false); - if (ret) { - kfree(io); - return ret; - } - } data = &req->io->timeout; /* @@ -2713,7 +2932,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe) * timeout event to be satisfied. If it isn't set, then this is * a pure timeout request, sequence isn't used. 
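
The timeout prep code above distinguishes absolute from relative arming (IORING_TIMEOUT_ABS versus the HRTIMER_MODE_REL default). As a purely illustrative userspace analogue of that same distinction — this is not part of the patch and uses timerfd rather than anything io_uring- or hrtimer-specific — a relative one-shot timer can be armed like this:

#include <stdint.h>
#include <stdio.h>
#include <sys/timerfd.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	/* fire once, one second from now */
	struct itimerspec its = { .it_value = { .tv_sec = 1 } };
	uint64_t expirations;
	int fd;

	fd = timerfd_create(CLOCK_MONOTONIC, 0);
	if (fd < 0)
		return 1;
	/* flags == 0 arms a relative timer; TFD_TIMER_ABSTIME would make it absolute */
	if (timerfd_settime(fd, 0, &its, NULL) < 0)
		return 1;
	if (read(fd, &expirations, sizeof(expirations)) == (ssize_t)sizeof(expirations))
		printf("timer fired %llu time(s)\n", (unsigned long long)expirations);
	close(fd);
	return 0;
}
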
*/ - count = READ_ONCE(sqe->off); + count = req->timeout.count; if (!count) { req->flags |= REQ_F_TIMEOUT_NOSEQ; spin_lock_irq(&ctx->completion_lock); @@ -2831,84 +3050,104 @@ done: io_put_req_find_next(req, nxt); } -static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **nxt) +static int io_async_cancel_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) { - struct io_ring_ctx *ctx = req->ctx; - - if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; if (sqe->flags || sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags) return -EINVAL; - io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), nxt, 0); + req->cancel.addr = READ_ONCE(sqe->addr); return 0; } -static int io_req_defer_prep(struct io_kiocb *req, struct io_async_ctx *io) +static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt) { - struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; - struct iov_iter iter; - ssize_t ret; + struct io_ring_ctx *ctx = req->ctx; - memcpy(&io->sqe, req->sqe, sizeof(io->sqe)); - req->sqe = &io->sqe; + io_async_find_and_cancel(ctx, req, req->cancel.addr, nxt, 0); + return 0; +} + +static int io_req_defer_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + ssize_t ret = 0; - switch (io->sqe.opcode) { + switch (req->opcode) { + case IORING_OP_NOP: + break; case IORING_OP_READV: case IORING_OP_READ_FIXED: - ret = io_read_prep(req, &iovec, &iter, true); + ret = io_read_prep(req, sqe, true); break; case IORING_OP_WRITEV: case IORING_OP_WRITE_FIXED: - ret = io_write_prep(req, &iovec, &iter, true); + ret = io_write_prep(req, sqe, true); + break; + case IORING_OP_POLL_ADD: + ret = io_poll_add_prep(req, sqe); + break; + case IORING_OP_POLL_REMOVE: + ret = io_poll_remove_prep(req, sqe); + break; + case IORING_OP_FSYNC: + ret = io_prep_fsync(req, sqe); + break; + case IORING_OP_SYNC_FILE_RANGE: + ret = io_prep_sfr(req, sqe); break; case IORING_OP_SENDMSG: - ret = io_sendmsg_prep(req, io); + ret = io_sendmsg_prep(req, sqe); break; case IORING_OP_RECVMSG: - ret = io_recvmsg_prep(req, io); + ret = io_recvmsg_prep(req, sqe); break; case IORING_OP_CONNECT: - ret = io_connect_prep(req, io); + ret = io_connect_prep(req, sqe); break; case IORING_OP_TIMEOUT: - return io_timeout_prep(req, io, false); + ret = io_timeout_prep(req, sqe, false); + break; + case IORING_OP_TIMEOUT_REMOVE: + ret = io_timeout_remove_prep(req, sqe); + break; + case IORING_OP_ASYNC_CANCEL: + ret = io_async_cancel_prep(req, sqe); + break; case IORING_OP_LINK_TIMEOUT: - return io_timeout_prep(req, io, true); + ret = io_timeout_prep(req, sqe, true); + break; + case IORING_OP_ACCEPT: + ret = io_accept_prep(req, sqe); + break; default: - req->io = io; - return 0; + printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n", + req->opcode); + ret = -EINVAL; + break; } - if (ret < 0) - return ret; - - req->io = io; - io_req_map_io(req, ret, iovec, inline_vecs, &iter); - return 0; + return ret; } -static int io_req_defer(struct io_kiocb *req) +static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_ring_ctx *ctx = req->ctx; - struct io_async_ctx *io; int ret; /* Still need defer if there is pending req in defer list. 
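
io_req_defer_prep() above now funnels every opcode through a single prep switch and rejects anything it does not recognize. A standalone sketch of that shape follows; the opcode and helper names here are made up for illustration and are not io_uring API:

#include <errno.h>
#include <stdio.h>

enum demo_op { DEMO_OP_NOP, DEMO_OP_READ, DEMO_OP_WRITE, DEMO_OP_LAST };

struct demo_req { enum demo_op opcode; int prepared; };

static int demo_prep_rw(struct demo_req *req)
{
	req->prepared = 1;		/* stash decoded fields for later issue */
	return 0;
}

static int demo_prep(struct demo_req *req)
{
	switch (req->opcode) {
	case DEMO_OP_NOP:
		return 0;		/* nothing to decode */
	case DEMO_OP_READ:
	case DEMO_OP_WRITE:
		return demo_prep_rw(req);
	default:
		return -EINVAL;		/* unknown opcode, reject early */
	}
}

int main(void)
{
	struct demo_req req = { .opcode = DEMO_OP_READ };

	printf("prep: %d\n", demo_prep(&req));
	return 0;
}
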
*/ if (!req_need_defer(req) && list_empty(&ctx->defer_list)) return 0; - io = kmalloc(sizeof(*io), GFP_KERNEL); - if (!io) + if (!req->io && io_alloc_async_ctx(req)) return -EAGAIN; - ret = io_req_defer_prep(req, io); - if (ret < 0) { - kfree(io); + ret = io_req_defer_prep(req, sqe); + if (ret < 0) return ret; - } spin_lock_irq(&ctx->completion_lock); if (!req_need_defer(req) && list_empty(&ctx->defer_list)) { @@ -2922,66 +3161,121 @@ static int io_req_defer(struct io_kiocb *req) return -EIOCBQUEUED; } -__attribute__((nonnull)) -static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt, - bool force_nonblock) +static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, + struct io_kiocb **nxt, bool force_nonblock) { - int ret, opcode; struct io_ring_ctx *ctx = req->ctx; + int ret; - opcode = READ_ONCE(req->sqe->opcode); - switch (opcode) { + switch (req->opcode) { case IORING_OP_NOP: ret = io_nop(req); break; case IORING_OP_READV: - if (unlikely(req->sqe->buf_index)) - return -EINVAL; - ret = io_read(req, nxt, force_nonblock); - break; - case IORING_OP_WRITEV: - if (unlikely(req->sqe->buf_index)) - return -EINVAL; - ret = io_write(req, nxt, force_nonblock); - break; case IORING_OP_READ_FIXED: + if (sqe) { + ret = io_read_prep(req, sqe, force_nonblock); + if (ret < 0) + break; + } ret = io_read(req, nxt, force_nonblock); break; + case IORING_OP_WRITEV: case IORING_OP_WRITE_FIXED: + if (sqe) { + ret = io_write_prep(req, sqe, force_nonblock); + if (ret < 0) + break; + } ret = io_write(req, nxt, force_nonblock); break; case IORING_OP_FSYNC: - ret = io_fsync(req, req->sqe, nxt, force_nonblock); + if (sqe) { + ret = io_prep_fsync(req, sqe); + if (ret < 0) + break; + } + ret = io_fsync(req, nxt, force_nonblock); break; case IORING_OP_POLL_ADD: - ret = io_poll_add(req, req->sqe, nxt); + if (sqe) { + ret = io_poll_add_prep(req, sqe); + if (ret) + break; + } + ret = io_poll_add(req, nxt); break; case IORING_OP_POLL_REMOVE: - ret = io_poll_remove(req, req->sqe); + if (sqe) { + ret = io_poll_remove_prep(req, sqe); + if (ret < 0) + break; + } + ret = io_poll_remove(req); break; case IORING_OP_SYNC_FILE_RANGE: - ret = io_sync_file_range(req, req->sqe, nxt, force_nonblock); + if (sqe) { + ret = io_prep_sfr(req, sqe); + if (ret < 0) + break; + } + ret = io_sync_file_range(req, nxt, force_nonblock); break; case IORING_OP_SENDMSG: - ret = io_sendmsg(req, req->sqe, nxt, force_nonblock); + if (sqe) { + ret = io_sendmsg_prep(req, sqe); + if (ret < 0) + break; + } + ret = io_sendmsg(req, nxt, force_nonblock); break; case IORING_OP_RECVMSG: - ret = io_recvmsg(req, req->sqe, nxt, force_nonblock); + if (sqe) { + ret = io_recvmsg_prep(req, sqe); + if (ret) + break; + } + ret = io_recvmsg(req, nxt, force_nonblock); break; case IORING_OP_TIMEOUT: - ret = io_timeout(req, req->sqe); + if (sqe) { + ret = io_timeout_prep(req, sqe, false); + if (ret) + break; + } + ret = io_timeout(req); break; case IORING_OP_TIMEOUT_REMOVE: - ret = io_timeout_remove(req, req->sqe); + if (sqe) { + ret = io_timeout_remove_prep(req, sqe); + if (ret) + break; + } + ret = io_timeout_remove(req); break; case IORING_OP_ACCEPT: - ret = io_accept(req, req->sqe, nxt, force_nonblock); + if (sqe) { + ret = io_accept_prep(req, sqe); + if (ret) + break; + } + ret = io_accept(req, nxt, force_nonblock); break; case IORING_OP_CONNECT: - ret = io_connect(req, req->sqe, nxt, force_nonblock); + if (sqe) { + ret = io_connect_prep(req, sqe); + if (ret) + break; + } + ret = io_connect(req, nxt, force_nonblock); break; case 
IORING_OP_ASYNC_CANCEL: - ret = io_async_cancel(req, req->sqe, nxt); + if (sqe) { + ret = io_async_cancel_prep(req, sqe); + if (ret) + break; + } + ret = io_async_cancel(req, nxt); break; default: ret = -EINVAL; @@ -2992,24 +3286,24 @@ static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt, return ret; if (ctx->flags & IORING_SETUP_IOPOLL) { + const bool in_async = io_wq_current_is_worker(); + if (req->result == -EAGAIN) return -EAGAIN; + /* workqueue context doesn't hold uring_lock, grab it now */ + if (in_async) + mutex_lock(&ctx->uring_lock); + io_iopoll_req_issued(req); + + if (in_async) + mutex_unlock(&ctx->uring_lock); } return 0; } -static void io_link_work_cb(struct io_wq_work **workptr) -{ - struct io_wq_work *work = *workptr; - struct io_kiocb *link = work->data; - - io_queue_linked_timeout(link); - work->func = io_wq_submit_work; -} - static void io_wq_submit_work(struct io_wq_work **workptr) { struct io_wq_work *work = *workptr; @@ -3017,9 +3311,6 @@ static void io_wq_submit_work(struct io_wq_work **workptr) struct io_kiocb *nxt = NULL; int ret = 0; - /* Ensure we clear previously set non-block flag */ - req->rw.ki_flags &= ~IOCB_NOWAIT; - if (work->flags & IO_WQ_WORK_CANCEL) ret = -ECANCELED; @@ -3027,7 +3318,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr) req->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0; req->in_async = true; do { - ret = io_issue_sqe(req, &nxt, false); + ret = io_issue_sqe(req, NULL, &nxt, false); /* * We can get EAGAIN for polled IO even though we're * forcing a sync submission from here, since we can't @@ -3049,17 +3340,8 @@ static void io_wq_submit_work(struct io_wq_work **workptr) } /* if a dependent link is ready, pass it back */ - if (!ret && nxt) { - struct io_kiocb *link; - - io_prep_async_work(nxt, &link); - *workptr = &nxt->work; - if (link) { - nxt->work.flags |= IO_WQ_WORK_CB; - nxt->work.func = io_link_work_cb; - nxt->work.data = link; - } - } + if (!ret && nxt) + io_wq_assign_next(workptr, nxt); } static bool io_req_op_valid(int op) @@ -3067,11 +3349,9 @@ static bool io_req_op_valid(int op) return op >= IORING_OP_NOP && op < IORING_OP_LAST; } -static int io_op_needs_file(const struct io_uring_sqe *sqe) +static int io_req_needs_file(struct io_kiocb *req) { - int op = READ_ONCE(sqe->opcode); - - switch (op) { + switch (req->opcode) { case IORING_OP_NOP: case IORING_OP_POLL_REMOVE: case IORING_OP_TIMEOUT: @@ -3080,7 +3360,7 @@ static int io_op_needs_file(const struct io_uring_sqe *sqe) case IORING_OP_LINK_TIMEOUT: return 0; default: - if (io_req_op_valid(op)) + if (io_req_op_valid(req->opcode)) return 1; return -EINVAL; } @@ -3095,19 +3375,20 @@ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx, return table->files[index & IORING_FILE_TABLE_MASK]; } -static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req) +static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req, + const struct io_uring_sqe *sqe) { struct io_ring_ctx *ctx = req->ctx; unsigned flags; int fd, ret; - flags = READ_ONCE(req->sqe->flags); - fd = READ_ONCE(req->sqe->fd); + flags = READ_ONCE(sqe->flags); + fd = READ_ONCE(sqe->fd); if (flags & IOSQE_IO_DRAIN) req->flags |= REQ_F_IO_DRAIN; - ret = io_op_needs_file(req->sqe); + ret = io_req_needs_file(req); if (ret <= 0) return ret; @@ -3227,14 +3508,14 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, link_list); - if (!nxt || nxt->sqe->opcode != 
IORING_OP_LINK_TIMEOUT) + if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT) return NULL; req->flags |= REQ_F_LINK_TIMEOUT; return nxt; } -static void __io_queue_sqe(struct io_kiocb *req) +static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_kiocb *linked_timeout; struct io_kiocb *nxt = NULL; @@ -3243,7 +3524,7 @@ static void __io_queue_sqe(struct io_kiocb *req) again: linked_timeout = io_prep_linked_timeout(req); - ret = io_issue_sqe(req, &nxt, true); + ret = io_issue_sqe(req, sqe, &nxt, true); /* * We async punt it if the file wasn't marked NOWAIT, or if the file @@ -3290,7 +3571,7 @@ done_req: } } -static void io_queue_sqe(struct io_kiocb *req) +static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe) { int ret; @@ -3300,7 +3581,7 @@ static void io_queue_sqe(struct io_kiocb *req) } req->ctx->drain_next = (req->flags & REQ_F_DRAIN_LINK); - ret = io_req_defer(req); + ret = io_req_defer(req, sqe); if (ret) { if (ret != -EIOCBQUEUED) { io_cqring_add_event(req, ret); @@ -3308,7 +3589,7 @@ static void io_queue_sqe(struct io_kiocb *req) io_double_put_req(req); } } else - __io_queue_sqe(req); + __io_queue_sqe(req, sqe); } static inline void io_queue_link_head(struct io_kiocb *req) @@ -3317,27 +3598,25 @@ static inline void io_queue_link_head(struct io_kiocb *req) io_cqring_add_event(req, -ECANCELED); io_double_put_req(req); } else - io_queue_sqe(req); + io_queue_sqe(req, NULL); } #define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \ IOSQE_IO_HARDLINK) -static bool io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state, - struct io_kiocb **link) +static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, + struct io_submit_state *state, struct io_kiocb **link) { struct io_ring_ctx *ctx = req->ctx; int ret; - req->user_data = req->sqe->user_data; - /* enforce forwards compatibility on users */ - if (unlikely(req->sqe->flags & ~SQE_VALID_FLAGS)) { + if (unlikely(sqe->flags & ~SQE_VALID_FLAGS)) { ret = -EINVAL; goto err_req; } - ret = io_req_set_file(state, req); + ret = io_req_set_file(state, req, sqe); if (unlikely(ret)) { err_req: io_cqring_add_event(req, ret); @@ -3354,38 +3633,38 @@ err_req: */ if (*link) { struct io_kiocb *prev = *link; - struct io_async_ctx *io; - if (req->sqe->flags & IOSQE_IO_DRAIN) + if (sqe->flags & IOSQE_IO_DRAIN) (*link)->flags |= REQ_F_DRAIN_LINK | REQ_F_IO_DRAIN; - if (req->sqe->flags & IOSQE_IO_HARDLINK) + if (sqe->flags & IOSQE_IO_HARDLINK) req->flags |= REQ_F_HARDLINK; - io = kmalloc(sizeof(*io), GFP_KERNEL); - if (!io) { + if (io_alloc_async_ctx(req)) { ret = -EAGAIN; goto err_req; } - ret = io_req_defer_prep(req, io); + ret = io_req_defer_prep(req, sqe); if (ret) { - kfree(io); /* fail even hard links since we don't submit */ prev->flags |= REQ_F_FAIL_LINK; goto err_req; } trace_io_uring_link(ctx, req, prev); list_add_tail(&req->link_list, &prev->link_list); - } else if (req->sqe->flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) { + } else if (sqe->flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) { req->flags |= REQ_F_LINK; - if (req->sqe->flags & IOSQE_IO_HARDLINK) + if (sqe->flags & IOSQE_IO_HARDLINK) req->flags |= REQ_F_HARDLINK; INIT_LIST_HEAD(&req->link_list); + ret = io_req_defer_prep(req, sqe); + if (ret) + req->flags |= REQ_F_FAIL_LINK; *link = req; } else { - io_queue_sqe(req); + io_queue_sqe(req, sqe); } return true; @@ -3430,14 +3709,15 @@ static void io_commit_sqring(struct io_ring_ctx *ctx) } /* - * Fetch an sqe, if one is available. 
Note that s->sqe will point to memory + * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory * that is mapped by userspace. This means that care needs to be taken to * ensure that reads are stable, as we cannot rely on userspace always * being a good citizen. If members of the sqe are validated and then later * used, it's important that those reads are done through READ_ONCE() to * prevent a re-load down the line. */ -static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req) +static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req, + const struct io_uring_sqe **sqe_ptr) { struct io_rings *rings = ctx->rings; u32 *sq_array = ctx->sq_array; @@ -3464,7 +3744,9 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req) * link list. */ req->sequence = ctx->cached_sq_head; - req->sqe = &ctx->sq_sqes[head]; + *sqe_ptr = &ctx->sq_sqes[head]; + req->opcode = READ_ONCE((*sqe_ptr)->opcode); + req->user_data = READ_ONCE((*sqe_ptr)->user_data); ctx->cached_sq_head++; return true; } @@ -3496,6 +3778,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, } for (i = 0; i < nr; i++) { + const struct io_uring_sqe *sqe; struct io_kiocb *req; unsigned int sqe_flags; @@ -3505,12 +3788,12 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, submitted = -EAGAIN; break; } - if (!io_get_sqring(ctx, req)) { + if (!io_get_sqring(ctx, req, &sqe)) { __io_free_req(req); break; } - if (io_sqe_needs_user(req->sqe) && !*mm) { + if (io_req_needs_user(req) && !*mm) { mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm); if (!mm_fault) { use_mm(ctx->sqo_mm); @@ -3519,22 +3802,21 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, } submitted++; - sqe_flags = req->sqe->flags; + sqe_flags = sqe->flags; req->ring_file = ring_file; req->ring_fd = ring_fd; req->has_user = *mm != NULL; req->in_async = async; req->needs_fixed_file = async; - trace_io_uring_submit_sqe(ctx, req->sqe->user_data, - true, async); - if (!io_submit_sqe(req, statep, &link)) + trace_io_uring_submit_sqe(ctx, req->user_data, true, async); + if (!io_submit_sqe(req, sqe, statep, &link)) break; /* * If previous wasn't linked and we have a linked command, * that's the end of the chain. Submit the previous link. */ - if (!(sqe_flags & IOSQE_IO_LINK) && link) { + if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) && link) { io_queue_link_head(link); link = NULL; } @@ -3694,7 +3976,7 @@ static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush) struct io_ring_ctx *ctx = iowq->ctx; /* - * Wake up if we have enough events, or if a timeout occured since we + * Wake up if we have enough events, or if a timeout occurred since we * started waiting. For timeouts, we always want to return to userspace, * regardless of event count. 
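
The comment above io_get_sqring() explains why fields that live in the userspace-mapped SQ ring are read through READ_ONCE(): the value has to be snapshotted once before it is validated and used, so a hostile or buggy writer cannot change it between the check and the use. A small self-contained illustration of that idea, with READ_ONCE() reduced to a simplified stand-in for the real kernel macro:

#include <stdio.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

#define RING_ENTRIES 8u
#define RING_MASK    (RING_ENTRIES - 1)

static unsigned int shared_head;	/* imagine this lives in a page mapped to userspace */
static int shared_slots[RING_ENTRIES];

static int fetch_next(void)
{
	/* one stable snapshot; later uses cannot observe a re-loaded value */
	unsigned int head = READ_ONCE(shared_head);

	return shared_slots[head & RING_MASK];
}

int main(void)
{
	shared_slots[3] = 42;
	shared_head = 3;
	printf("%d\n", fetch_next());
	return 0;
}

The same single-snapshot rule is why the hunk above reads sqe->opcode and sqe->user_data once in io_get_sqring() and caches them in the request.
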
*/ @@ -4446,7 +4728,7 @@ static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst, if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov))) return -EFAULT; - dst->iov_base = (void __user *) (unsigned long) ciov.iov_base; + dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base); dst->iov_len = ciov.iov_len; return 0; } @@ -4877,6 +5159,12 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, } else if (to_submit) { struct mm_struct *cur_mm; + if (current->mm != ctx->sqo_mm || + current_cred() != ctx->creds) { + ret = -EPERM; + goto out; + } + to_submit = min(to_submit, ctx->sq_entries); mutex_lock(&ctx->uring_lock); /* already have mm, so io_submit_sqes() won't try to grab it */ @@ -4884,6 +5172,9 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, submitted = io_submit_sqes(ctx, to_submit, f.file, fd, &cur_mm, false); mutex_unlock(&ctx->uring_lock); + + if (submitted != to_submit) + goto out; } if (flags & IORING_ENTER_GETEVENTS) { unsigned nr_events = 0; @@ -4897,6 +5188,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, } } +out: percpu_ref_put(&ctx->refs); out_fput: fdput(f); diff --git a/fs/locks.c b/fs/locks.c index 6970f55daf54..44b6da032842 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -2853,7 +2853,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl, } if (inode) { /* userspace relies on this representation of dev_t */ - seq_printf(f, "%d %02x:%02x:%ld ", fl_pid, + seq_printf(f, "%d %02x:%02x:%lu ", fl_pid, MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev), inode->i_ino); } else { diff --git a/fs/mpage.c b/fs/mpage.c index a63620cdb73a..ccba3c4c4479 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -62,7 +62,7 @@ static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio) { bio->bi_end_io = mpage_end_io; bio_set_op_attrs(bio, op, op_flags); - guard_bio_eod(op, bio); + guard_bio_eod(bio); submit_bio(bio); return NULL; } diff --git a/fs/namei.c b/fs/namei.c index d6c91d1e88cb..d2720dc71d0e 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1232,6 +1232,7 @@ static int follow_managed(struct path *path, struct nameidata *nd) BUG_ON(!path->dentry->d_op); BUG_ON(!path->dentry->d_op->d_manage); ret = path->dentry->d_op->d_manage(path, false); + flags = smp_load_acquire(&path->dentry->d_flags); if (ret < 0) break; } @@ -1649,17 +1650,15 @@ again: if (IS_ERR(dentry)) return dentry; if (unlikely(!d_in_lookup(dentry))) { - if (!(flags & LOOKUP_NO_REVAL)) { - int error = d_revalidate(dentry, flags); - if (unlikely(error <= 0)) { - if (!error) { - d_invalidate(dentry); - dput(dentry); - goto again; - } + int error = d_revalidate(dentry, flags); + if (unlikely(error <= 0)) { + if (!error) { + d_invalidate(dentry); dput(dentry); - dentry = ERR_PTR(error); + goto again; } + dput(dentry); + dentry = ERR_PTR(error); } } else { old = inode->i_op->lookup(inode, dentry, flags); @@ -2618,72 +2617,6 @@ int user_path_at_empty(int dfd, const char __user *name, unsigned flags, EXPORT_SYMBOL(user_path_at_empty); /** - * mountpoint_last - look up last component for umount - * @nd: pathwalk nameidata - currently pointing at parent directory of "last" - * - * This is a special lookup_last function just for umount. In this case, we - * need to resolve the path without doing any revalidation. - * - * The nameidata should be the result of doing a LOOKUP_PARENT pathwalk. Since - * mountpoints are always pinned in the dcache, their ancestors are too. Thus, - * in almost all cases, this lookup will be served out of the dcache. 
The only - * cases where it won't are if nd->last refers to a symlink or the path is - * bogus and it doesn't exist. - * - * Returns: - * -error: if there was an error during lookup. This includes -ENOENT if the - * lookup found a negative dentry. - * - * 0: if we successfully resolved nd->last and found it to not to be a - * symlink that needs to be followed. - * - * 1: if we successfully resolved nd->last and found it to be a symlink - * that needs to be followed. - */ -static int -mountpoint_last(struct nameidata *nd) -{ - int error = 0; - struct dentry *dir = nd->path.dentry; - struct path path; - - /* If we're in rcuwalk, drop out of it to handle last component */ - if (nd->flags & LOOKUP_RCU) { - if (unlazy_walk(nd)) - return -ECHILD; - } - - nd->flags &= ~LOOKUP_PARENT; - - if (unlikely(nd->last_type != LAST_NORM)) { - error = handle_dots(nd, nd->last_type); - if (error) - return error; - path.dentry = dget(nd->path.dentry); - } else { - path.dentry = d_lookup(dir, &nd->last); - if (!path.dentry) { - /* - * No cached dentry. Mounted dentries are pinned in the - * cache, so that means that this dentry is probably - * a symlink or the path doesn't actually point - * to a mounted dentry. - */ - path.dentry = lookup_slow(&nd->last, dir, - nd->flags | LOOKUP_NO_REVAL); - if (IS_ERR(path.dentry)) - return PTR_ERR(path.dentry); - } - } - if (d_flags_negative(smp_load_acquire(&path.dentry->d_flags))) { - dput(path.dentry); - return -ENOENT; - } - path.mnt = nd->path.mnt; - return step_into(nd, &path, 0, d_backing_inode(path.dentry), 0); -} - -/** * path_mountpoint - look up a path to be umounted * @nd: lookup context * @flags: lookup flags @@ -2699,14 +2632,17 @@ path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path) int err; while (!(err = link_path_walk(s, nd)) && - (err = mountpoint_last(nd)) > 0) { + (err = lookup_last(nd)) > 0) { s = trailing_symlink(nd); } + if (!err && (nd->flags & LOOKUP_RCU)) + err = unlazy_walk(nd); + if (!err) + err = handle_lookup_down(nd); if (!err) { *path = nd->path; nd->path.mnt = NULL; nd->path.dentry = NULL; - follow_mount(path); } terminate_walk(nd); return err; diff --git a/fs/namespace.c b/fs/namespace.c index be601d3a8008..5e1bf611a9eb 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1728,7 +1728,7 @@ static bool is_mnt_ns_file(struct dentry *dentry) dentry->d_fsdata == &mntns_operations; } -struct mnt_namespace *to_mnt_ns(struct ns_common *ns) +static struct mnt_namespace *to_mnt_ns(struct ns_common *ns) { return container_of(ns, struct mnt_namespace, ns); } diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index f64a33d2a1d1..2a82dcce5fc1 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -206,7 +206,6 @@ TRACE_DEFINE_ENUM(LOOKUP_AUTOMOUNT); TRACE_DEFINE_ENUM(LOOKUP_PARENT); TRACE_DEFINE_ENUM(LOOKUP_REVAL); TRACE_DEFINE_ENUM(LOOKUP_RCU); -TRACE_DEFINE_ENUM(LOOKUP_NO_REVAL); TRACE_DEFINE_ENUM(LOOKUP_OPEN); TRACE_DEFINE_ENUM(LOOKUP_CREATE); TRACE_DEFINE_ENUM(LOOKUP_EXCL); @@ -224,7 +223,6 @@ TRACE_DEFINE_ENUM(LOOKUP_DOWN); { LOOKUP_PARENT, "PARENT" }, \ { LOOKUP_REVAL, "REVAL" }, \ { LOOKUP_RCU, "RCU" }, \ - { LOOKUP_NO_REVAL, "NO_REVAL" }, \ { LOOKUP_OPEN, "OPEN" }, \ { LOOKUP_CREATE, "CREATE" }, \ { LOOKUP_EXCL, "EXCL" }, \ diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 3e77b728a22b..46f225580009 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -57,6 +57,9 @@ static void fsnotify_unmount_inodes(struct super_block *sb) * doing an __iget/iput with SB_ACTIVE clear would actually * evict all 
inodes with zero i_count from icache which is * unnecessarily violent and may in fact be illegal to do. + * However, we should have been called /after/ evict_inodes + * removed all zero refcount inodes, in any case. Test to + * be sure. */ if (!atomic_read(&inode->i_count)) { spin_unlock(&inode->i_lock); @@ -77,6 +80,7 @@ static void fsnotify_unmount_inodes(struct super_block *sb) iput_inode = inode; + cond_resched(); spin_lock(&sb->s_inode_list_lock); } spin_unlock(&sb->s_inode_list_lock); diff --git a/fs/nsfs.c b/fs/nsfs.c index a0431642c6b5..f75767bd623a 100644 --- a/fs/nsfs.c +++ b/fs/nsfs.c @@ -3,6 +3,7 @@ #include <linux/pseudo_fs.h> #include <linux/file.h> #include <linux/fs.h> +#include <linux/proc_fs.h> #include <linux/proc_ns.h> #include <linux/magic.h> #include <linux/ktime.h> @@ -11,6 +12,8 @@ #include <linux/nsfs.h> #include <linux/uaccess.h> +#include "internal.h" + static struct vfsmount *nsfs_mnt; static long ns_ioctl(struct file *filp, unsigned int ioctl, diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 1c4c51f3df60..cda1027d0819 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -3282,6 +3282,7 @@ static void ocfs2_dlm_init_debug(struct ocfs2_super *osb) debugfs_create_u32("locking_filter", 0600, osb->osb_debug_root, &dlm_debug->d_filter_secs); + ocfs2_get_dlm_debug(dlm_debug); } static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb) diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 1afe57f425a0..68ba354cf361 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -1066,6 +1066,14 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed) ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num); + if (replayed) { + jbd2_journal_lock_updates(journal->j_journal); + status = jbd2_journal_flush(journal->j_journal); + jbd2_journal_unlock_updates(journal->j_journal); + if (status < 0) + mlog_errno(status); + } + status = ocfs2_journal_toggle_dirty(osb, 1, replayed); if (status < 0) { mlog_errno(status); diff --git a/fs/pipe.c b/fs/pipe.c index 04d004ee2e8c..57502c3c0fba 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -581,7 +581,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from) } wait_event_interruptible(pipe->wait, pipe_writable(pipe)); __pipe_lock(pipe); - was_empty = pipe_empty(head, pipe->tail); + was_empty = pipe_empty(pipe->head, pipe->tail); } out: __pipe_unlock(pipe); diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 84ad1c90d535..249672bf54fe 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -631,12 +631,15 @@ EXPORT_SYMBOL_GPL(posix_acl_create); /** * posix_acl_update_mode - update mode in set_acl + * @inode: target inode + * @mode_p: mode (pointer) for update + * @acl: acl pointer * * Update the file mode when setting an ACL: compute the new file permission * bits based on the ACL. In addition, if the ACL is equivalent to the new - * file mode, set *acl to NULL to indicate that no ACL should be set. + * file mode, set *@acl to NULL to indicate that no ACL should be set. * - * As with chmod, clear the setgit bit if the caller is not in the owning group + * As with chmod, clear the setgid bit if the caller is not in the owning group * or capable of CAP_FSETID (see inode_change_ok). * * Called from set_acl inode operations. 
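
The kernel-doc fixed above describes how set_acl implementations are expected to use posix_acl_update_mode(). The following is only a hedged, kernel-style sketch of that calling convention: it will not compile standalone, and demo_set_acl() plus the final persistence step are placeholders rather than real kernel code.

static int demo_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
	umode_t mode = inode->i_mode;
	int error;

	if (type == ACL_TYPE_ACCESS && acl) {
		/* recompute mode bits; may clear @acl if mode now says it all */
		error = posix_acl_update_mode(inode, &mode, &acl);
		if (error)
			return error;
		inode->i_mode = mode;
	}
	/* ...persist @acl (or its removal) in the filesystem's own format... */
	return 0;
}
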
diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 37bdbec5b402..fd931d3e77be 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -134,7 +134,7 @@ static int show_stat(struct seq_file *p, void *v) softirq += cpustat[CPUTIME_SOFTIRQ]; steal += cpustat[CPUTIME_STEAL]; guest += cpustat[CPUTIME_GUEST]; - guest_nice += cpustat[CPUTIME_USER]; + guest_nice += cpustat[CPUTIME_GUEST_NICE]; sum += kstat_cpu_irqs_sum(i); sum += arch_irq_stat_cpu(i); @@ -175,7 +175,7 @@ static int show_stat(struct seq_file *p, void *v) softirq = cpustat[CPUTIME_SOFTIRQ]; steal = cpustat[CPUTIME_STEAL]; guest = cpustat[CPUTIME_GUEST]; - guest_nice = cpustat[CPUTIME_USER]; + guest_nice = cpustat[CPUTIME_GUEST_NICE]; seq_printf(p, "cpu%d", i); seq_put_decimal_ull(p, " ", nsec_to_clock_t(user)); seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice)); diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index 8caff834f002..013486b5125e 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c @@ -407,6 +407,17 @@ static int notrace ramoops_pstore_write(struct pstore_record *record) prz = cxt->dprzs[cxt->dump_write_cnt]; + /* + * Since this is a new crash dump, we need to reset the buffer in + * case it still has an old dump present. Without this, the new dump + * will get appended, which would seriously confuse anything trying + * to check dump file contents. Specifically, ramoops_read_kmsg_hdr() + * expects to find a dump header in the beginning of buffer data, so + * we must reset the buffer values, in order to ensure that the + * header will be written to the beginning of the buffer. + */ + persistent_ram_zap(prz); + /* Build header and append record contents. */ hlen = ramoops_write_kmsg_hdr(prz, record); if (!hlen) @@ -572,6 +583,7 @@ static int ramoops_init_przs(const char *name, prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig, &cxt->ecc_info, cxt->memtype, flags, label); + kfree(label); if (IS_ERR(prz_ar[i])) { err = PTR_ERR(prz_ar[i]); dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n", @@ -617,6 +629,7 @@ static int ramoops_init_prz(const char *name, label = kasprintf(GFP_KERNEL, "ramoops:%s", name); *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype, PRZ_FLAG_ZAP_OLD, label); + kfree(label); if (IS_ERR(*prz)) { int err = PTR_ERR(*prz); diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c index 8823f65888f0..1f4d8c06f9be 100644 --- a/fs/pstore/ram_core.c +++ b/fs/pstore/ram_core.c @@ -574,7 +574,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, /* Initialize general buffer state. */ raw_spin_lock_init(&prz->buffer_lock); prz->flags = flags; - prz->label = label; + prz->label = kstrdup(label, GFP_KERNEL); ret = persistent_ram_buffer_map(start, size, prz, memtype); if (ret) diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index b0688c02dc90..b6a4f692d345 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -984,6 +984,7 @@ static int add_dquot_ref(struct super_block *sb, int type) * later. 
*/ old_inode = inode; + cond_resched(); spin_lock(&sb->s_inode_list_lock); } spin_unlock(&sb->s_inode_list_lock); diff --git a/fs/super.c b/fs/super.c index cfadab2cbf35..cd352530eca9 100644 --- a/fs/super.c +++ b/fs/super.c @@ -448,10 +448,12 @@ void generic_shutdown_super(struct super_block *sb) sync_filesystem(sb); sb->s_flags &= ~SB_ACTIVE; - fsnotify_sb_delete(sb); cgroup_writeback_umount(); + /* evict all inodes with zero refcount */ evict_inodes(sb); + /* only nonzero refcount inodes can have marks */ + fsnotify_sb_delete(sb); if (sb->s_dio_done_wq) { destroy_workqueue(sb->s_dio_done_wq); diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index c284e10af491..fc93fd88ec89 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -2248,24 +2248,32 @@ xfs_alloc_longest_free_extent( return pag->pagf_flcount > 0 || pag->pagf_longest > 0; } +/* + * Compute the minimum length of the AGFL in the given AG. If @pag is NULL, + * return the largest possible minimum length. + */ unsigned int xfs_alloc_min_freelist( struct xfs_mount *mp, struct xfs_perag *pag) { + /* AG btrees have at least 1 level. */ + static const uint8_t fake_levels[XFS_BTNUM_AGF] = {1, 1, 1}; + const uint8_t *levels = pag ? pag->pagf_levels : fake_levels; unsigned int min_free; + ASSERT(mp->m_ag_maxlevels > 0); + /* space needed by-bno freespace btree */ - min_free = min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_BNOi] + 1, + min_free = min_t(unsigned int, levels[XFS_BTNUM_BNOi] + 1, mp->m_ag_maxlevels); /* space needed by-size freespace btree */ - min_free += min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_CNTi] + 1, + min_free += min_t(unsigned int, levels[XFS_BTNUM_CNTi] + 1, mp->m_ag_maxlevels); /* space needed reverse mapping used space btree */ if (xfs_sb_version_hasrmapbt(&mp->m_sb)) - min_free += min_t(unsigned int, - pag->pagf_levels[XFS_BTNUM_RMAPi] + 1, - mp->m_rmap_maxlevels); + min_free += min_t(unsigned int, levels[XFS_BTNUM_RMAPi] + 1, + mp->m_rmap_maxlevels); return min_free; } diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index a9ad1f991ba3..4c2e046fbfad 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -4561,7 +4561,7 @@ xfs_bmapi_convert_delalloc( struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); struct xfs_bmalloca bma = { NULL }; - u16 flags = 0; + uint16_t flags = 0; struct xfs_trans *tp; int error; @@ -5972,8 +5972,7 @@ xfs_bmap_insert_extents( goto del_cursor; } - if (XFS_IS_CORRUPT(mp, - stop_fsb >= got.br_startoff + got.br_blockcount)) { + if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) { error = -EFSCORRUPTED; goto del_cursor; } diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c index 0aa87cbde49e..dd6fcaaea318 100644 --- a/fs/xfs/libxfs/xfs_dir2.c +++ b/fs/xfs/libxfs/xfs_dir2.c @@ -724,3 +724,24 @@ xfs_dir2_namecheck( /* There shouldn't be any slashes or nulls here */ return !memchr(name, '/', length) && !memchr(name, 0, length); } + +xfs_dahash_t +xfs_dir2_hashname( + struct xfs_mount *mp, + struct xfs_name *name) +{ + if (unlikely(xfs_sb_version_hasasciici(&mp->m_sb))) + return xfs_ascii_ci_hashname(name); + return xfs_da_hashname(name->name, name->len); +} + +enum xfs_dacmp +xfs_dir2_compname( + struct xfs_da_args *args, + const unsigned char *name, + int len) +{ + if (unlikely(xfs_sb_version_hasasciici(&args->dp->i_mount->m_sb))) + return xfs_ascii_ci_compname(args, name, len); + return xfs_da_compname(args, name, len); +} diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h 
b/fs/xfs/libxfs/xfs_dir2_priv.h index c031c53d0f0d..01ee0b926572 100644 --- a/fs/xfs/libxfs/xfs_dir2_priv.h +++ b/fs/xfs/libxfs/xfs_dir2_priv.h @@ -175,6 +175,12 @@ extern int xfs_dir2_sf_lookup(struct xfs_da_args *args); extern int xfs_dir2_sf_removename(struct xfs_da_args *args); extern int xfs_dir2_sf_replace(struct xfs_da_args *args); extern xfs_failaddr_t xfs_dir2_sf_verify(struct xfs_inode *ip); +int xfs_dir2_sf_entsize(struct xfs_mount *mp, + struct xfs_dir2_sf_hdr *hdr, int len); +void xfs_dir2_sf_put_ino(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *hdr, + struct xfs_dir2_sf_entry *sfep, xfs_ino_t ino); +void xfs_dir2_sf_put_ftype(struct xfs_mount *mp, + struct xfs_dir2_sf_entry *sfep, uint8_t ftype); /* xfs_dir2_readdir.c */ extern int xfs_readdir(struct xfs_trans *tp, struct xfs_inode *dp, @@ -194,25 +200,8 @@ xfs_dir2_data_entsize( return round_up(len, XFS_DIR2_DATA_ALIGN); } -static inline xfs_dahash_t -xfs_dir2_hashname( - struct xfs_mount *mp, - struct xfs_name *name) -{ - if (unlikely(xfs_sb_version_hasasciici(&mp->m_sb))) - return xfs_ascii_ci_hashname(name); - return xfs_da_hashname(name->name, name->len); -} - -static inline enum xfs_dacmp -xfs_dir2_compname( - struct xfs_da_args *args, - const unsigned char *name, - int len) -{ - if (unlikely(xfs_sb_version_hasasciici(&args->dp->i_mount->m_sb))) - return xfs_ascii_ci_compname(args, name, len); - return xfs_da_compname(args, name, len); -} +xfs_dahash_t xfs_dir2_hashname(struct xfs_mount *mp, struct xfs_name *name); +enum xfs_dacmp xfs_dir2_compname(struct xfs_da_args *args, + const unsigned char *name, int len); #endif /* __XFS_DIR2_PRIV_H__ */ diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c index 8b94d33d232f..7b7f6fb2ea3b 100644 --- a/fs/xfs/libxfs/xfs_dir2_sf.c +++ b/fs/xfs/libxfs/xfs_dir2_sf.c @@ -37,7 +37,7 @@ static void xfs_dir2_sf_check(xfs_da_args_t *args); static void xfs_dir2_sf_toino4(xfs_da_args_t *args); static void xfs_dir2_sf_toino8(xfs_da_args_t *args); -static int +int xfs_dir2_sf_entsize( struct xfs_mount *mp, struct xfs_dir2_sf_hdr *hdr, @@ -84,7 +84,7 @@ xfs_dir2_sf_get_ino( return get_unaligned_be64(from) & XFS_MAXINUMBER; } -static void +void xfs_dir2_sf_put_ino( struct xfs_mount *mp, struct xfs_dir2_sf_hdr *hdr, @@ -145,7 +145,7 @@ xfs_dir2_sf_get_ftype( return XFS_DIR3_FT_UNKNOWN; } -static void +void xfs_dir2_sf_put_ftype( struct xfs_mount *mp, struct xfs_dir2_sf_entry *sfep, diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index 988cde7744e6..5b759af4d165 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c @@ -2909,3 +2909,67 @@ xfs_ialloc_setup_geometry( else igeo->ialloc_align = 0; } + +/* Compute the location of the root directory inode that is laid out by mkfs. */ +xfs_ino_t +xfs_ialloc_calc_rootino( + struct xfs_mount *mp, + int sunit) +{ + struct xfs_ino_geometry *igeo = M_IGEO(mp); + xfs_agblock_t first_bno; + + /* + * Pre-calculate the geometry of AG 0. We know what it looks like + * because libxfs knows how to create allocation groups now. + * + * first_bno is the first block in which mkfs could possibly have + * allocated the root directory inode, once we factor in the metadata + * that mkfs formats before it. Namely, the four AG headers... + */ + first_bno = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize); + + /* ...the two free space btree roots... */ + first_bno += 2; + + /* ...the inode btree root... */ + first_bno += 1; + + /* ...the initial AGFL... 
*/ + first_bno += xfs_alloc_min_freelist(mp, NULL); + + /* ...the free inode btree root... */ + if (xfs_sb_version_hasfinobt(&mp->m_sb)) + first_bno++; + + /* ...the reverse mapping btree root... */ + if (xfs_sb_version_hasrmapbt(&mp->m_sb)) + first_bno++; + + /* ...the reference count btree... */ + if (xfs_sb_version_hasreflink(&mp->m_sb)) + first_bno++; + + /* + * ...and the log, if it is allocated in the first allocation group. + * + * This can happen with filesystems that only have a single + * allocation group, or very odd geometries created by old mkfs + * versions on very small filesystems. + */ + if (mp->m_sb.sb_logstart && + XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == 0) + first_bno += mp->m_sb.sb_logblocks; + + /* + * Now round first_bno up to whatever allocation alignment is given + * by the filesystem or was passed in. + */ + if (xfs_sb_version_hasdalign(&mp->m_sb) && igeo->ialloc_align > 0) + first_bno = roundup(first_bno, sunit); + else if (xfs_sb_version_hasalign(&mp->m_sb) && + mp->m_sb.sb_inoalignmt > 1) + first_bno = roundup(first_bno, mp->m_sb.sb_inoalignmt); + + return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno)); +} diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h index 323592d563d5..72b3468b97b1 100644 --- a/fs/xfs/libxfs/xfs_ialloc.h +++ b/fs/xfs/libxfs/xfs_ialloc.h @@ -152,5 +152,6 @@ int xfs_inobt_insert_rec(struct xfs_btree_cur *cur, uint16_t holemask, int xfs_ialloc_cluster_alignment(struct xfs_mount *mp); void xfs_ialloc_setup_geometry(struct xfs_mount *mp); +xfs_ino_t xfs_ialloc_calc_rootino(struct xfs_mount *mp, int sunit); #endif /* __XFS_IALLOC_H__ */ diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c index c55cd9a3dec9..7a9c04920505 100644 --- a/fs/xfs/libxfs/xfs_trans_resv.c +++ b/fs/xfs/libxfs/xfs_trans_resv.c @@ -197,6 +197,24 @@ xfs_calc_inode_chunk_res( } /* + * Per-extent log reservation for the btree changes involved in freeing or + * allocating a realtime extent. We have to be able to log as many rtbitmap + * blocks as needed to mark inuse MAXEXTLEN blocks' worth of realtime extents, + * as well as the realtime summary block. + */ +static unsigned int +xfs_rtalloc_log_count( + struct xfs_mount *mp, + unsigned int num_ops) +{ + unsigned int blksz = XFS_FSB_TO_B(mp, 1); + unsigned int rtbmp_bytes; + + rtbmp_bytes = (MAXEXTLEN / mp->m_sb.sb_rextsize) / NBBY; + return (howmany(rtbmp_bytes, blksz) + 1) * num_ops; +} + +/* * Various log reservation values. * * These are based on the size of the file system block because that is what @@ -218,13 +236,21 @@ xfs_calc_inode_chunk_res( /* * In a write transaction we can allocate a maximum of 2 - * extents. This gives: + * extents. 
This gives (t1): * the inode getting the new extents: inode size * the inode's bmap btree: max depth * block size * the agfs of the ags from which the extents are allocated: 2 * sector * the superblock free block counter: sector size * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size - * And the bmap_finish transaction can free bmap blocks in a join: + * Or, if we're writing to a realtime file (t2): + * the inode getting the new extents: inode size + * the inode's bmap btree: max depth * block size + * the agfs of the ags from which the extents are allocated: 2 * sector + * the superblock free block counter: sector size + * the realtime bitmap: ((MAXEXTLEN / rtextsize) / NBBY) bytes + * the realtime summary: 1 block + * the allocation btrees: 2 trees * (2 * max depth - 1) * block size + * And the bmap_finish transaction can free bmap blocks in a join (t3): * the agfs of the ags containing the blocks: 2 * sector size * the agfls of the ags containing the blocks: 2 * sector size * the super block free block counter: sector size @@ -234,40 +260,72 @@ STATIC uint xfs_calc_write_reservation( struct xfs_mount *mp) { - return XFS_DQUOT_LOGRES(mp) + - max((xfs_calc_inode_res(mp, 1) + + unsigned int t1, t2, t3; + unsigned int blksz = XFS_FSB_TO_B(mp, 1); + + t1 = xfs_calc_inode_res(mp, 1) + + xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), blksz) + + xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz); + + if (xfs_sb_version_hasrealtime(&mp->m_sb)) { + t2 = xfs_calc_inode_res(mp, 1) + xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), - XFS_FSB_TO_B(mp, 1)) + + blksz) + xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), - XFS_FSB_TO_B(mp, 1))), - (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), - XFS_FSB_TO_B(mp, 1)))); + xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 1), blksz) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1), blksz); + } else { + t2 = 0; + } + + t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz); + + return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3); } /* - * In truncating a file we free up to two extents at once. We can modify: + * In truncating a file we free up to two extents at once. 
We can modify (t1): * the inode being truncated: inode size * the inode's bmap btree: (max depth + 1) * block size - * And the bmap_finish transaction can free the blocks and bmap blocks: + * And the bmap_finish transaction can free the blocks and bmap blocks (t2): * the agf for each of the ags: 4 * sector size * the agfl for each of the ags: 4 * sector size * the super block to reflect the freed blocks: sector size * worst case split in allocation btrees per extent assuming 4 extents: * 4 exts * 2 trees * (2 * max depth - 1) * block size + * Or, if it's a realtime file (t3): + * the agf for each of the ags: 2 * sector size + * the agfl for each of the ags: 2 * sector size + * the super block to reflect the freed blocks: sector size + * the realtime bitmap: 2 exts * ((MAXEXTLEN / rtextsize) / NBBY) bytes + * the realtime summary: 2 exts * 1 block + * worst case split in allocation btrees per extent assuming 2 extents: + * 2 exts * 2 trees * (2 * max depth - 1) * block size */ STATIC uint xfs_calc_itruncate_reservation( struct xfs_mount *mp) { - return XFS_DQUOT_LOGRES(mp) + - max((xfs_calc_inode_res(mp, 1) + - xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, - XFS_FSB_TO_B(mp, 1))), - (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4), - XFS_FSB_TO_B(mp, 1)))); + unsigned int t1, t2, t3; + unsigned int blksz = XFS_FSB_TO_B(mp, 1); + + t1 = xfs_calc_inode_res(mp, 1) + + xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, blksz); + + t2 = xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4), blksz); + + if (xfs_sb_version_hasrealtime(&mp->m_sb)) { + t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 2), blksz) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz); + } else { + t3 = 0; + } + + return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3); } /* diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 2efd78a9719e..e62fb5216341 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -992,6 +992,7 @@ xfs_prepare_shift( struct xfs_inode *ip, loff_t offset) { + struct xfs_mount *mp = ip->i_mount; int error; /* @@ -1005,6 +1006,17 @@ xfs_prepare_shift( } /* + * Shift operations must stabilize the start block offset boundary along + * with the full range of the operation. If we don't, a COW writeback + * completion could race with an insert, front merge with the start + * extent (after split) during the shift and corrupt the file. Start + * with the block just prior to the start to stabilize the boundary. + */ + offset = round_down(offset, 1 << mp->m_sb.sb_blocklog); + if (offset) + offset -= (1 << mp->m_sb.sb_blocklog); + + /* * Writeback and invalidate cache for the remainder of the file as we're * about to shift down every extent from offset to EOF. 
*/ diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 3458a1264a3f..3984779e5911 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -956,7 +956,7 @@ xfs_buf_item_relse( struct xfs_buf_log_item *bip = bp->b_log_item; trace_xfs_buf_item_relse(bp, _RET_IP_); - ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL)); + ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags)); bp->b_log_item = NULL; if (list_empty(&bp->b_li_list)) diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index fca65109cf24..56efe140c923 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -31,7 +31,7 @@ #include "xfs_reflink.h" #include "xfs_extent_busy.h" #include "xfs_health.h" - +#include "xfs_trace.h" static DEFINE_MUTEX(xfs_uuid_table_mutex); static int xfs_uuid_table_size; @@ -360,66 +360,119 @@ release_buf: } /* - * Update alignment values based on mount options and sb values + * If the sunit/swidth change would move the precomputed root inode value, we + * must reject the ondisk change because repair will stumble over that. + * However, we allow the mount to proceed because we never rejected this + * combination before. Returns true to update the sb, false otherwise. + */ +static inline int +xfs_check_new_dalign( + struct xfs_mount *mp, + int new_dalign, + bool *update_sb) +{ + struct xfs_sb *sbp = &mp->m_sb; + xfs_ino_t calc_ino; + + calc_ino = xfs_ialloc_calc_rootino(mp, new_dalign); + trace_xfs_check_new_dalign(mp, new_dalign, calc_ino); + + if (sbp->sb_rootino == calc_ino) { + *update_sb = true; + return 0; + } + + xfs_warn(mp, +"Cannot change stripe alignment; would require moving root inode."); + + /* + * XXX: Next time we add a new incompat feature, this should start + * returning -EINVAL to fail the mount. Until then, spit out a warning + * that we're ignoring the administrator's instructions. + */ + xfs_warn(mp, "Skipping superblock stripe alignment update."); + *update_sb = false; + return 0; +} + +/* + * If we were provided with new sunit/swidth values as mount options, make sure + * that they pass basic alignment and superblock feature checks, and convert + * them into the same units (FSB) that everything else expects. This step + * /must/ be done before computing the inode geometry. */ STATIC int -xfs_update_alignment(xfs_mount_t *mp) +xfs_validate_new_dalign( + struct xfs_mount *mp) { - xfs_sb_t *sbp = &(mp->m_sb); + if (mp->m_dalign == 0) + return 0; - if (mp->m_dalign) { + /* + * If stripe unit and stripe width are not multiples + * of the fs blocksize turn off alignment. + */ + if ((BBTOB(mp->m_dalign) & mp->m_blockmask) || + (BBTOB(mp->m_swidth) & mp->m_blockmask)) { + xfs_warn(mp, + "alignment check failed: sunit/swidth vs. blocksize(%d)", + mp->m_sb.sb_blocksize); + return -EINVAL; + } else { /* - * If stripe unit and stripe width are not multiples - * of the fs blocksize turn off alignment. + * Convert the stripe unit and width to FSBs. */ - if ((BBTOB(mp->m_dalign) & mp->m_blockmask) || - (BBTOB(mp->m_swidth) & mp->m_blockmask)) { + mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign); + if (mp->m_dalign && (mp->m_sb.sb_agblocks % mp->m_dalign)) { xfs_warn(mp, - "alignment check failed: sunit/swidth vs. blocksize(%d)", - sbp->sb_blocksize); + "alignment check failed: sunit/swidth vs. agsize(%d)", + mp->m_sb.sb_agblocks); return -EINVAL; - } else { - /* - * Convert the stripe unit and width to FSBs. 
- */ - mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign); - if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) { - xfs_warn(mp, - "alignment check failed: sunit/swidth vs. agsize(%d)", - sbp->sb_agblocks); - return -EINVAL; - } else if (mp->m_dalign) { - mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth); - } else { - xfs_warn(mp, - "alignment check failed: sunit(%d) less than bsize(%d)", - mp->m_dalign, sbp->sb_blocksize); - return -EINVAL; - } - } - - /* - * Update superblock with new values - * and log changes - */ - if (xfs_sb_version_hasdalign(sbp)) { - if (sbp->sb_unit != mp->m_dalign) { - sbp->sb_unit = mp->m_dalign; - mp->m_update_sb = true; - } - if (sbp->sb_width != mp->m_swidth) { - sbp->sb_width = mp->m_swidth; - mp->m_update_sb = true; - } + } else if (mp->m_dalign) { + mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth); } else { xfs_warn(mp, - "cannot change alignment: superblock does not support data alignment"); + "alignment check failed: sunit(%d) less than bsize(%d)", + mp->m_dalign, mp->m_sb.sb_blocksize); return -EINVAL; } + } + + if (!xfs_sb_version_hasdalign(&mp->m_sb)) { + xfs_warn(mp, +"cannot change alignment: superblock does not support data alignment"); + return -EINVAL; + } + + return 0; +} + +/* Update alignment values based on mount options and sb values. */ +STATIC int +xfs_update_alignment( + struct xfs_mount *mp) +{ + struct xfs_sb *sbp = &mp->m_sb; + + if (mp->m_dalign) { + bool update_sb; + int error; + + if (sbp->sb_unit == mp->m_dalign && + sbp->sb_width == mp->m_swidth) + return 0; + + error = xfs_check_new_dalign(mp, mp->m_dalign, &update_sb); + if (error || !update_sb) + return error; + + sbp->sb_unit = mp->m_dalign; + sbp->sb_width = mp->m_swidth; + mp->m_update_sb = true; } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN && xfs_sb_version_hasdalign(&mp->m_sb)) { - mp->m_dalign = sbp->sb_unit; - mp->m_swidth = sbp->sb_width; + mp->m_dalign = sbp->sb_unit; + mp->m_swidth = sbp->sb_width; } return 0; @@ -648,12 +701,12 @@ xfs_mountfs( } /* - * Check if sb_agblocks is aligned at stripe boundary - * If sb_agblocks is NOT aligned turn off m_dalign since - * allocator alignment is within an ag, therefore ag has - * to be aligned at stripe boundary. + * If we were given new sunit/swidth options, do some basic validation + * checks and convert the incore dalign and swidth values to the + * same units (FSB) that everything else uses. This /must/ happen + * before computing the inode geometry. */ - error = xfs_update_alignment(mp); + error = xfs_validate_new_dalign(mp); if (error) goto out; @@ -664,6 +717,17 @@ xfs_mountfs( xfs_rmapbt_compute_maxlevels(mp); xfs_refcountbt_compute_maxlevels(mp); + /* + * Check if sb_agblocks is aligned at stripe boundary. If sb_agblocks + * is NOT aligned turn off m_dalign since allocator alignment is within + * an ag, therefore ag has to be aligned at stripe boundary. Note that + * we must compute the free space and rmap btree geometry before doing + * this. 
+ */ + error = xfs_update_alignment(mp); + if (error) + goto out; + /* enable fail_at_unmount as default */ mp->m_fail_unmount = true; diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index c13bb3655e48..a86be7f807ee 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -3573,6 +3573,27 @@ DEFINE_KMEM_EVENT(kmem_alloc_large); DEFINE_KMEM_EVENT(kmem_realloc); DEFINE_KMEM_EVENT(kmem_zone_alloc); +TRACE_EVENT(xfs_check_new_dalign, + TP_PROTO(struct xfs_mount *mp, int new_dalign, xfs_ino_t calc_rootino), + TP_ARGS(mp, new_dalign, calc_rootino), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, new_dalign) + __field(xfs_ino_t, sb_rootino) + __field(xfs_ino_t, calc_rootino) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->new_dalign = new_dalign; + __entry->sb_rootino = mp->m_sb.sb_rootino; + __entry->calc_rootino = calc_rootino; + ), + TP_printk("dev %d:%d new_dalign %d sb_rootino %llu calc_rootino %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->new_dalign, __entry->sb_rootino, + __entry->calc_rootino) +) + #endif /* _TRACE_XFS_H */ #undef TRACE_INCLUDE_PATH diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h index a950a22c4890..cac7404b2bdd 100644 --- a/include/asm-generic/cacheflush.h +++ b/include/asm-generic/cacheflush.h @@ -11,71 +11,102 @@ * The cache doesn't need to be flushed when TLB entries change when * the cache is mapped to physical memory, not virtual memory */ +#ifndef flush_cache_all static inline void flush_cache_all(void) { } +#endif +#ifndef flush_cache_mm static inline void flush_cache_mm(struct mm_struct *mm) { } +#endif +#ifndef flush_cache_dup_mm static inline void flush_cache_dup_mm(struct mm_struct *mm) { } +#endif +#ifndef flush_cache_range static inline void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { } +#endif +#ifndef flush_cache_page static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) { } +#endif +#ifndef flush_dcache_page static inline void flush_dcache_page(struct page *page) { } +#endif +#ifndef flush_dcache_mmap_lock static inline void flush_dcache_mmap_lock(struct address_space *mapping) { } +#endif +#ifndef flush_dcache_mmap_unlock static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { } +#endif +#ifndef flush_icache_range static inline void flush_icache_range(unsigned long start, unsigned long end) { } +#endif +#ifndef flush_icache_page static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page) { } +#endif +#ifndef flush_icache_user_range static inline void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, unsigned long addr, int len) { } +#endif +#ifndef flush_cache_vmap static inline void flush_cache_vmap(unsigned long start, unsigned long end) { } +#endif +#ifndef flush_cache_vunmap static inline void flush_cache_vunmap(unsigned long start, unsigned long end) { } +#endif -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ +#ifndef copy_to_user_page +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ do { \ memcpy(dst, src, len); \ flush_icache_user_range(vma, page, vaddr, len); \ } while (0) +#endif + +#ifndef copy_from_user_page #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ memcpy(dst, src, len) +#endif #endif /* __ASM_CACHEFLUSH_H */ diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index e5503771232f..bcb39da9adb4 100644 --- 
a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -618,6 +618,12 @@ struct drm_dp_mst_topology_mgr { * &drm_dp_sideband_msg_tx.state once they are queued */ struct mutex qlock; + + /** + * @is_waiting_for_dwn_reply: indicate whether is waiting for down reply + */ + bool is_waiting_for_dwn_reply; + /** * @tx_msg_downq: List of pending down replies. */ diff --git a/include/dt-bindings/reset/amlogic,meson8b-reset.h b/include/dt-bindings/reset/amlogic,meson8b-reset.h index c614438bcbdb..fbc524a900da 100644 --- a/include/dt-bindings/reset/amlogic,meson8b-reset.h +++ b/include/dt-bindings/reset/amlogic,meson8b-reset.h @@ -46,9 +46,9 @@ #define RESET_VD_RMEM 64 #define RESET_AUDIN 65 #define RESET_DBLK 66 -#define RESET_PIC_DC 66 -#define RESET_PSC 66 -#define RESET_NAND 66 +#define RESET_PIC_DC 67 +#define RESET_PSC 68 +#define RESET_NAND 69 #define RESET_GE2D 70 #define RESET_PARSER_REG 71 #define RESET_PARSER_FETCH 72 diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h index 6782f0d45ebe..49e5383d4222 100644 --- a/include/linux/ahci_platform.h +++ b/include/linux/ahci_platform.h @@ -19,6 +19,8 @@ struct ahci_host_priv; struct platform_device; struct scsi_host_template; +int ahci_platform_enable_phys(struct ahci_host_priv *hpriv); +void ahci_platform_disable_phys(struct ahci_host_priv *hpriv); int ahci_platform_enable_clks(struct ahci_host_priv *hpriv); void ahci_platform_disable_clks(struct ahci_host_priv *hpriv); int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv); diff --git a/include/linux/bio.h b/include/linux/bio.h index 3cdb84cdc488..853d92ceee64 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -470,6 +470,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *, gfp_t); extern int bio_uncopy_user(struct bio *); void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter); +void bio_truncate(struct bio *bio, unsigned new_size); static inline void zero_fill_bio(struct bio *bio) { diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 47eb22a3b7f9..4c636c42ad68 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -328,6 +328,7 @@ struct queue_limits { unsigned int max_sectors; unsigned int max_segment_size; unsigned int physical_block_size; + unsigned int logical_block_size; unsigned int alignment_offset; unsigned int io_min; unsigned int io_opt; @@ -338,7 +339,6 @@ struct queue_limits { unsigned int discard_granularity; unsigned int discard_alignment; - unsigned short logical_block_size; unsigned short max_segments; unsigned short max_integrity_segments; unsigned short max_discard_segments; @@ -1077,7 +1077,7 @@ extern void blk_queue_max_write_same_sectors(struct request_queue *q, unsigned int max_write_same_sectors); extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, unsigned int max_write_same_sectors); -extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); +extern void blk_queue_logical_block_size(struct request_queue *, unsigned int); extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); extern void blk_queue_alignment_offset(struct request_queue *q, unsigned int alignment); @@ -1291,7 +1291,7 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q) return q->limits.max_segment_size; } -static inline unsigned short queue_logical_block_size(const struct request_queue *q) +static inline unsigned queue_logical_block_size(const struct request_queue *q) { int retval = 512; @@ 
-1301,7 +1301,7 @@ static inline unsigned short queue_logical_block_size(const struct request_queue return retval; } -static inline unsigned short bdev_logical_block_size(struct block_device *bdev) +static inline unsigned int bdev_logical_block_size(struct block_device *bdev) { return queue_logical_block_size(bdev_get_queue(bdev)); } diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 169fd25f6bc2..9be71c195d74 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -157,8 +157,8 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, struct cgroup *cgroup, enum bpf_attach_type type); void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage); -int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map); -void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map); +int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map); +void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map); int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value); int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, @@ -360,9 +360,9 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, static inline void bpf_cgroup_storage_set( struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {} -static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog, +static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map) { return 0; } -static inline void bpf_cgroup_storage_release(struct bpf_prog *prog, +static inline void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map) {} static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; } diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 35903f148be5..085a59afba85 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -461,6 +461,7 @@ struct bpf_trampoline { struct { struct btf_func_model model; void *addr; + bool ftrace_managed; } func; /* list of BPF programs using this trampoline */ struct hlist_head progs_hlist[BPF_TRAMP_MAX]; @@ -817,6 +818,8 @@ struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); void bpf_prog_put(struct bpf_prog *prog); int __bpf_prog_charge(struct user_struct *user, u32 pages); void __bpf_prog_uncharge(struct user_struct *user, u32 pages); +void __bpf_free_used_maps(struct bpf_prog_aux *aux, + struct bpf_map **used_maps, u32 len); void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); diff --git a/include/linux/bvec.h b/include/linux/bvec.h index 679a42253170..a81c13ac1972 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -153,26 +153,4 @@ static inline void bvec_advance(const struct bio_vec *bvec, } } -/* - * Get the last single-page segment from the multi-page bvec and store it - * in @seg - */ -static inline void mp_bvec_last_segment(const struct bio_vec *bvec, - struct bio_vec *seg) -{ - unsigned total = bvec->bv_offset + bvec->bv_len; - unsigned last_page = (total - 1) / PAGE_SIZE; - - seg->bv_page = bvec->bv_page + last_page; - - /* the whole segment is inside the last page */ - if (bvec->bv_offset >= last_page * PAGE_SIZE) { - seg->bv_offset = bvec->bv_offset % PAGE_SIZE; - seg->bv_len = bvec->bv_len; - } else { - seg->bv_offset = 0; - seg->bv_len = total - last_page * PAGE_SIZE; - } -} - 
#endif /* __LINUX_BVEC_ITER_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 9b3c720a31b1..5e3d45525bd3 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -18,6 +18,7 @@ #include <linux/can/error.h> #include <linux/can/led.h> #include <linux/can/netlink.h> +#include <linux/can/skb.h> #include <linux/netdevice.h> /* @@ -91,6 +92,36 @@ struct can_priv { #define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC)) #define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC)) +/* Check for outgoing skbs that have not been created by the CAN subsystem */ +static inline bool can_skb_headroom_valid(struct net_device *dev, + struct sk_buff *skb) +{ + /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */ + if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv))) + return false; + + /* af_packet does not apply CAN skb specific settings */ + if (skb->ip_summed == CHECKSUM_NONE) { + /* init headroom */ + can_skb_prv(skb)->ifindex = dev->ifindex; + can_skb_prv(skb)->skbcnt = 0; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* perform proper loopback on capable devices */ + if (dev->flags & IFF_ECHO) + skb->pkt_type = PACKET_LOOPBACK; + else + skb->pkt_type = PACKET_HOST; + + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + } + + return true; +} + /* Drop a given socketbuffer if it does not contain a valid CAN frame. */ static inline bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb) @@ -108,6 +139,9 @@ static inline bool can_dropped_invalid_skb(struct net_device *dev, } else goto inval_skb; + if (!can_skb_headroom_valid(dev, skb)) + goto inval_skb; + return false; inval_skb: diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 92d5fdc8154e..31b1b0e03df8 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -595,17 +595,6 @@ struct governor_attr { size_t count); }; -static inline bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy) -{ - /* - * Allow remote callbacks if: - * - dvfs_possible_from_any_cpu flag is set - * - the local and remote CPUs share cpufreq policy - */ - return policy->dvfs_possible_from_any_cpu || - cpumask_test_cpu(smp_processor_id(), policy->cpus); -} - /********************************************************************* * FREQUENCY TABLE HELPERS * *********************************************************************/ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 8fcdee1c0cf9..dad4a68fa009 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -1364,8 +1364,11 @@ static inline int dma_get_slave_caps(struct dma_chan *chan, static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx) { struct dma_slave_caps caps; + int ret; - dma_get_slave_caps(tx->chan, &caps); + ret = dma_get_slave_caps(tx->chan, &caps); + if (ret) + return ret; if (caps.descriptor_reuse) { tx->flags |= DMA_CTRL_REUSE; diff --git a/include/linux/efi.h b/include/linux/efi.h index 99dfea595c8c..aa54586db7a5 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -824,7 +824,7 @@ typedef struct { __aligned_u64 image_size; unsigned int image_code_type; unsigned int image_data_type; - unsigned long unload; + u32 unload; } efi_loaded_image_32_t; typedef struct { @@ -840,14 +840,14 @@ typedef struct { __aligned_u64 image_size; unsigned int image_code_type; unsigned int image_data_type; - unsigned long unload; + u64 unload; } efi_loaded_image_64_t; typedef struct { u32 revision; 
- void *parent_handle; + efi_handle_t parent_handle; efi_system_table_t *system_table; - void *device_handle; + efi_handle_t device_handle; void *file_path; void *reserved; u32 load_options_size; @@ -856,7 +856,7 @@ typedef struct { __aligned_u64 image_size; unsigned int image_code_type; unsigned int image_data_type; - unsigned long unload; + efi_status_t (*unload)(efi_handle_t image_handle); } efi_loaded_image_t; diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index 76cf11e905e1..8a9792a6427a 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -24,6 +24,14 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb) return (struct ethhdr *)skb_mac_header(skb); } +/* Prefer this version in TX path, instead of + * skb_reset_mac_header() + eth_hdr() + */ +static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb) +{ + return (struct ethhdr *)skb->data; +} + static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb) { return (struct ethhdr *)skb_inner_mac_header(skb); diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 29dce6ff6bae..ce44b687d02b 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -457,7 +457,7 @@ struct jbd2_revoke_table_s; * @h_journal: Which journal handle belongs to - used iff h_reserved set. * @h_rsv_handle: Handle reserved for finishing the logical operation. * @h_total_credits: Number of remaining buffers we are allowed to add to - journal. These are dirty buffers and revoke descriptor blocks. + * journal. These are dirty buffers and revoke descriptor blocks. * @h_revoke_credits: Number of remaining revoke records available for handle * @h_ref: Reference count on this handle. * @h_err: Field for caller's use to track errors through large fs operations. 
diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 4f404c565db1..e18fe54969e9 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -205,20 +205,23 @@ static inline void *kasan_reset_tag(const void *addr) #endif /* CONFIG_KASAN_SW_TAGS */ #ifdef CONFIG_KASAN_VMALLOC -int kasan_populate_vmalloc(unsigned long requested_size, - struct vm_struct *area); -void kasan_poison_vmalloc(void *start, unsigned long size); +int kasan_populate_vmalloc(unsigned long addr, unsigned long size); +void kasan_poison_vmalloc(const void *start, unsigned long size); +void kasan_unpoison_vmalloc(const void *start, unsigned long size); void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end); #else -static inline int kasan_populate_vmalloc(unsigned long requested_size, - struct vm_struct *area) +static inline int kasan_populate_vmalloc(unsigned long start, + unsigned long size) { return 0; } -static inline void kasan_poison_vmalloc(void *start, unsigned long size) {} +static inline void kasan_poison_vmalloc(const void *start, unsigned long size) +{ } +static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size) +{ } static inline void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 3adcb39fa6f5..0d9db2a14f44 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -79,15 +79,6 @@ */ #define round_down(x, y) ((x) & ~__round_mask(x, y)) -/** - * FIELD_SIZEOF - get the size of a struct's field - * @t: the target struct - * @f: the target struct's field - * Return: the size of @f in the struct definition without having a - * declared instance of @t. 
- */ -#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) - #define typeof_member(T, m) typeof(((T*)0)->m) #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP diff --git a/include/linux/libata.h b/include/linux/libata.h index d3bbfddf616a..2dbde119721d 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -1175,6 +1175,7 @@ extern unsigned int ata_do_dev_read_id(struct ata_device *dev, struct ata_taskfile *tf, u16 *id); extern void ata_qc_complete(struct ata_queued_cmd *qc); extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active); +extern u64 ata_qc_get_active(struct ata_port *ap); extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd); extern int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev, diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 3a08ecdfca11..ba0dca6aac6e 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -122,8 +122,8 @@ static inline bool movable_node_is_enabled(void) extern void arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap); -extern void __remove_pages(struct zone *zone, unsigned long start_pfn, - unsigned long nr_pages, struct vmem_altmap *altmap); +extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages, + struct vmem_altmap *altmap); /* reasonably generic interface to expand the physical pages */ extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, @@ -342,6 +342,9 @@ extern int add_memory(int nid, u64 start, u64 size); extern int add_memory_resource(int nid, struct resource *resource); extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages, struct vmem_altmap *altmap); +extern void remove_pfn_range_from_zone(struct zone *zone, + unsigned long start_pfn, + unsigned long nr_pages); extern bool is_memblock_offlined(struct memory_block *mem); extern int sparse_add_section(int nid, unsigned long pfn, unsigned long nr_pages, struct vmem_altmap *altmap); diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h index f84b9163c0ee..7dfb63b81373 100644 --- a/include/linux/mfd/mt6397/rtc.h +++ b/include/linux/mfd/mt6397/rtc.h @@ -46,6 +46,14 @@ #define RTC_AL_SEC 0x0018 +#define RTC_AL_SEC_MASK 0x003f +#define RTC_AL_MIN_MASK 0x003f +#define RTC_AL_HOU_MASK 0x001f +#define RTC_AL_DOM_MASK 0x001f +#define RTC_AL_DOW_MASK 0x0007 +#define RTC_AL_MTH_MASK 0x000f +#define RTC_AL_YEA_MASK 0x007f + #define RTC_PDN2 0x002e #define RTC_PDN2_PWRON_ALARM BIT(4) diff --git a/include/linux/mm.h b/include/linux/mm.h index c97ea3b694e6..cfaa8feecfe8 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2621,6 +2621,9 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags) typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data); extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, unsigned long size, pte_fn_t fn, void *data); +extern int apply_to_existing_page_range(struct mm_struct *mm, + unsigned long address, unsigned long size, + pte_fn_t fn, void *data); #ifdef CONFIG_PAGE_POISONING extern bool page_poisoning_enabled(void); @@ -2655,14 +2658,26 @@ static inline bool want_init_on_free(void) !page_poisoning_enabled(); } -#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT -DECLARE_STATIC_KEY_TRUE(_debug_pagealloc_enabled); +#ifdef CONFIG_DEBUG_PAGEALLOC +extern void init_debug_pagealloc(void); #else -DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); +static inline 
void init_debug_pagealloc(void) {} #endif +extern bool _debug_pagealloc_enabled_early; +DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); static inline bool debug_pagealloc_enabled(void) { + return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && + _debug_pagealloc_enabled_early; +} + +/* + * For use in fast paths after init_debug_pagealloc() has run, or when a + * false negative result is not harmful when called too early. + */ +static inline bool debug_pagealloc_enabled_static(void) +{ if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) return false; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 89d8ff06c9ce..5334ad8fc7bd 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -215,9 +215,8 @@ enum node_stat_item { NR_INACTIVE_FILE, /* " " " " " */ NR_ACTIVE_FILE, /* " " " " " */ NR_UNEVICTABLE, /* " " " " " */ - NR_SLAB_RECLAIMABLE, /* Please do not reorder this item */ - NR_SLAB_UNRECLAIMABLE, /* and this one without looking at - * memcg_flush_percpu_vmstats() first. */ + NR_SLAB_RECLAIMABLE, + NR_SLAB_UNRECLAIMABLE, NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ WORKINGSET_NODES, diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 5714fd35a83c..e3596db077dc 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -587,9 +587,9 @@ struct platform_device_id { #define MDIO_NAME_SIZE 32 #define MDIO_MODULE_PREFIX "mdio:" -#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d" +#define MDIO_ID_FMT "%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u" #define MDIO_ID_ARGS(_id) \ - (_id)>>31, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \ + ((_id)>>31) & 1, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \ ((_id)>>27) & 1, ((_id)>>26) & 1, ((_id)>>25) & 1, ((_id)>>24) & 1, \ ((_id)>>23) & 1, ((_id)>>22) & 1, ((_id)>>21) & 1, ((_id)>>20) & 1, \ ((_id)>>19) & 1, ((_id)>>18) & 1, ((_id)>>17) & 1, ((_id)>>16) & 1, \ diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h index ecc88a41792a..c04f690871ca 100644 --- a/include/linux/mtd/flashchip.h +++ b/include/linux/mtd/flashchip.h @@ -40,7 +40,7 @@ typedef enum { FL_READING, FL_CACHEDPRG, /* These 4 come from onenand_state_t, which has been unified here */ - FL_RESETING, + FL_RESETTING, FL_OTPING, FL_PREPARING_ERASE, FL_VERIFYING_ERASE, diff --git a/include/linux/namei.h b/include/linux/namei.h index 7fe7b87a3ded..07bfb0874033 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -34,7 +34,6 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; /* internal use only */ #define LOOKUP_PARENT 0x0010 -#define LOOKUP_NO_REVAL 0x0080 #define LOOKUP_JUMPED 0x1000 #define LOOKUP_ROOT 0x2000 #define LOOKUP_ROOT_GRABBED 0x0008 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 9ef20389622d..ae5e260911e2 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1775,7 +1775,7 @@ enum netdev_priv_flags { * for hardware timestamping * @sfp_bus: attached &struct sfp_bus structure. 
* @qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock - spinlock + * spinlock * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount * @qdisc_xmit_lock_key: lockdep class annotating * netdev_queue->_xmit_lock spinlock diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index 99cefe6f5edb..491a2b7e77c1 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -12,6 +12,7 @@ #include <linux/of.h> #if IS_ENABLED(CONFIG_OF_MDIO) +extern bool of_mdiobus_child_is_phy(struct device_node *child); extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np); extern struct phy_device *of_phy_find_device(struct device_node *phy_np); extern struct phy_device *of_phy_connect(struct net_device *dev, @@ -54,6 +55,11 @@ static inline int of_mdio_parse_addr(struct device *dev, } #else /* CONFIG_OF_MDIO */ +static inline bool of_mdiobus_child_is_phy(struct device_node *child) +{ + return false; +} + static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) { /* diff --git a/include/linux/phy.h b/include/linux/phy.h index 5032d453ac66..dd4a91f1feaa 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1000,7 +1000,7 @@ int phy_modify_paged_changed(struct phy_device *phydev, int page, u32 regnum, int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum, u16 mask, u16 set); -struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, +struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids); #if IS_ENABLED(CONFIG_PHYLIB) diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index 0b9380475144..8cfe570fdece 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -49,6 +49,7 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_QUIRK_FORCE_MSTANDBY BIT(20) #define SYSC_MODULE_QUIRK_AESS BIT(19) #define SYSC_MODULE_QUIRK_SGX BIT(18) #define SYSC_MODULE_QUIRK_HDQ1W BIT(17) diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h index fe6cfdcfbc26..468328b1e1dd 100644 --- a/include/linux/posix-clock.h +++ b/include/linux/posix-clock.h @@ -69,29 +69,32 @@ struct posix_clock_operations { * * @ops: Functional interface to the clock * @cdev: Character device instance for this clock - * @kref: Reference count. + * @dev: Pointer to the clock's device. * @rwsem: Protects the 'zombie' field from concurrent access. * @zombie: If 'zombie' is true, then the hardware has disappeared. - * @release: A function to free the structure when the reference count reaches - * zero. May be NULL if structure is statically allocated. * * Drivers should embed their struct posix_clock within a private * structure, obtaining a reference to it during callbacks using * container_of(). + * + * Drivers should supply an initialized but not exposed struct device + * to posix_clock_register(). It is used to manage lifetime of the + * driver's private structure. Its 'release' field should be set to + * a release function for this private structure. */ struct posix_clock { struct posix_clock_operations ops; struct cdev cdev; - struct kref kref; + struct device *dev; struct rw_semaphore rwsem; bool zombie; - void (*release)(struct posix_clock *clk); }; /** * posix_clock_register() - register a new clock - * @clk: Pointer to the clock. Caller must provide 'ops' and 'release' - * @devid: Allocated device id + * @clk: Pointer to the clock. 
Caller must provide 'ops' field + * @dev: Pointer to the initialized device. Caller must provide + * 'release' field * * A clock driver calls this function to register itself with the * clock device subsystem. If 'clk' points to dynamically allocated @@ -100,7 +103,7 @@ struct posix_clock { * * Returns zero on success, non-zero otherwise. */ -int posix_clock_register(struct posix_clock *clk, dev_t devid); +int posix_clock_register(struct posix_clock *clk, struct device *dev); /** * posix_clock_unregister() - unregister a clock diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index bc8206a8f30e..61974c4c566b 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h @@ -101,6 +101,43 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, } /** + * hlist_nulls_add_tail_rcu + * @n: the element to add to the hash list. + * @h: the list to add to. + * + * Description: + * Adds the specified element to the specified hlist_nulls, + * while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() + * or hlist_nulls_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. Regardless of the type of CPU, the + * list-traversal primitive must be guarded by rcu_read_lock(). + */ +static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n, + struct hlist_nulls_head *h) +{ + struct hlist_nulls_node *i, *last = NULL; + + /* Note: write side code, so rcu accessors are not needed. */ + for (i = h->first; !is_a_nulls(i); i = i->next) + last = i; + + if (last) { + n->next = last->next; + n->pprev = &last->next; + rcu_assign_pointer(hlist_next_rcu(last), n); + } else { + hlist_nulls_add_head_rcu(n, h); + } +} + +/** * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type * @tpos: the type * to use as a loop cursor. * @pos: the &struct hlist_nulls_node to use as a loop cursor. diff --git a/include/linux/sched.h b/include/linux/sched.h index 467d26046416..716ad1d8d95e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1929,11 +1929,11 @@ static inline void rseq_migrate(struct task_struct *t) /* * If parent process has a registered restartable sequences area, the - * child inherits. Only applies when forking a process, not a thread. + * child inherits. Unregister rseq for a clone with CLONE_VM set. 
*/ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) { - if (clone_flags & CLONE_THREAD) { + if (clone_flags & CLONE_VM) { t->rseq = NULL; t->rseq_sig = 0; t->rseq_event_mask = 0; diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h index afa940cd50dc..cc6bcc1e96bc 100644 --- a/include/linux/sched/cpufreq.h +++ b/include/linux/sched/cpufreq.h @@ -12,6 +12,8 @@ #define SCHED_CPUFREQ_MIGRATION (1U << 1) #ifdef CONFIG_CPU_FREQ +struct cpufreq_policy; + struct update_util_data { void (*func)(struct update_util_data *data, u64 time, unsigned int flags); }; @@ -20,6 +22,7 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, void (*func)(struct update_util_data *data, u64 time, unsigned int flags)); void cpufreq_remove_update_util_hook(int cpu); +bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy); static inline unsigned long map_util_freq(unsigned long util, unsigned long freq, unsigned long cap) diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index ef7031f8a304..14d61bba0b79 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -358,17 +358,22 @@ static inline void sk_psock_update_proto(struct sock *sk, static inline void sk_psock_restore_proto(struct sock *sk, struct sk_psock *psock) { - sk->sk_write_space = psock->saved_write_space; + sk->sk_prot->unhash = psock->saved_unhash; if (psock->sk_proto) { struct inet_connection_sock *icsk = inet_csk(sk); bool has_ulp = !!icsk->icsk_ulp_data; - if (has_ulp) - tcp_update_ulp(sk, psock->sk_proto); - else + if (has_ulp) { + tcp_update_ulp(sk, psock->sk_proto, + psock->saved_write_space); + } else { sk->sk_prot = psock->sk_proto; + sk->sk_write_space = psock->saved_write_space; + } psock->sk_proto = NULL; + } else { + sk->sk_write_space = psock->saved_write_space; } } diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 98fe8663033a..3a67a7e45633 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -689,10 +689,10 @@ extern void spi_finalize_current_transfer(struct spi_controller *ctlr); /* Helper calls for driver to timestamp transfer */ void spi_take_timestamp_pre(struct spi_controller *ctlr, struct spi_transfer *xfer, - const void *tx, bool irqs_off); + size_t progress, bool irqs_off); void spi_take_timestamp_post(struct spi_controller *ctlr, struct spi_transfer *xfer, - const void *tx, bool irqs_off); + size_t progress, bool irqs_off); /* the spi driver core manages memory for the spi_controller classdev */ extern struct spi_controller *__spi_alloc_controller(struct device *host, diff --git a/include/linux/sxgbe_platform.h b/include/linux/sxgbe_platform.h index 85ec745767bd..966146f7267a 100644 --- a/include/linux/sxgbe_platform.h +++ b/include/linux/sxgbe_platform.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * 10G controller driver for Samsung EXYNOS SoCs + * 10G controller driver for Samsung Exynos SoCs * * Copyright (C) 2013 Samsung Electronics Co., Ltd. 
* http://www.samsung.com diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 2960dedcfde8..5262b7a76d39 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1232,6 +1232,7 @@ asmlinkage long sys_ni_syscall(void); */ int ksys_umount(char __user *name, int flags); +int ksys_dup(unsigned int fildes); int ksys_chroot(const char __user *filename); ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count); int ksys_chdir(const char __user *filename); diff --git a/include/linux/tnum.h b/include/linux/tnum.h index c17af77f3fae..ea627d1ab7e3 100644 --- a/include/linux/tnum.h +++ b/include/linux/tnum.h @@ -30,7 +30,7 @@ struct tnum tnum_lshift(struct tnum a, u8 shift); /* Shift (rsh) a tnum right (by a fixed shift) */ struct tnum tnum_rshift(struct tnum a, u8 shift); /* Shift (arsh) a tnum right (by a fixed min_shift) */ -struct tnum tnum_arshift(struct tnum a, u8 min_shift); +struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness); /* Add two tnums, return @a + @b */ struct tnum tnum_add(struct tnum a, struct tnum b); /* Subtract two tnums, return @a - @b */ diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 0d6e949ba315..03e9b184411b 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -403,6 +403,7 @@ extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen); extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max); extern struct tpm_chip *tpm_default_chip(void); +void tpm2_flush_context(struct tpm_chip *chip, u32 handle); #else static inline int tpm_is_tpm2(struct tpm_chip *chip) { diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 059524b87c4c..f22bd6c838a3 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -3548,6 +3548,9 @@ struct cfg80211_update_owe_info { * * @start_radar_detection: Start radar detection in the driver. * + * @end_cac: End running CAC, probably because a related CAC + * was finished on another phy. + * * @update_ft_ies: Provide updated Fast BSS Transition information to the * driver. If the SME is in the driver/firmware, this information can be * used in building Authentication and Reassociation Request frames. 
@@ -3874,6 +3877,8 @@ struct cfg80211_ops { struct net_device *dev, struct cfg80211_chan_def *chandef, u32 cac_time_ms); + void (*end_cac)(struct wiphy *wiphy, + struct net_device *dev); int (*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_update_ft_ies_params *ftie); int (*crit_proto_start)(struct wiphy *wiphy, diff --git a/include/net/devlink.h b/include/net/devlink.h index 47f87b2fcf63..38b4acb93f74 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -938,7 +938,7 @@ struct devlink_region *devlink_region_create(struct devlink *devlink, u32 region_max_snapshots, u64 region_size); void devlink_region_destroy(struct devlink_region *region); -u32 devlink_region_shapshot_id_get(struct devlink *devlink); +u32 devlink_region_snapshot_id_get(struct devlink *devlink); int devlink_region_snapshot_create(struct devlink_region *region, u8 *data, u32 snapshot_id, devlink_snapshot_data_dest_t *data_destructor); diff --git a/include/net/dst.h b/include/net/dst.h index fe62fe2eb781..3448cf865ede 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -82,7 +82,7 @@ struct dst_entry { struct dst_metrics { u32 metrics[RTAX_MAX]; refcount_t refcnt; -}; +} __aligned(4); /* Low pointer bits contain DST_METRICS_FLAGS */ extern const struct dst_metrics dst_default_metrics; u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); @@ -516,7 +516,16 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu) struct dst_entry *dst = skb_dst(skb); if (dst && dst->ops->update_pmtu) - dst->ops->update_pmtu(dst, NULL, skb, mtu); + dst->ops->update_pmtu(dst, NULL, skb, mtu, true); +} + +/* update dst pmtu but not do neighbor confirm */ +static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu) +{ + struct dst_entry *dst = skb_dst(skb); + + if (dst && dst->ops->update_pmtu) + dst->ops->update_pmtu(dst, NULL, skb, mtu, false); } static inline void skb_tunnel_check_pmtu(struct sk_buff *skb, @@ -526,7 +535,7 @@ static inline void skb_tunnel_check_pmtu(struct sk_buff *skb, u32 encap_mtu = dst_mtu(encap_dst); if (skb->len > encap_mtu - headroom) - skb_dst_update_pmtu(skb, encap_mtu - headroom); + skb_dst_update_pmtu_no_confirm(skb, encap_mtu - headroom); } #endif /* _NET_DST_H */ diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index 5ec645f27ee3..443863c7b8da 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -27,7 +27,8 @@ struct dst_ops { struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu); + struct sk_buff *skb, u32 mtu, + bool confirm_neigh); void (*redirect)(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb); int (*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb); diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index af2b4c065a04..d0019d3395cf 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -103,13 +103,19 @@ struct inet_bind_hashbucket { struct hlist_head chain; }; -/* - * Sockets can be hashed in established or listening table +/* Sockets can be hashed in established or listening table. + * We must use different 'nulls' end-of-chain value for all hash buckets : + * A socket might transition from ESTABLISH to LISTEN state without + * RCU grace period. A lookup in ehash table needs to handle this case. 
*/ +#define LISTENING_NULLS_BASE (1U << 29) struct inet_listen_hashbucket { spinlock_t lock; unsigned int count; - struct hlist_head head; + union { + struct hlist_head head; + struct hlist_nulls_head nulls_head; + }; }; /* This is for listening sockets, thus all sockets which possess wildcards. */ diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 6ad9ad47a9c5..8ec77bfdc1a4 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -72,7 +72,6 @@ struct neigh_parms { struct net_device *dev; struct list_head list; int (*neigh_setup)(struct neighbour *); - void (*neigh_cleanup)(struct neighbour *); struct neigh_table *tbl; void *sysctl_table; diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index f0897b3c97fb..415b8f49d150 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -106,6 +106,12 @@ struct flow_offload { }; #define NF_FLOW_TIMEOUT (30 * HZ) +#define nf_flowtable_time_stamp (u32)jiffies + +static inline __s32 nf_flow_timeout_delta(unsigned int timeout) +{ + return (__s32)(timeout - nf_flowtable_time_stamp); +} struct nf_flow_route { struct { diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 144f264ea394..fceddf89592a 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -308,6 +308,7 @@ struct tcf_proto_ops { int (*delete)(struct tcf_proto *tp, void *arg, bool *last, bool rtnl_held, struct netlink_ext_ack *); + bool (*delete_empty)(struct tcf_proto *tp); void (*walk)(struct tcf_proto *tp, struct tcf_walker *arg, bool rtnl_held); int (*reoffload)(struct tcf_proto *tp, bool add, @@ -336,6 +337,10 @@ struct tcf_proto_ops { int flags; }; +/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags + * are expected to implement tcf_proto_ops->delete_empty(), otherwise race + * conditions can occur when filters are inserted/deleted simultaneously. 
+ */ enum tcf_proto_ops_flags { TCF_PROTO_OPS_DOIT_UNLOCKED = 1, }; diff --git a/include/net/sock.h b/include/net/sock.h index 80f996406bba..8dff68b4c316 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -722,6 +722,11 @@ static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_h hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); } +static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list) +{ + hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list); +} + static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) { sock_hold(sk); @@ -2583,9 +2588,9 @@ static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto) */ static inline void sk_pacing_shift_update(struct sock *sk, int val) { - if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val) + if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val) return; - sk->sk_pacing_shift = val; + WRITE_ONCE(sk->sk_pacing_shift, val); } /* if a socket is bound to a device, check that the given device diff --git a/include/net/tcp.h b/include/net/tcp.h index 86b9a8766648..e6f48384dc71 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1766,9 +1766,18 @@ static inline bool tcp_skb_is_last(const struct sock *sk, return skb_queue_is_last(&sk->sk_write_queue, skb); } +/** + * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue + * @sk: socket + * + * Since the write queue can have a temporary empty skb in it, + * we must not use "return skb_queue_empty(&sk->sk_write_queue)" + */ static inline bool tcp_write_queue_empty(const struct sock *sk) { - return skb_queue_empty(&sk->sk_write_queue); + const struct tcp_sock *tp = tcp_sk(sk); + + return tp->write_seq == tp->snd_nxt; } static inline bool tcp_rtx_queue_empty(const struct sock *sk) @@ -2138,7 +2147,8 @@ struct tcp_ulp_ops { /* initialize ulp */ int (*init)(struct sock *sk); /* update ulp */ - void (*update)(struct sock *sk, struct proto *p); + void (*update)(struct sock *sk, struct proto *p, + void (*write_space)(struct sock *sk)); /* cleanup ulp */ void (*release)(struct sock *sk); /* diagnostic */ @@ -2153,7 +2163,8 @@ void tcp_unregister_ulp(struct tcp_ulp_ops *type); int tcp_set_ulp(struct sock *sk, const char *name); void tcp_get_available_ulp(char *buf, size_t len); void tcp_cleanup_ulp(struct sock *sk); -void tcp_update_ulp(struct sock *sk, struct proto *p); +void tcp_update_ulp(struct sock *sk, struct proto *p, + void (*write_space)(struct sock *sk)); #define MODULE_ALIAS_TCP_ULP(name) \ __MODULE_INFO(alias, alias_userspace, name); \ diff --git a/include/net/x25.h b/include/net/x25.h index ed1acc3044ac..d7d6c2b4ffa7 100644 --- a/include/net/x25.h +++ b/include/net/x25.h @@ -62,7 +62,8 @@ enum { X25_STATE_1, /* Awaiting Call Accepted */ X25_STATE_2, /* Awaiting Clear Confirmation */ X25_STATE_3, /* Data Transfer */ - X25_STATE_4 /* Awaiting Reset Confirmation */ + X25_STATE_4, /* Awaiting Reset Confirmation */ + X25_STATE_5 /* Call Accepted / Call Connected pending */ }; enum { diff --git a/arch/riscv/include/asm/sifive_l2_cache.h b/include/soc/sifive/sifive_l2_cache.h index 04f6748fc50b..92ade10ed67e 100644 --- a/arch/riscv/include/asm/sifive_l2_cache.h +++ b/include/soc/sifive/sifive_l2_cache.h @@ -4,8 +4,8 @@ * */ -#ifndef _ASM_RISCV_SIFIVE_L2_CACHE_H -#define _ASM_RISCV_SIFIVE_L2_CACHE_H +#ifndef __SOC_SIFIVE_L2_CACHE_H +#define __SOC_SIFIVE_L2_CACHE_H extern int register_sifive_l2_error_notifier(struct notifier_block *nb); 
extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb); @@ -13,4 +13,4 @@ extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb); #define SIFIVE_L2_ERR_TYPE_CE 0 #define SIFIVE_L2_ERR_TYPE_UE 1 -#endif /* _ASM_RISCV_SIFIVE_L2_CACHE_H */ +#endif /* __SOC_SIFIVE_L2_CACHE_H */ diff --git a/include/sound/soc.h b/include/sound/soc.h index c28a1ed5e8df..262896799826 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -1150,6 +1150,7 @@ struct snd_soc_pcm_runtime { unsigned int num_codecs; struct delayed_work delayed_work; + void (*close_delayed_work_func)(struct snd_soc_pcm_runtime *rtd); #ifdef CONFIG_DEBUG_FS struct dentry *debugfs_dpcm_root; #endif diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index d5ec4fac82ae..564ba1b5cf57 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -915,9 +915,9 @@ TRACE_EVENT(afs_call_state, TRACE_EVENT(afs_lookup, TP_PROTO(struct afs_vnode *dvnode, const struct qstr *name, - struct afs_vnode *vnode), + struct afs_fid *fid), - TP_ARGS(dvnode, name, vnode), + TP_ARGS(dvnode, name, fid), TP_STRUCT__entry( __field_struct(struct afs_fid, dfid ) @@ -928,13 +928,7 @@ TRACE_EVENT(afs_lookup, TP_fast_assign( int __len = min_t(int, name->len, 23); __entry->dfid = dvnode->fid; - if (vnode) { - __entry->fid = vnode->fid; - } else { - __entry->fid.vid = 0; - __entry->fid.vnode = 0; - __entry->fid.unique = 0; - } + __entry->fid = *fid; memcpy(__entry->name, name->name, __len); __entry->name[__len] = 0; ), diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index dd4db334bd63..d82a0f4e824d 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -31,7 +31,8 @@ EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \ EM( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") \ EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \ - EMe(SCAN_TRUNCATED, "truncated") \ + EM( SCAN_TRUNCATED, "truncated") \ + EMe(SCAN_PAGE_HAS_PRIVATE, "page_has_private") \ #undef EM #undef EMe diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h index 95fba0471e5b..3f249e150c0c 100644 --- a/include/trace/events/preemptirq.h +++ b/include/trace/events/preemptirq.h @@ -18,13 +18,13 @@ DECLARE_EVENT_CLASS(preemptirq_template, TP_ARGS(ip, parent_ip), TP_STRUCT__entry( - __field(u32, caller_offs) - __field(u32, parent_offs) + __field(s32, caller_offs) + __field(s32, parent_offs) ), TP_fast_assign( - __entry->caller_offs = (u32)(ip - (unsigned long)_stext); - __entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext); + __entry->caller_offs = (s32)(ip - (unsigned long)_stext); + __entry->parent_offs = (s32)(parent_ip - (unsigned long)_stext); ), TP_printk("caller=%pS parent=%pS", diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index f056b2a00d5c..9a61c28ed3ae 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h @@ -34,6 +34,7 @@ struct input_event { __kernel_ulong_t __sec; #if defined(__sparc__) && defined(__arch64__) unsigned int __usec; + unsigned int __pad; #else __kernel_ulong_t __usec; #endif diff --git a/include/uapi/linux/kcov.h b/include/uapi/linux/kcov.h index 409d3ad1e6e2..1d0350e44ae3 100644 --- a/include/uapi/linux/kcov.h +++ b/include/uapi/linux/kcov.h @@ -9,11 +9,11 @@ * and the comment before kcov_remote_start() for usage details. 
*/ struct kcov_remote_arg { - unsigned int trace_mode; /* KCOV_TRACE_PC or KCOV_TRACE_CMP */ - unsigned int area_size; /* Length of coverage buffer in words */ - unsigned int num_handles; /* Size of handles array */ - __u64 common_handle; - __u64 handles[0]; + __u32 trace_mode; /* KCOV_TRACE_PC or KCOV_TRACE_CMP */ + __u32 area_size; /* Length of coverage buffer in words */ + __u32 num_handles; /* Size of handles array */ + __aligned_u64 common_handle; + __aligned_u64 handles[0]; }; #define KCOV_REMOTE_MAX_HANDLES 0x100 diff --git a/include/uapi/linux/netfilter/xt_sctp.h b/include/uapi/linux/netfilter/xt_sctp.h index 4bc6d1a08781..b4d804a9fccb 100644 --- a/include/uapi/linux/netfilter/xt_sctp.h +++ b/include/uapi/linux/netfilter/xt_sctp.h @@ -41,19 +41,19 @@ struct xt_sctp_info { #define SCTP_CHUNKMAP_SET(chunkmap, type) \ do { \ (chunkmap)[type / bytes(__u32)] |= \ - 1 << (type % bytes(__u32)); \ + 1u << (type % bytes(__u32)); \ } while (0) #define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \ do { \ (chunkmap)[type / bytes(__u32)] &= \ - ~(1 << (type % bytes(__u32))); \ + ~(1u << (type % bytes(__u32))); \ } while (0) #define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \ ({ \ ((chunkmap)[type / bytes (__u32)] & \ - (1 << (type % bytes (__u32)))) ? 1: 0; \ + (1u << (type % bytes (__u32)))) ? 1: 0; \ }) #define SCTP_CHUNKMAP_RESET(chunkmap) \ diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 341e0e8cae46..5eab191607f8 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -5517,6 +5517,10 @@ enum nl80211_feature_flags { * with VLAN tagged frames and separate VLAN-specific netdevs added using * vconfig similarly to the Ethernet case. * + * @NL80211_EXT_FEATURE_AQL: The driver supports the Airtime Queue Limit (AQL) + * feature, which prevents bufferbloat by using the expected transmission + * time to limit the amount of data buffered in the hardware. + * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. 
*/ @@ -5563,6 +5567,7 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_STA_TX_PWR, NL80211_EXT_FEATURE_SAE_OFFLOAD, NL80211_EXT_FEATURE_VLAN_OFFLOAD, + NL80211_EXT_FEATURE_AQL, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h index 3f40501fc60b..2af7a1cd6658 100644 --- a/include/xen/interface/io/ring.h +++ b/include/xen/interface/io/ring.h @@ -125,35 +125,24 @@ struct __name##_back_ring { \ memset((_s)->pad, 0, sizeof((_s)->pad)); \ } while(0) -#define FRONT_RING_INIT(_r, _s, __size) do { \ - (_r)->req_prod_pvt = 0; \ - (_r)->rsp_cons = 0; \ +#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \ + (_r)->req_prod_pvt = (_i); \ + (_r)->rsp_cons = (_i); \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) -#define BACK_RING_INIT(_r, _s, __size) do { \ - (_r)->rsp_prod_pvt = 0; \ - (_r)->req_cons = 0; \ - (_r)->nr_ents = __RING_SIZE(_s, __size); \ - (_r)->sring = (_s); \ -} while (0) +#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size) -/* Initialize to existing shared indexes -- for recovery */ -#define FRONT_RING_ATTACH(_r, _s, __size) do { \ - (_r)->sring = (_s); \ - (_r)->req_prod_pvt = (_s)->req_prod; \ - (_r)->rsp_cons = (_s)->rsp_prod; \ +#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \ + (_r)->rsp_prod_pvt = (_i); \ + (_r)->req_cons = (_i); \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ -} while (0) - -#define BACK_RING_ATTACH(_r, _s, __size) do { \ (_r)->sring = (_s); \ - (_r)->rsp_prod_pvt = (_s)->rsp_prod; \ - (_r)->req_cons = (_s)->req_prod; \ - (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) +#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size) + /* How big is this ring? */ #define RING_SIZE(_r) \ ((_r)->nr_ents) diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index 869c816d5f8c..24228a102141 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h @@ -93,6 +93,7 @@ struct xenbus_device_id struct xenbus_driver { const char *name; /* defaults to ids[0].devicetype */ const struct xenbus_device_id *ids; + bool allow_rebind; /* avoid setting xenstore closed during remove */ int (*probe)(struct xenbus_device *dev, const struct xenbus_device_id *id); void (*otherend_changed)(struct xenbus_device *dev, diff --git a/init/do_mounts.c b/init/do_mounts.c index f55cbd9cb818..0ae9cc22f2ae 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -391,17 +391,19 @@ static int __init do_mount_root(const char *name, const char *fs, const int flags, const void *data) { struct super_block *s; - char *data_page; - struct page *p; + struct page *p = NULL; + char *data_page = NULL; int ret; - /* do_mount() requires a full page as fifth argument */ - p = alloc_page(GFP_KERNEL); - if (!p) - return -ENOMEM; - - data_page = page_address(p); - strncpy(data_page, data, PAGE_SIZE - 1); + if (data) { + /* do_mount() requires a full page as fifth argument */ + p = alloc_page(GFP_KERNEL); + if (!p) + return -ENOMEM; + data_page = page_address(p); + /* zero-pad. 
do_mount() will make sure it's terminated */ + strncpy(data_page, data, PAGE_SIZE); + } ret = do_mount(name, "/root", fs, flags, data_page); if (ret) @@ -417,7 +419,8 @@ static int __init do_mount_root(const char *name, const char *fs, MAJOR(ROOT_DEV), MINOR(ROOT_DEV)); out: - put_page(p); + if (p) + put_page(p); return ret; } diff --git a/init/main.c b/init/main.c index ec3a1463ac69..da1bc0b60a7d 100644 --- a/init/main.c +++ b/init/main.c @@ -93,7 +93,6 @@ #include <linux/rodata_test.h> #include <linux/jump_label.h> #include <linux/mem_encrypt.h> -#include <linux/file.h> #include <asm/io.h> #include <asm/bugs.h> @@ -554,6 +553,7 @@ static void __init mm_init(void) * bigger than MAX_ORDER unless SPARSEMEM. */ page_ext_init_flatmem(); + init_debug_pagealloc(); report_meminit(); mem_init(); kmem_cache_init(); @@ -1158,26 +1158,13 @@ static int __ref kernel_init(void *unused) void console_on_rootfs(void) { - struct file *file; - unsigned int i; - - /* Open /dev/console in kernelspace, this should never fail */ - file = filp_open("/dev/console", O_RDWR, 0); - if (!file) - goto err_out; - - /* create stdin/stdout/stderr, this should never fail */ - for (i = 0; i < 3; i++) { - if (f_dupfd(i, file, 0) != i) - goto err_out; - } - - return; + /* Open the /dev/console as stdin, this should never fail */ + if (ksys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0) + pr_err("Warning: unable to open an initial console.\n"); -err_out: - /* no panic -- this might not be fatal */ - pr_err("Warning: unable to open an initial console.\n"); - return; + /* create stdout/stderr */ + (void) ksys_dup(0); + (void) ksys_dup(0); } static noinline void __init kernel_init_freeable(void) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 7d40da240891..ed2075884724 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3470,6 +3470,7 @@ static u8 bpf_ctx_convert_map[] = { [_id] = __ctx_convert##_id, #include <linux/bpf_types.h> #undef BPF_PROG_TYPE + 0, /* avoid empty array */ }; #undef BPF_MAP_TYPE diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 4fb20ab179fe..9e43b72eb619 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -35,8 +35,8 @@ void cgroup_bpf_offline(struct cgroup *cgrp) */ static void cgroup_bpf_release(struct work_struct *work) { - struct cgroup *cgrp = container_of(work, struct cgroup, - bpf.release_work); + struct cgroup *p, *cgrp = container_of(work, struct cgroup, + bpf.release_work); enum bpf_cgroup_storage_type stype; struct bpf_prog_array *old_array; unsigned int type; @@ -65,6 +65,9 @@ static void cgroup_bpf_release(struct work_struct *work) mutex_unlock(&cgroup_mutex); + for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p)) + cgroup_bpf_put(p); + percpu_ref_exit(&cgrp->bpf.refcnt); cgroup_put(cgrp); } @@ -199,6 +202,7 @@ int cgroup_bpf_inherit(struct cgroup *cgrp) */ #define NR ARRAY_SIZE(cgrp->bpf.effective) struct bpf_prog_array *arrays[NR] = {}; + struct cgroup *p; int ret, i; ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0, @@ -206,6 +210,9 @@ int cgroup_bpf_inherit(struct cgroup *cgrp) if (ret) return ret; + for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p)) + cgroup_bpf_get(p); + for (i = 0; i < NR; i++) INIT_LIST_HEAD(&cgrp->bpf.progs[i]); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 49e32acad7d8..af6b738cf435 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2043,23 +2043,28 @@ static void bpf_free_cgroup_storage(struct bpf_prog_aux *aux) for_each_cgroup_storage_type(stype) { if (!aux->cgroup_storage[stype]) 
continue; - bpf_cgroup_storage_release(aux->prog, - aux->cgroup_storage[stype]); + bpf_cgroup_storage_release(aux, aux->cgroup_storage[stype]); } } -static void bpf_free_used_maps(struct bpf_prog_aux *aux) +void __bpf_free_used_maps(struct bpf_prog_aux *aux, + struct bpf_map **used_maps, u32 len) { struct bpf_map *map; - int i; + u32 i; bpf_free_cgroup_storage(aux); - for (i = 0; i < aux->used_map_cnt; i++) { - map = aux->used_maps[i]; + for (i = 0; i < len; i++) { + map = used_maps[i]; if (map->ops->map_poke_untrack) map->ops->map_poke_untrack(map, aux); bpf_map_put(map); } +} + +static void bpf_free_used_maps(struct bpf_prog_aux *aux) +{ + __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt); kfree(aux->used_maps); } diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c index 6bd22f6d9f41..33d01866bcc2 100644 --- a/kernel/bpf/local_storage.c +++ b/kernel/bpf/local_storage.c @@ -20,7 +20,7 @@ struct bpf_cgroup_storage_map { struct bpf_map map; spinlock_t lock; - struct bpf_prog *prog; + struct bpf_prog_aux *aux; struct rb_root root; struct list_head list; }; @@ -420,7 +420,7 @@ const struct bpf_map_ops cgroup_storage_map_ops = { .map_seq_show_elem = cgroup_storage_seq_show_elem, }; -int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map) +int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map) { enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map); struct bpf_cgroup_storage_map *map = map_to_storage(_map); @@ -428,14 +428,14 @@ int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map) spin_lock_bh(&map->lock); - if (map->prog && map->prog != prog) + if (map->aux && map->aux != aux) goto unlock; - if (prog->aux->cgroup_storage[stype] && - prog->aux->cgroup_storage[stype] != _map) + if (aux->cgroup_storage[stype] && + aux->cgroup_storage[stype] != _map) goto unlock; - map->prog = prog; - prog->aux->cgroup_storage[stype] = _map; + map->aux = aux; + aux->cgroup_storage[stype] = _map; ret = 0; unlock: spin_unlock_bh(&map->lock); @@ -443,16 +443,16 @@ unlock: return ret; } -void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map) +void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *_map) { enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map); struct bpf_cgroup_storage_map *map = map_to_storage(_map); spin_lock_bh(&map->lock); - if (map->prog == prog) { - WARN_ON(prog->aux->cgroup_storage[stype] != _map); - map->prog = NULL; - prog->aux->cgroup_storage[stype] = NULL; + if (map->aux == aux) { + WARN_ON(aux->cgroup_storage[stype] != _map); + map->aux = NULL; + aux->cgroup_storage[stype] = NULL; } spin_unlock_bh(&map->lock); } diff --git a/kernel/bpf/tnum.c b/kernel/bpf/tnum.c index ca52b9642943..d4f335a9a899 100644 --- a/kernel/bpf/tnum.c +++ b/kernel/bpf/tnum.c @@ -44,14 +44,19 @@ struct tnum tnum_rshift(struct tnum a, u8 shift) return TNUM(a.value >> shift, a.mask >> shift); } -struct tnum tnum_arshift(struct tnum a, u8 min_shift) +struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness) { /* if a.value is negative, arithmetic shifting by minimum shift * will have larger negative offset compared to more shifting. * If a.value is nonnegative, arithmetic shifting by minimum shift * will have larger positive offset compare to more shifting. 
*/ - return TNUM((s64)a.value >> min_shift, (s64)a.mask >> min_shift); + if (insn_bitness == 32) + return TNUM((u32)(((s32)a.value) >> min_shift), + (u32)(((s32)a.mask) >> min_shift)); + else + return TNUM((s64)a.value >> min_shift, + (s64)a.mask >> min_shift); } struct tnum tnum_add(struct tnum a, struct tnum b) diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index 7e89f1f49d77..23b0d5cfd47e 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -3,6 +3,7 @@ #include <linux/hash.h> #include <linux/bpf.h> #include <linux/filter.h> +#include <linux/ftrace.h> /* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */ #define TRAMPOLINE_HASH_BITS 10 @@ -59,6 +60,60 @@ out: return tr; } +static int is_ftrace_location(void *ip) +{ + long addr; + + addr = ftrace_location((long)ip); + if (!addr) + return 0; + if (WARN_ON_ONCE(addr != (long)ip)) + return -EFAULT; + return 1; +} + +static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr) +{ + void *ip = tr->func.addr; + int ret; + + if (tr->func.ftrace_managed) + ret = unregister_ftrace_direct((long)ip, (long)old_addr); + else + ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL); + return ret; +} + +static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr) +{ + void *ip = tr->func.addr; + int ret; + + if (tr->func.ftrace_managed) + ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr); + else + ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr); + return ret; +} + +/* first time registering */ +static int register_fentry(struct bpf_trampoline *tr, void *new_addr) +{ + void *ip = tr->func.addr; + int ret; + + ret = is_ftrace_location(ip); + if (ret < 0) + return ret; + tr->func.ftrace_managed = ret; + + if (tr->func.ftrace_managed) + ret = register_ftrace_direct((long)ip, (long)new_addr); + else + ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr); + return ret; +} + /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 * bytes on x86. Pick a number to fit into PAGE_SIZE / 2 */ @@ -77,8 +132,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr) int err; if (fentry_cnt + fexit_cnt == 0) { - err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL, - old_image, NULL); + err = unregister_fentry(tr, old_image); tr->selector = 0; goto out; } @@ -105,12 +159,10 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr) if (tr->selector) /* progs already running at this address */ - err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL, - old_image, new_image); + err = modify_fentry(tr, old_image, new_image); else /* first time registering */ - err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL, NULL, - new_image); + err = register_fentry(tr, new_image); if (err) goto out; tr->selector++; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 034ef81f935b..7d530ce8719d 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -907,7 +907,8 @@ static const int caller_saved[CALLER_SAVED_REGS] = { BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 }; -static void __mark_reg_not_init(struct bpf_reg_state *reg); +static void __mark_reg_not_init(const struct bpf_verifier_env *env, + struct bpf_reg_state *reg); /* Mark the unknown part of a register (variable offset or scalar value) as * known to have the value @imm. 
@@ -945,7 +946,7 @@ static void mark_reg_known_zero(struct bpf_verifier_env *env, verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) - __mark_reg_not_init(regs + regno); + __mark_reg_not_init(env, regs + regno); return; } __mark_reg_known_zero(regs + regno); @@ -1054,7 +1055,8 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg) } /* Mark a register as having a completely unknown (scalar) value. */ -static void __mark_reg_unknown(struct bpf_reg_state *reg) +static void __mark_reg_unknown(const struct bpf_verifier_env *env, + struct bpf_reg_state *reg) { /* * Clear type, id, off, and union(map_ptr, range) and @@ -1064,6 +1066,8 @@ static void __mark_reg_unknown(struct bpf_reg_state *reg) reg->type = SCALAR_VALUE; reg->var_off = tnum_unknown; reg->frameno = 0; + reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ? + true : false; __mark_reg_unbounded(reg); } @@ -1074,19 +1078,16 @@ static void mark_reg_unknown(struct bpf_verifier_env *env, verbose(env, "mark_reg_unknown(regs, %u)\n", regno); /* Something bad happened, let's kill all regs except FP */ for (regno = 0; regno < BPF_REG_FP; regno++) - __mark_reg_not_init(regs + regno); + __mark_reg_not_init(env, regs + regno); return; } - regs += regno; - __mark_reg_unknown(regs); - /* constant backtracking is enabled for root without bpf2bpf calls */ - regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ? - true : false; + __mark_reg_unknown(env, regs + regno); } -static void __mark_reg_not_init(struct bpf_reg_state *reg) +static void __mark_reg_not_init(const struct bpf_verifier_env *env, + struct bpf_reg_state *reg) { - __mark_reg_unknown(reg); + __mark_reg_unknown(env, reg); reg->type = NOT_INIT; } @@ -1097,10 +1098,10 @@ static void mark_reg_not_init(struct bpf_verifier_env *env, verbose(env, "mark_reg_not_init(regs, %u)\n", regno); /* Something bad happened, let's kill all regs except FP */ for (regno = 0; regno < BPF_REG_FP; regno++) - __mark_reg_not_init(regs + regno); + __mark_reg_not_init(env, regs + regno); return; } - __mark_reg_not_init(regs + regno); + __mark_reg_not_init(env, regs + regno); } #define DEF_NOT_SUBREG (0) @@ -3234,7 +3235,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, } if (state->stack[spi].slot_type[0] == STACK_SPILL && state->stack[spi].spilled_ptr.type == SCALAR_VALUE) { - __mark_reg_unknown(&state->stack[spi].spilled_ptr); + __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); for (j = 0; j < BPF_REG_SIZE; j++) state->stack[spi].slot_type[j] = STACK_MISC; goto mark; @@ -3892,7 +3893,7 @@ static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, if (!reg) continue; if (reg_is_pkt_pointer_any(reg)) - __mark_reg_unknown(reg); + __mark_reg_unknown(env, reg); } } @@ -3920,7 +3921,7 @@ static void release_reg_references(struct bpf_verifier_env *env, if (!reg) continue; if (reg->ref_obj_id == ref_obj_id) - __mark_reg_unknown(reg); + __mark_reg_unknown(env, reg); } } @@ -4134,6 +4135,7 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, struct bpf_map *map = meta->map_ptr; struct tnum range; u64 val; + int err; if (func_id != BPF_FUNC_tail_call) return 0; @@ -4150,6 +4152,10 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, return 0; } + err = mark_chain_precision(env, BPF_REG_3); + if (err) + return err; + val = reg->var_off.value; if (bpf_map_key_unseen(aux)) bpf_map_key_store(aux, val); 
@@ -4577,7 +4583,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, /* Taint dst register if offset had invalid bounds derived from * e.g. dead branches. */ - __mark_reg_unknown(dst_reg); + __mark_reg_unknown(env, dst_reg); return 0; } @@ -4829,13 +4835,13 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, /* Taint dst register if offset had invalid bounds derived from * e.g. dead branches. */ - __mark_reg_unknown(dst_reg); + __mark_reg_unknown(env, dst_reg); return 0; } if (!src_known && opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { - __mark_reg_unknown(dst_reg); + __mark_reg_unknown(env, dst_reg); return 0; } @@ -5043,9 +5049,16 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, /* Upon reaching here, src_known is true and * umax_val is equal to umin_val. */ - dst_reg->smin_value >>= umin_val; - dst_reg->smax_value >>= umin_val; - dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val); + if (insn_bitness == 32) { + dst_reg->smin_value = (u32)(((s32)dst_reg->smin_value) >> umin_val); + dst_reg->smax_value = (u32)(((s32)dst_reg->smax_value) >> umin_val); + } else { + dst_reg->smin_value >>= umin_val; + dst_reg->smax_value >>= umin_val; + } + + dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, + insn_bitness); /* blow away the dst_reg umin_value/umax_value and rely on * dst_reg var_off to refine the result. @@ -6258,6 +6271,7 @@ static bool may_access_skb(enum bpf_prog_type type) static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); + static const int ctx_reg = BPF_REG_6; u8 mode = BPF_MODE(insn->code); int i, err; @@ -6291,7 +6305,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) } /* check whether implicit source operand (register R6) is readable */ - err = check_reg_arg(env, BPF_REG_6, SRC_OP); + err = check_reg_arg(env, ctx_reg, SRC_OP); if (err) return err; @@ -6310,7 +6324,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) return -EINVAL; } - if (regs[BPF_REG_6].type != PTR_TO_CTX) { + if (regs[ctx_reg].type != PTR_TO_CTX) { verbose(env, "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); return -EINVAL; @@ -6323,6 +6337,10 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) return err; } + err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg); + if (err < 0) + return err; + /* reset caller saved regs to unreadable */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); @@ -6977,7 +6995,7 @@ static void clean_func_state(struct bpf_verifier_env *env, /* since the register is unused, clear its state * to make further comparison simpler */ - __mark_reg_not_init(&st->regs[i]); + __mark_reg_not_init(env, &st->regs[i]); } for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { @@ -6985,7 +7003,7 @@ static void clean_func_state(struct bpf_verifier_env *env, /* liveness must not touch this stack slot anymore */ st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; if (!(live & REG_LIVE_READ)) { - __mark_reg_not_init(&st->stack[i].spilled_ptr); + __mark_reg_not_init(env, &st->stack[i].spilled_ptr); for (j = 0; j < BPF_REG_SIZE; j++) st->stack[i].slot_type[j] = STACK_INVALID; } @@ -8268,7 +8286,7 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) env->used_maps[env->used_map_cnt++] = map; if (bpf_map_is_cgroup_storage(map) && - bpf_cgroup_storage_assign(env->prog, map)) { + 
bpf_cgroup_storage_assign(env->prog->aux, map)) { verbose(env, "only one cgroup storage of each type is allowed\n"); fdput(f); return -EBUSY; @@ -8298,18 +8316,8 @@ next_insn: /* drop refcnt of maps used by the rejected program */ static void release_maps(struct bpf_verifier_env *env) { - enum bpf_cgroup_storage_type stype; - int i; - - for_each_cgroup_storage_type(stype) { - if (!env->prog->aux->cgroup_storage[stype]) - continue; - bpf_cgroup_storage_release(env->prog, - env->prog->aux->cgroup_storage[stype]); - } - - for (i = 0; i < env->used_map_cnt; i++) - bpf_map_put(env->used_maps[i]); + __bpf_free_used_maps(env->prog->aux, env->used_maps, + env->used_map_cnt); } /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ @@ -9282,7 +9290,8 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) insn->code = BPF_JMP | BPF_TAIL_CALL; aux = &env->insn_aux_data[i + delta]; - if (prog->jit_requested && !expect_blinding && + if (env->allow_ptr_leaks && !expect_blinding && + prog->jit_requested && !bpf_map_key_poisoned(aux) && !bpf_map_ptr_poisoned(aux) && !bpf_map_ptr_unpriv(aux)) { diff --git a/kernel/cpu.c b/kernel/cpu.c index a59cc980adad..4dc279ed3b2d 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1909,6 +1909,78 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke) } EXPORT_SYMBOL(__cpuhp_remove_state); +#ifdef CONFIG_HOTPLUG_SMT +static void cpuhp_offline_cpu_device(unsigned int cpu) +{ + struct device *dev = get_cpu_device(cpu); + + dev->offline = true; + /* Tell user space about the state change */ + kobject_uevent(&dev->kobj, KOBJ_OFFLINE); +} + +static void cpuhp_online_cpu_device(unsigned int cpu) +{ + struct device *dev = get_cpu_device(cpu); + + dev->offline = false; + /* Tell user space about the state change */ + kobject_uevent(&dev->kobj, KOBJ_ONLINE); +} + +int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) +{ + int cpu, ret = 0; + + cpu_maps_update_begin(); + for_each_online_cpu(cpu) { + if (topology_is_primary_thread(cpu)) + continue; + ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); + if (ret) + break; + /* + * As this needs to hold the cpu maps lock it's impossible + * to call device_offline() because that ends up calling + * cpu_down() which takes cpu maps lock. cpu maps lock + * needs to be held as this might race against in kernel + * abusers of the hotplug machinery (thermal management). + * + * So nothing would update device:offline state. That would + * leave the sysfs entry stale and prevent onlining after + * smt control has been changed to 'off' again. This is + * called under the sysfs hotplug lock, so it is properly + * serialized against the regular offline usage. 
+ */ + cpuhp_offline_cpu_device(cpu); + } + if (!ret) + cpu_smt_control = ctrlval; + cpu_maps_update_done(); + return ret; +} + +int cpuhp_smt_enable(void) +{ + int cpu, ret = 0; + + cpu_maps_update_begin(); + cpu_smt_control = CPU_SMT_ENABLED; + for_each_present_cpu(cpu) { + /* Skip online CPUs and CPUs on offline nodes */ + if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) + continue; + ret = _cpu_up(cpu, 0, CPUHP_ONLINE); + if (ret) + break; + /* See comment in cpuhp_smt_disable() */ + cpuhp_online_cpu_device(cpu); + } + cpu_maps_update_done(); + return ret; +} +#endif + #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU) static ssize_t show_cpuhp_state(struct device *dev, struct device_attribute *attr, char *buf) @@ -2063,77 +2135,6 @@ static const struct attribute_group cpuhp_cpu_root_attr_group = { #ifdef CONFIG_HOTPLUG_SMT -static void cpuhp_offline_cpu_device(unsigned int cpu) -{ - struct device *dev = get_cpu_device(cpu); - - dev->offline = true; - /* Tell user space about the state change */ - kobject_uevent(&dev->kobj, KOBJ_OFFLINE); -} - -static void cpuhp_online_cpu_device(unsigned int cpu) -{ - struct device *dev = get_cpu_device(cpu); - - dev->offline = false; - /* Tell user space about the state change */ - kobject_uevent(&dev->kobj, KOBJ_ONLINE); -} - -int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) -{ - int cpu, ret = 0; - - cpu_maps_update_begin(); - for_each_online_cpu(cpu) { - if (topology_is_primary_thread(cpu)) - continue; - ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); - if (ret) - break; - /* - * As this needs to hold the cpu maps lock it's impossible - * to call device_offline() because that ends up calling - * cpu_down() which takes cpu maps lock. cpu maps lock - * needs to be held as this might race against in kernel - * abusers of the hotplug machinery (thermal management). - * - * So nothing would update device:offline state. That would - * leave the sysfs entry stale and prevent onlining after - * smt control has been changed to 'off' again. This is - * called under the sysfs hotplug lock, so it is properly - * serialized against the regular offline usage. 
- */ - cpuhp_offline_cpu_device(cpu); - } - if (!ret) - cpu_smt_control = ctrlval; - cpu_maps_update_done(); - return ret; -} - -int cpuhp_smt_enable(void) -{ - int cpu, ret = 0; - - cpu_maps_update_begin(); - cpu_smt_control = CPU_SMT_ENABLED; - for_each_present_cpu(cpu) { - /* Skip online CPUs and CPUs on offline nodes */ - if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) - continue; - ret = _cpu_up(cpu, 0, CPUHP_ONLINE); - if (ret) - break; - /* See comment in cpuhp_smt_disable() */ - cpuhp_online_cpu_device(cpu); - } - cpu_maps_update_done(); - return ret; -} - - static ssize_t __store_smt_control(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) diff --git a/kernel/cred.c b/kernel/cred.c index c0a4c12d38b2..809a985b1793 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -175,8 +175,8 @@ void exit_creds(struct task_struct *tsk) put_cred(cred); #ifdef CONFIG_KEYS_REQUEST_CACHE - key_put(current->cached_requested_key); - current->cached_requested_key = NULL; + key_put(tsk->cached_requested_key); + tsk->cached_requested_key = NULL; #endif } @@ -223,7 +223,7 @@ struct cred *cred_alloc_blank(void) new->magic = CRED_MAGIC; #endif - if (security_cred_alloc_blank(new, GFP_KERNEL) < 0) + if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0) goto error; return new; @@ -282,7 +282,7 @@ struct cred *prepare_creds(void) new->security = NULL; #endif - if (security_prepare_creds(new, old, GFP_KERNEL) < 0) + if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0) goto error; validate_creds(new); return new; @@ -715,7 +715,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) #ifdef CONFIG_SECURITY new->security = NULL; #endif - if (security_prepare_creds(new, old, GFP_KERNEL) < 0) + if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0) goto error; put_cred(old); diff --git a/kernel/events/core.c b/kernel/events/core.c index 4ff86d57f9e5..2173c23c25b4 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -10523,7 +10523,7 @@ again: goto unlock; } - list_for_each_entry_rcu(pmu, &pmus, entry) { + list_for_each_entry_rcu(pmu, &pmus, entry, lockdep_is_held(&pmus_srcu)) { ret = perf_try_init_event(pmu, event); if (!ret) goto unlock; @@ -11465,8 +11465,10 @@ SYSCALL_DEFINE5(perf_event_open, } } - if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) + if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) { + err = -EINVAL; goto err_locked; + } /* * Must be under the same ctx::mutex as perf_install_in_context(), diff --git a/kernel/exit.c b/kernel/exit.c index bcbd59888e67..2833ffb0c211 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -517,10 +517,6 @@ static struct task_struct *find_child_reaper(struct task_struct *father, } write_unlock_irq(&tasklist_lock); - if (unlikely(pid_ns == &init_pid_ns)) { - panic("Attempted to kill init! exitcode=0x%08x\n", - father->signal->group_exit_code ?: father->exit_code); - } list_for_each_entry_safe(p, n, dead, ptrace_entry) { list_del_init(&p->ptrace_entry); @@ -766,6 +762,14 @@ void __noreturn do_exit(long code) acct_update_integrals(tsk); group_dead = atomic_dec_and_test(&tsk->signal->live); if (group_dead) { + /* + * If the last thread of global init has exited, panic + * immediately to get a useable coredump. + */ + if (unlikely(is_global_init(tsk))) + panic("Attempted to kill init! 
exitcode=0x%08x\n", + tsk->signal->group_exit_code ?: (int)code); + #ifdef CONFIG_POSIX_TIMERS hrtimer_cancel(&tsk->signal->real_timer); exit_itimers(tsk->signal); diff --git a/kernel/fork.c b/kernel/fork.c index 2508a4f238a3..080809560072 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2578,6 +2578,16 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, #endif #ifdef __ARCH_WANT_SYS_CLONE3 + +/* + * copy_thread implementations handle CLONE_SETTLS by reading the TLS value from + * the registers containing the syscall arguments for clone. This doesn't work + * with clone3 since the TLS value is passed in clone_args instead. + */ +#ifndef CONFIG_HAVE_COPY_THREAD_TLS +#error clone3 requires copy_thread_tls support in arch +#endif + noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs, struct clone_args __user *uargs, size_t usize) diff --git a/kernel/futex.c b/kernel/futex.c index 03c518e9747e..0cf84c8664f2 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1178,6 +1178,7 @@ out_error: /** * wait_for_owner_exiting - Block until the owner has exited + * @ret: owner's current futex lock status * @exiting: Pointer to the exiting task * * Caller must hold a refcount on @exiting. diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 32282e7112d3..32406ef0d6a2 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -482,7 +482,7 @@ static struct lock_trace *save_trace(void) struct lock_trace *trace, *t2; struct hlist_head *hash_head; u32 hash; - unsigned int max_entries; + int max_entries; BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE); BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES); @@ -490,10 +490,8 @@ static struct lock_trace *save_trace(void) trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries); max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries - LOCK_TRACE_SIZE_IN_LONGS; - trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3); - if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES - - LOCK_TRACE_SIZE_IN_LONGS - 1) { + if (max_entries <= 0) { if (!debug_locks_off_graph_unlock()) return NULL; @@ -502,6 +500,7 @@ static struct lock_trace *save_trace(void) return NULL; } + trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3); hash = jhash(trace->entries, trace->nr_entries * sizeof(trace->entries[0]), 0); diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index 54cc5f9286e9..5352ce50a97e 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -733,9 +733,6 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne */ void __sched mutex_unlock(struct mutex *lock) { -#ifdef CONFIG_DEBUG_MUTEXES - WARN_ON(in_interrupt()); -#endif #ifndef CONFIG_DEBUG_LOCK_ALLOC if (__mutex_unlock_fast(lock)) return; @@ -1416,7 +1413,6 @@ int __sched mutex_trylock(struct mutex *lock) #ifdef CONFIG_DEBUG_MUTEXES DEBUG_LOCKS_WARN_ON(lock->magic != lock); - WARN_ON(in_interrupt()); #endif locked = __mutex_trylock(lock); diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index 44e68761f432..0d9b6be9ecc8 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -1226,8 +1226,8 @@ wait: * In this case, we attempt to acquire the lock again * without sleeping. 
*/ - if ((wstate == WRITER_HANDOFF) && - (rwsem_spin_on_owner(sem, 0) == OWNER_NULL)) + if (wstate == WRITER_HANDOFF && + rwsem_spin_on_owner(sem, RWSEM_NONSPINNABLE) == OWNER_NULL) goto trylock_again; /* Block until there are no active lockers. */ diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c index 399669f7eba8..472dd462a40c 100644 --- a/kernel/locking/spinlock_debug.c +++ b/kernel/locking/spinlock_debug.c @@ -51,19 +51,19 @@ EXPORT_SYMBOL(__rwlock_init); static void spin_dump(raw_spinlock_t *lock, const char *msg) { - struct task_struct *owner = NULL; + struct task_struct *owner = READ_ONCE(lock->owner); - if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT) - owner = lock->owner; + if (owner == SPINLOCK_OWNER_INIT) + owner = NULL; printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n", msg, raw_smp_processor_id(), current->comm, task_pid_nr(current)); printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, " ".owner_cpu: %d\n", - lock, lock->magic, + lock, READ_ONCE(lock->magic), owner ? owner->comm : "<none>", owner ? task_pid_nr(owner) : -1, - lock->owner_cpu); + READ_ONCE(lock->owner_cpu)); dump_stack(); } @@ -80,16 +80,16 @@ static void spin_bug(raw_spinlock_t *lock, const char *msg) static inline void debug_spin_lock_before(raw_spinlock_t *lock) { - SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); - SPIN_BUG_ON(lock->owner == current, lock, "recursion"); - SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(), + SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic"); + SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion"); + SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(), lock, "cpu recursion"); } static inline void debug_spin_lock_after(raw_spinlock_t *lock) { - lock->owner_cpu = raw_smp_processor_id(); - lock->owner = current; + WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id()); + WRITE_ONCE(lock->owner, current); } static inline void debug_spin_unlock(raw_spinlock_t *lock) @@ -99,8 +99,8 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock) SPIN_BUG_ON(lock->owner != current, lock, "wrong owner"); SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), lock, "wrong CPU"); - lock->owner = SPINLOCK_OWNER_INIT; - lock->owner_cpu = -1; + WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT); + WRITE_ONCE(lock->owner_cpu, -1); } /* @@ -187,8 +187,8 @@ static inline void debug_write_lock_before(rwlock_t *lock) static inline void debug_write_lock_after(rwlock_t *lock) { - lock->owner_cpu = raw_smp_processor_id(); - lock->owner = current; + WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id()); + WRITE_ONCE(lock->owner, current); } static inline void debug_write_unlock(rwlock_t *lock) @@ -197,8 +197,8 @@ static inline void debug_write_unlock(rwlock_t *lock) RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner"); RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), lock, "wrong CPU"); - lock->owner = SPINLOCK_OWNER_INIT; - lock->owner_cpu = -1; + WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT); + WRITE_ONCE(lock->owner_cpu, -1); } void do_raw_write_lock(rwlock_t *lock) diff --git a/kernel/ptrace.c b/kernel/ptrace.c index cb9ddcc08119..43d6179508d6 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -264,12 +264,17 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state) return ret; } -static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode) +static bool ptrace_has_cap(const struct cred *cred, struct user_namespace *ns, + unsigned int mode) { + 
int ret; + if (mode & PTRACE_MODE_NOAUDIT) - return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE); + ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NOAUDIT); else - return has_ns_capability(current, ns, CAP_SYS_PTRACE); + ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NONE); + + return ret == 0; } /* Returns 0 on success, -errno on denial. */ @@ -321,7 +326,7 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode) gid_eq(caller_gid, tcred->sgid) && gid_eq(caller_gid, tcred->gid)) goto ok; - if (ptrace_has_cap(tcred->user_ns, mode)) + if (ptrace_has_cap(cred, tcred->user_ns, mode)) goto ok; rcu_read_unlock(); return -EPERM; @@ -340,7 +345,7 @@ ok: mm = task->mm; if (mm && ((get_dumpable(mm) != SUID_DUMP_USER) && - !ptrace_has_cap(mm->user_ns, mode))) + !ptrace_has_cap(cred, mm->user_ns, mode))) return -EPERM; return security_ptrace_access_check(task, mode); diff --git a/kernel/rseq.c b/kernel/rseq.c index 27c48eb7de40..a4f86a9d6937 100644 --- a/kernel/rseq.c +++ b/kernel/rseq.c @@ -310,6 +310,8 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, int ret; if (flags & RSEQ_FLAG_UNREGISTER) { + if (flags & ~RSEQ_FLAG_UNREGISTER) + return -EINVAL; /* Unregister rseq for current thread. */ if (current->rseq != rseq || !current->rseq) return -EINVAL; diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c index b5dcd1d83c7f..7c2fe50fd76d 100644 --- a/kernel/sched/cpufreq.c +++ b/kernel/sched/cpufreq.c @@ -5,6 +5,8 @@ * Copyright (C) 2016, Intel Corporation * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> */ +#include <linux/cpufreq.h> + #include "sched.h" DEFINE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); @@ -57,3 +59,19 @@ void cpufreq_remove_update_util_hook(int cpu) rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL); } EXPORT_SYMBOL_GPL(cpufreq_remove_update_util_hook); + +/** + * cpufreq_this_cpu_can_update - Check if cpufreq policy can be updated. + * @policy: cpufreq policy to check. + * + * Return 'true' if: + * - the local and remote CPUs share @policy, + * - dvfs_possible_from_any_cpu is set in @policy and the local CPU is not going + * offline (in which case it is not expected to run cpufreq updates any more). + */ +bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy) +{ + return cpumask_test_cpu(smp_processor_id(), policy->cpus) || + (policy->dvfs_possible_from_any_cpu && + rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data))); +} diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 322ca8860f54..9b8916fd00a2 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -82,12 +82,10 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) * by the hardware, as calculating the frequency is pointless if * we cannot in fact act on it. * - * For the slow switching platforms, the kthread is always scheduled on - * the right set of CPUs and any CPU can find the next frequency and - * schedule the kthread. + * This is needed on the slow switching platforms too to prevent CPUs + * going offline from leaving stale IRQ work items behind. 
*/ - if (sg_policy->policy->fast_switch_enabled && - !cpufreq_this_cpu_can_update(sg_policy->policy)) + if (!cpufreq_this_cpu_can_update(sg_policy->policy)) return false; if (unlikely(sg_policy->limits_changed)) { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 08a233e97a01..ba749f579714 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7328,7 +7328,14 @@ static int detach_tasks(struct lb_env *env) load < 16 && !env->sd->nr_balance_failed) goto next; - if (load/2 > env->imbalance) + /* + * Make sure that we don't migrate too much load. + * Nevertheless, let relax the constraint if + * scheduler fails to find a good waiting task to + * migrate. + */ + if (load/2 > env->imbalance && + env->sd->nr_balance_failed <= env->sd->cache_nice_tries) goto next; env->imbalance -= load; @@ -8417,6 +8424,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, if (!idlest) return NULL; + /* The local group has been skipped because of CPU affinity */ + if (!local) + return idlest; + /* * If the local group is idler than the selected idlest group * don't try and push the task. diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index 517e3719027e..ce8f6748678a 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -185,7 +185,8 @@ static void group_init(struct psi_group *group) for_each_possible_cpu(cpu) seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq); - group->avg_next_update = sched_clock() + psi_period; + group->avg_last_update = sched_clock(); + group->avg_next_update = group->avg_last_update + psi_period; INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work); mutex_init(&group->avgs_lock); /* Init trigger-related members */ @@ -481,7 +482,7 @@ static u64 window_update(struct psi_window *win, u64 now, u64 value) u32 remaining; remaining = win->size - elapsed; - growth += div_u64(win->prev_growth * remaining, win->size); + growth += div64_u64(win->prev_growth * remaining, win->size); } return growth; diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 12d2227e5786..b6ea3dcb57bf 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -1026,6 +1026,13 @@ static long seccomp_notify_recv(struct seccomp_filter *filter, struct seccomp_notif unotif; ssize_t ret; + /* Verify that we're not given garbage to keep struct extensible. */ + ret = check_zeroed_user(buf, sizeof(unotif)); + if (ret < 0) + return ret; + if (!ret) + return -EINVAL; + memset(&unotif, 0, sizeof(unotif)); ret = down_interruptible(&filter->notif->request); diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 13a0f2e6ebc2..e2ac0e37c4ae 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -554,25 +554,33 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk) { struct signal_struct *sig = tsk->signal; - struct taskstats *stats; + struct taskstats *stats_new, *stats; - if (sig->stats || thread_group_empty(tsk)) - goto ret; + /* Pairs with smp_store_release() below. */ + stats = smp_load_acquire(&sig->stats); + if (stats || thread_group_empty(tsk)) + return stats; /* No problem if kmem_cache_zalloc() fails */ - stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL); + stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL); spin_lock_irq(&tsk->sighand->siglock); - if (!sig->stats) { - sig->stats = stats; - stats = NULL; + stats = sig->stats; + if (!stats) { + /* + * Pairs with smp_store_release() above and order the + * kmem_cache_zalloc(). 
+ */ + smp_store_release(&sig->stats, stats_new); + stats = stats_new; + stats_new = NULL; } spin_unlock_irq(&tsk->sighand->siglock); - if (stats) - kmem_cache_free(taskstats_cache, stats); -ret: - return sig->stats; + if (stats_new) + kmem_cache_free(taskstats_cache, stats_new); + + return stats; } /* Send pid data out on exit */ diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c index ec960bb939fd..200fb2d3be99 100644 --- a/kernel/time/posix-clock.c +++ b/kernel/time/posix-clock.c @@ -14,8 +14,6 @@ #include "posix-timers.h" -static void delete_clock(struct kref *kref); - /* * Returns NULL if the posix_clock instance attached to 'fp' is old and stale. */ @@ -125,7 +123,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp) err = 0; if (!err) { - kref_get(&clk->kref); + get_device(clk->dev); fp->private_data = clk; } out: @@ -141,7 +139,7 @@ static int posix_clock_release(struct inode *inode, struct file *fp) if (clk->ops.release) err = clk->ops.release(clk); - kref_put(&clk->kref, delete_clock); + put_device(clk->dev); fp->private_data = NULL; @@ -161,38 +159,35 @@ static const struct file_operations posix_clock_file_operations = { #endif }; -int posix_clock_register(struct posix_clock *clk, dev_t devid) +int posix_clock_register(struct posix_clock *clk, struct device *dev) { int err; - kref_init(&clk->kref); init_rwsem(&clk->rwsem); cdev_init(&clk->cdev, &posix_clock_file_operations); + err = cdev_device_add(&clk->cdev, dev); + if (err) { + pr_err("%s unable to add device %d:%d\n", + dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt)); + return err; + } clk->cdev.owner = clk->ops.owner; - err = cdev_add(&clk->cdev, devid, 1); + clk->dev = dev; - return err; + return 0; } EXPORT_SYMBOL_GPL(posix_clock_register); -static void delete_clock(struct kref *kref) -{ - struct posix_clock *clk = container_of(kref, struct posix_clock, kref); - - if (clk->release) - clk->release(clk); -} - void posix_clock_unregister(struct posix_clock *clk) { - cdev_del(&clk->cdev); + cdev_device_del(&clk->cdev, clk->dev); down_write(&clk->rwsem); clk->zombie = true; up_write(&clk->rwsem); - kref_put(&clk->kref, delete_clock); + put_device(clk->dev); } EXPORT_SYMBOL_GPL(posix_clock_unregister); diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c index 67df65f887ac..20c65a7d4e3a 100644 --- a/kernel/time/posix-stubs.c +++ b/kernel/time/posix-stubs.c @@ -151,6 +151,9 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags, #ifdef CONFIG_COMPAT COMPAT_SYS_NI(timer_create); +#endif + +#if defined(CONFIG_COMPAT) || defined(CONFIG_ALPHA) COMPAT_SYS_NI(getitimer); COMPAT_SYS_NI(setitimer); #endif diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 8b192e67aabc..a792d21cac64 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -58,8 +58,9 @@ static void tick_do_update_jiffies64(ktime_t now) /* * Do a quick check without holding jiffies_lock: + * The READ_ONCE() pairs with two updates done later in this function. */ - delta = ktime_sub(now, last_jiffies_update); + delta = ktime_sub(now, READ_ONCE(last_jiffies_update)); if (delta < tick_period) return; @@ -70,8 +71,9 @@ static void tick_do_update_jiffies64(ktime_t now) if (delta >= tick_period) { delta = ktime_sub(delta, tick_period); - last_jiffies_update = ktime_add(last_jiffies_update, - tick_period); + /* Pairs with the lockless read in this function. 
*/ + WRITE_ONCE(last_jiffies_update, + ktime_add(last_jiffies_update, tick_period)); /* Slow path for long timeouts */ if (unlikely(delta >= tick_period)) { @@ -79,8 +81,10 @@ static void tick_do_update_jiffies64(ktime_t now) ticks = ktime_divns(delta, incr); - last_jiffies_update = ktime_add_ns(last_jiffies_update, - incr * ticks); + /* Pairs with the lockless read in this function. */ + WRITE_ONCE(last_jiffies_update, + ktime_add_ns(last_jiffies_update, + incr * ticks)); } do_timer(++ticks); diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c index a2659735db73..1af321dec0f1 100644 --- a/kernel/trace/fgraph.c +++ b/kernel/trace/fgraph.c @@ -96,6 +96,20 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, return 0; } +/* + * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct + * functions. But those archs currently don't support direct functions + * anyway, and ftrace_find_rec_direct() is just a stub for them. + * Define MCOUNT_INSN_SIZE to keep those archs compiling. + */ +#ifndef MCOUNT_INSN_SIZE +/* Make sure this only works without direct calls */ +# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS +# error MCOUNT_INSN_SIZE not defined with direct calls enabled +# endif +# define MCOUNT_INSN_SIZE 0 +#endif + int function_graph_enter(unsigned long ret, unsigned long func, unsigned long frame_pointer, unsigned long *retp) { diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index ac99a3500076..9bf1f2cd515e 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -526,8 +526,7 @@ static int function_stat_show(struct seq_file *m, void *v) } #ifdef CONFIG_FUNCTION_GRAPH_TRACER - avg = rec->time; - do_div(avg, rec->counter); + avg = div64_ul(rec->time, rec->counter); if (tracing_thresh && (avg < tracing_thresh)) goto out; #endif @@ -553,7 +552,8 @@ static int function_stat_show(struct seq_file *m, void *v) * Divide only 1000 for ns^2 -> us^2 conversion. * trace_print_graph_duration will divide 1000 again. 
*/ - do_div(stddev, rec->counter * (rec->counter - 1) * 1000); + stddev = div64_ul(stddev, + rec->counter * (rec->counter - 1) * 1000); } trace_seq_init(&s); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 6c75410f9698..ddb7e7f5fe8d 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4685,6 +4685,10 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) { + if ((mask == TRACE_ITER_RECORD_TGID) || + (mask == TRACE_ITER_RECORD_CMD)) + lockdep_assert_held(&event_mutex); + /* do nothing if flag is already set */ if (!!(tr->trace_flags & mask) == !!enabled) return 0; @@ -4752,6 +4756,7 @@ static int trace_set_options(struct trace_array *tr, char *option) cmp += len; + mutex_lock(&event_mutex); mutex_lock(&trace_types_lock); ret = match_string(trace_options, -1, cmp); @@ -4762,6 +4767,7 @@ static int trace_set_options(struct trace_array *tr, char *option) ret = set_tracer_flag(tr, 1 << ret, !neg); mutex_unlock(&trace_types_lock); + mutex_unlock(&event_mutex); /* * If the first trailing whitespace is replaced with '\0' by strstrip, @@ -8076,9 +8082,11 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, if (val != 0 && val != 1) return -EINVAL; + mutex_lock(&event_mutex); mutex_lock(&trace_types_lock); ret = set_tracer_flag(tr, 1 << index, val); mutex_unlock(&trace_types_lock); + mutex_unlock(&event_mutex); if (ret < 0) return ret; diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index c6de3cebc127..a5b614cc3887 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -320,7 +320,8 @@ void trace_event_enable_cmd_record(bool enable) struct trace_event_file *file; struct trace_array *tr; - mutex_lock(&event_mutex); + lockdep_assert_held(&event_mutex); + do_for_each_event_file(tr, file) { if (!(file->flags & EVENT_FILE_FL_ENABLED)) @@ -334,7 +335,6 @@ void trace_event_enable_cmd_record(bool enable) clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); } } while_for_each_event_file(); - mutex_unlock(&event_mutex); } void trace_event_enable_tgid_record(bool enable) @@ -342,7 +342,8 @@ void trace_event_enable_tgid_record(bool enable) struct trace_event_file *file; struct trace_array *tr; - mutex_lock(&event_mutex); + lockdep_assert_held(&event_mutex); + do_for_each_event_file(tr, file) { if (!(file->flags & EVENT_FILE_FL_ENABLED)) continue; @@ -356,7 +357,6 @@ void trace_event_enable_tgid_record(bool enable) &file->flags); } } while_for_each_event_file(); - mutex_unlock(&event_mutex); } static int __ftrace_event_enable_disable(struct trace_event_file *file, diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index c9a74f82b14a..bf44f6bbd0c3 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -1662,7 +1662,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir, parse_error(pe, FILT_ERR_BAD_SUBSYS_FILTER, 0); return -EINVAL; fail_mem: - kfree(filter); + __free_filter(filter); /* If any call succeeded, we still need to sync */ if (!fail) tracepoint_synchronize_unregister(); diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index f49d1a36d3ae..f62de5f43e79 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -911,7 +911,26 @@ static notrace void trace_event_raw_event_synth(void *__data, strscpy(str_field, str_val, STR_VAR_LEN_MAX); n_u64 += STR_VAR_LEN_MAX / 
sizeof(u64); } else { - entry->fields[n_u64] = var_ref_vals[var_ref_idx + i]; + struct synth_field *field = event->fields[i]; + u64 val = var_ref_vals[var_ref_idx + i]; + + switch (field->size) { + case 1: + *(u8 *)&entry->fields[n_u64] = (u8)val; + break; + + case 2: + *(u16 *)&entry->fields[n_u64] = (u16)val; + break; + + case 4: + *(u32 *)&entry->fields[n_u64] = (u32)val; + break; + + default: + entry->fields[n_u64] = val; + break; + } n_u64++; } } diff --git a/kernel/trace/trace_events_inject.c b/kernel/trace/trace_events_inject.c index d45079ee62f8..22bcf7c51d1e 100644 --- a/kernel/trace/trace_events_inject.c +++ b/kernel/trace/trace_events_inject.c @@ -195,7 +195,7 @@ static int parse_entry(char *str, struct trace_event_call *call, void **pentry) unsigned long irq_flags; void *entry = NULL; int entry_size; - u64 val; + u64 val = 0; int len; entry = trace_alloc_entry(call, &entry_size); diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 5e43b9664eca..617e297f46dc 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -630,7 +630,7 @@ static void start_wakeup_tracer(struct trace_array *tr) if (ret) { pr_info("wakeup trace: Couldn't activate tracepoint" " probe to kernel_sched_migrate_task\n"); - return; + goto fail_deprobe_sched_switch; } wakeup_reset(tr); @@ -648,6 +648,8 @@ static void start_wakeup_tracer(struct trace_array *tr) printk(KERN_ERR "failed to start wakeup tracer\n"); return; +fail_deprobe_sched_switch: + unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL); fail_deprobe_wake_new: unregister_trace_sched_wakeup_new(probe_wakeup, NULL); fail_deprobe: diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c index 344e4c1aa09c..87de6edafd14 100644 --- a/kernel/trace/trace_seq.c +++ b/kernel/trace/trace_seq.c @@ -381,7 +381,7 @@ int trace_seq_hex_dump(struct trace_seq *s, const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, bool ascii) { - unsigned int save_len = s->seq.len; + unsigned int save_len = s->seq.len; if (s->full) return 0; diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 4df9a209f7ca..c557f42a9397 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -283,6 +283,11 @@ static void check_stack(unsigned long ip, unsigned long *stack) local_irq_restore(flags); } +/* Some archs may not define MCOUNT_INSN_SIZE */ +#ifndef MCOUNT_INSN_SIZE +# define MCOUNT_INSN_SIZE 0 +#endif + static void stack_trace_call(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct pt_regs *pt_regs) diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c index 9a1c22310323..9e31bfc818ff 100644 --- a/kernel/trace/tracing_map.c +++ b/kernel/trace/tracing_map.c @@ -148,8 +148,8 @@ static int tracing_map_cmp_atomic64(void *val_a, void *val_b) #define DEFINE_TRACING_MAP_CMP_FN(type) \ static int tracing_map_cmp_##type(void *val_a, void *val_b) \ { \ - type a = *(type *)val_a; \ - type b = *(type *)val_b; \ + type a = (type)(*(u64 *)val_a); \ + type b = (type)(*(u64 *)val_b); \ \ return (a > b) ? 1 : ((a < b) ? -1 : 0); \ } diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index d1842fe756d5..5ffe144c9794 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1483,6 +1483,55 @@ config PROVIDE_OHCI1394_DMA_INIT See Documentation/debugging-via-ohci1394.txt for more information. 
+source "samples/Kconfig" + +config ARCH_HAS_DEVMEM_IS_ALLOWED + bool + +config STRICT_DEVMEM + bool "Filter access to /dev/mem" + depends on MMU && DEVMEM + depends on ARCH_HAS_DEVMEM_IS_ALLOWED + default y if PPC || X86 || ARM64 + help + If this option is disabled, you allow userspace (root) access to all + of memory, including kernel and userspace memory. Accidental + access to this is obviously disastrous, but specific access can + be used by people debugging the kernel. Note that with PAT support + enabled, even in this case there are restrictions on /dev/mem + use due to the cache aliasing requirements. + + If this option is switched on, and IO_STRICT_DEVMEM=n, the /dev/mem + file only allows userspace access to PCI space and the BIOS code and + data regions. This is sufficient for dosemu and X and all common + users of /dev/mem. + + If in doubt, say Y. + +config IO_STRICT_DEVMEM + bool "Filter I/O access to /dev/mem" + depends on STRICT_DEVMEM + help + If this option is disabled, you allow userspace (root) access to all + io-memory regardless of whether a driver is actively using that + range. Accidental access to this is obviously disastrous, but + specific access can be used by people debugging kernel drivers. + + If this option is switched on, the /dev/mem file only allows + userspace access to *idle* io-memory ranges (see /proc/iomem) This + may break traditional users of /dev/mem (dosemu, legacy X, etc...) + if the driver using a given range cannot be disabled. + + If in doubt, say Y. + +menu "$(SRCARCH) Debugging" + +source "arch/$(SRCARCH)/Kconfig.debug" + +endmenu + +menu "Kernel Testing and Coverage" + source "lib/kunit/Kconfig" config NOTIFIER_ERROR_INJECTION @@ -1643,10 +1692,6 @@ config FAULT_INJECTION_STACKTRACE_FILTER help Provide stacktrace filter for fault-injection capabilities -endmenu # "Kernel Testing and Coverage" - -menu "Kernel Testing and Coverage" - config ARCH_HAS_KCOV bool help @@ -2130,52 +2175,7 @@ config MEMTEST memtest=17, mean do 17 test patterns. If you are unsure how to answer this question, answer N. -source "samples/Kconfig" - -config ARCH_HAS_DEVMEM_IS_ALLOWED - bool - -config STRICT_DEVMEM - bool "Filter access to /dev/mem" - depends on MMU && DEVMEM - depends on ARCH_HAS_DEVMEM_IS_ALLOWED - default y if PPC || X86 || ARM64 - ---help--- - If this option is disabled, you allow userspace (root) access to all - of memory, including kernel and userspace memory. Accidental - access to this is obviously disastrous, but specific access can - be used by people debugging the kernel. Note that with PAT support - enabled, even in this case there are restrictions on /dev/mem - use due to the cache aliasing requirements. - - If this option is switched on, and IO_STRICT_DEVMEM=n, the /dev/mem - file only allows userspace access to PCI space and the BIOS code and - data regions. This is sufficient for dosemu and X and all common - users of /dev/mem. - - If in doubt, say Y. -config IO_STRICT_DEVMEM - bool "Filter I/O access to /dev/mem" - depends on STRICT_DEVMEM - ---help--- - If this option is disabled, you allow userspace (root) access to all - io-memory regardless of whether a driver is actively using that - range. Accidental access to this is obviously disastrous, but - specific access can be used by people debugging kernel drivers. - - If this option is switched on, the /dev/mem file only allows - userspace access to *idle* io-memory ranges (see /proc/iomem) This - may break traditional users of /dev/mem (dosemu, legacy X, etc...) 
- if the driver using a given range cannot be disabled. - - If in doubt, say Y. - -menu "$(SRCARCH) Debugging" - -source "arch/$(SRCARCH)/Kconfig.debug" - -endmenu config HYPERV_TESTING bool "Microsoft Hyper-V driver testing" @@ -2184,4 +2184,6 @@ config HYPERV_TESTING help Select this option to enable Hyper-V vmbus testing. +endmenu # "Kernel Testing and Coverage" + endmenu # Kernel hacking diff --git a/lib/iov_iter.c b/lib/iov_iter.c index fb29c02c6a3c..51595bf3af85 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -1222,11 +1222,12 @@ EXPORT_SYMBOL(iov_iter_discard); unsigned long iov_iter_alignment(const struct iov_iter *i) { - unsigned int p_mask = i->pipe->ring_size - 1; unsigned long res = 0; size_t size = i->count; if (unlikely(iov_iter_is_pipe(i))) { + unsigned int p_mask = i->pipe->ring_size - 1; + if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask])) return size | i->iov_offset; return size; diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 33feec8989f1..af88d1346dd7 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -650,8 +650,8 @@ void sbitmap_add_wait_queue(struct sbitmap_queue *sbq, if (!sbq_wait->sbq) { sbq_wait->sbq = sbq; atomic_inc(&sbq->ws_active); + add_wait_queue(&ws->wait, &sbq_wait->wait); } - add_wait_queue(&ws->wait, &sbq_wait->wait); } EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue); diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c index 9ecfd3b547ba..42bd8ab955fa 100644 --- a/lib/vdso/gettimeofday.c +++ b/lib/vdso/gettimeofday.c @@ -221,6 +221,7 @@ int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res) return 0; } +static __maybe_unused int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res) { int ret = __cvdso_clock_getres_common(clock, res); diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c index 7dd602d7f8db..ad9d5b1c4473 100644 --- a/mm/gup_benchmark.c +++ b/mm/gup_benchmark.c @@ -26,6 +26,7 @@ static int __gup_benchmark_ioctl(unsigned int cmd, unsigned long i, nr_pages, addr, next; int nr; struct page **pages; + int ret = 0; if (gup->size > ULONG_MAX) return -EINVAL; @@ -63,7 +64,9 @@ static int __gup_benchmark_ioctl(unsigned int cmd, NULL); break; default: - return -1; + kvfree(pages); + ret = -EINVAL; + goto out; } if (nr <= 0) @@ -85,7 +88,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd, gup->put_delta_usec = ktime_us_delta(end_time, start_time); kvfree(pages); - return 0; +out: + return ret; } static long gup_benchmark_ioctl(struct file *filep, unsigned int cmd, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 41a0fbddc96b..a88093213674 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -527,13 +527,13 @@ void prep_transhuge_page(struct page *page) set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR); } -static unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len, +static unsigned long __thp_get_unmapped_area(struct file *filp, + unsigned long addr, unsigned long len, loff_t off, unsigned long flags, unsigned long size) { - unsigned long addr; loff_t off_end = off + len; loff_t off_align = round_up(off, size); - unsigned long len_pad; + unsigned long len_pad, ret; if (off_end <= off_align || (off_end - off_align) < size) return 0; @@ -542,30 +542,40 @@ static unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long le if (len_pad < len || (off + len_pad) < off) return 0; - addr = current->mm->get_unmapped_area(filp, 0, len_pad, + ret = current->mm->get_unmapped_area(filp, addr, len_pad, off >> PAGE_SHIFT, flags); - if 
(IS_ERR_VALUE(addr)) + + /* + * The failure might be due to length padding. The caller will retry + * without the padding. + */ + if (IS_ERR_VALUE(ret)) return 0; - addr += (off - addr) & (size - 1); - return addr; + /* + * Do not try to align to THP boundary if allocation at the address + * hint succeeds. + */ + if (ret == addr) + return addr; + + ret += (off - ret) & (size - 1); + return ret; } unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { + unsigned long ret; loff_t off = (loff_t)pgoff << PAGE_SHIFT; - if (addr) - goto out; if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD)) goto out; - addr = __thp_get_unmapped_area(filp, len, off, flags, PMD_SIZE); - if (addr) - return addr; - - out: + ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE); + if (ret) + return ret; +out: return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); } EXPORT_SYMBOL_GPL(thp_get_unmapped_area); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index ac65bb5e38ac..dd8737a94bec 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -27,6 +27,7 @@ #include <linux/swapops.h> #include <linux/jhash.h> #include <linux/numa.h> +#include <linux/llist.h> #include <asm/page.h> #include <asm/pgtable.h> @@ -1136,7 +1137,7 @@ static inline void ClearPageHugeTemporary(struct page *page) page[2].mapping = NULL; } -void free_huge_page(struct page *page) +static void __free_huge_page(struct page *page) { /* * Can't pass hstate in here because it is called from the @@ -1199,6 +1200,54 @@ void free_huge_page(struct page *page) spin_unlock(&hugetlb_lock); } +/* + * As free_huge_page() can be called from a non-task context, we have + * to defer the actual freeing in a workqueue to prevent potential + * hugetlb_lock deadlock. + * + * free_hpage_workfn() locklessly retrieves the linked list of pages to + * be freed and frees them one-by-one. As the page->mapping pointer is + * going to be cleared in __free_huge_page() anyway, it is reused as the + * llist_node structure of a lockless linked list of huge pages to be freed. + */ +static LLIST_HEAD(hpage_freelist); + +static void free_hpage_workfn(struct work_struct *work) +{ + struct llist_node *node; + struct page *page; + + node = llist_del_all(&hpage_freelist); + + while (node) { + page = container_of((struct address_space **)node, + struct page, mapping); + node = node->next; + __free_huge_page(page); + } +} +static DECLARE_WORK(free_hpage_work, free_hpage_workfn); + +void free_huge_page(struct page *page) +{ + /* + * Defer freeing if in non-task context to avoid hugetlb_lock deadlock. + */ + if (!in_task()) { + /* + * Only call schedule_work() if hpage_freelist is previously + * empty. Otherwise, schedule_work() had been called but the + * workfn hasn't retrieved the list yet. 
+ */ + if (llist_add((struct llist_node *)&page->mapping, + &hpage_freelist)) + schedule_work(&free_hpage_work); + return; + } + + __free_huge_page(page); +} + static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) { INIT_LIST_HEAD(&page->lru); diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 2fa710bb6358..c15d8ae68c96 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -778,15 +778,17 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr, return 0; } -int kasan_populate_vmalloc(unsigned long requested_size, struct vm_struct *area) +int kasan_populate_vmalloc(unsigned long addr, unsigned long size) { unsigned long shadow_start, shadow_end; int ret; - shadow_start = (unsigned long)kasan_mem_to_shadow(area->addr); + if (!is_vmalloc_or_module_addr((void *)addr)) + return 0; + + shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr); shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE); - shadow_end = (unsigned long)kasan_mem_to_shadow(area->addr + - area->size); + shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size); shadow_end = ALIGN(shadow_end, PAGE_SIZE); ret = apply_to_page_range(&init_mm, shadow_start, @@ -797,10 +799,6 @@ int kasan_populate_vmalloc(unsigned long requested_size, struct vm_struct *area) flush_cache_vmap(shadow_start, shadow_end); - kasan_unpoison_shadow(area->addr, requested_size); - - area->flags |= VM_KASAN; - /* * We need to be careful about inter-cpu effects here. Consider: * @@ -843,12 +841,23 @@ int kasan_populate_vmalloc(unsigned long requested_size, struct vm_struct *area) * Poison the shadow for a vmalloc region. Called as part of the * freeing process at the time the region is freed. */ -void kasan_poison_vmalloc(void *start, unsigned long size) +void kasan_poison_vmalloc(const void *start, unsigned long size) { + if (!is_vmalloc_or_module_addr(start)) + return; + size = round_up(size, KASAN_SHADOW_SCALE_SIZE); kasan_poison_shadow(start, size, KASAN_VMALLOC_INVALID); } +void kasan_unpoison_vmalloc(const void *start, unsigned long size) +{ + if (!is_vmalloc_or_module_addr(start)) + return; + + kasan_unpoison_shadow(start, size); +} + static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr, void *unused) { @@ -948,6 +957,7 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end, { void *shadow_start, *shadow_end; unsigned long region_start, region_end; + unsigned long size; region_start = ALIGN(start, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE); region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE); @@ -970,9 +980,11 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end, shadow_end = kasan_mem_to_shadow((void *)region_end); if (shadow_end > shadow_start) { - apply_to_page_range(&init_mm, (unsigned long)shadow_start, - (unsigned long)(shadow_end - shadow_start), - kasan_depopulate_vmalloc_pte, NULL); + size = shadow_end - shadow_start; + apply_to_existing_page_range(&init_mm, + (unsigned long)shadow_start, + size, kasan_depopulate_vmalloc_pte, + NULL); flush_tlb_kernel_range((unsigned long)shadow_start, (unsigned long)shadow_end); } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c5b5f74cfd4d..6c83cf4ed970 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3287,49 +3287,34 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, } } -static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg, bool slab_only) +static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg) { - unsigned long stat[MEMCG_NR_STAT]; + 
unsigned long stat[MEMCG_NR_STAT] = {0}; struct mem_cgroup *mi; int node, cpu, i; - int min_idx, max_idx; - - if (slab_only) { - min_idx = NR_SLAB_RECLAIMABLE; - max_idx = NR_SLAB_UNRECLAIMABLE; - } else { - min_idx = 0; - max_idx = MEMCG_NR_STAT; - } - - for (i = min_idx; i < max_idx; i++) - stat[i] = 0; for_each_online_cpu(cpu) - for (i = min_idx; i < max_idx; i++) + for (i = 0; i < MEMCG_NR_STAT; i++) stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu); for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) - for (i = min_idx; i < max_idx; i++) + for (i = 0; i < MEMCG_NR_STAT; i++) atomic_long_add(stat[i], &mi->vmstats[i]); - if (!slab_only) - max_idx = NR_VM_NODE_STAT_ITEMS; - for_each_node(node) { struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; struct mem_cgroup_per_node *pi; - for (i = min_idx; i < max_idx; i++) + for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) stat[i] = 0; for_each_online_cpu(cpu) - for (i = min_idx; i < max_idx; i++) + for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) stat[i] += per_cpu( pn->lruvec_stat_cpu->count[i], cpu); for (pi = pn; pi; pi = parent_nodeinfo(pi, node)) - for (i = min_idx; i < max_idx; i++) + for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) atomic_long_add(stat[i], &pi->lruvec_stat[i]); } } @@ -3403,13 +3388,9 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg) parent = root_mem_cgroup; /* - * Deactivate and reparent kmem_caches. Then flush percpu - * slab statistics to have precise values at the parent and - * all ancestor levels. It's required to keep slab stats - * accurate after the reparenting of kmem_caches. + * Deactivate and reparent kmem_caches. */ memcg_deactivate_kmem_caches(memcg, parent); - memcg_flush_percpu_vmstats(memcg, true); kmemcg_id = memcg->kmemcg_id; BUG_ON(kmemcg_id < 0); @@ -4913,7 +4894,7 @@ static void mem_cgroup_free(struct mem_cgroup *memcg) * Flush percpu vmstats and vmevents to guarantee the value correctness * on parent's and all ancestor levels. */ - memcg_flush_percpu_vmstats(memcg, false); + memcg_flush_percpu_vmstats(memcg); memcg_flush_percpu_vmevents(memcg); __mem_cgroup_free(memcg); } diff --git a/mm/memory.c b/mm/memory.c index 606da187d1de..45442d9a4f52 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2021,26 +2021,34 @@ EXPORT_SYMBOL(vm_iomap_memory); static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, - pte_fn_t fn, void *data) + pte_fn_t fn, void *data, bool create) { pte_t *pte; - int err; + int err = 0; spinlock_t *uninitialized_var(ptl); - pte = (mm == &init_mm) ? - pte_alloc_kernel(pmd, addr) : - pte_alloc_map_lock(mm, pmd, addr, &ptl); - if (!pte) - return -ENOMEM; + if (create) { + pte = (mm == &init_mm) ? + pte_alloc_kernel(pmd, addr) : + pte_alloc_map_lock(mm, pmd, addr, &ptl); + if (!pte) + return -ENOMEM; + } else { + pte = (mm == &init_mm) ? 
+ pte_offset_kernel(pmd, addr) : + pte_offset_map_lock(mm, pmd, addr, &ptl); + } BUG_ON(pmd_huge(*pmd)); arch_enter_lazy_mmu_mode(); do { - err = fn(pte++, addr, data); - if (err) - break; + if (create || !pte_none(*pte)) { + err = fn(pte++, addr, data); + if (err) + break; + } } while (addr += PAGE_SIZE, addr != end); arch_leave_lazy_mmu_mode(); @@ -2052,77 +2060,95 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, unsigned long addr, unsigned long end, - pte_fn_t fn, void *data) + pte_fn_t fn, void *data, bool create) { pmd_t *pmd; unsigned long next; - int err; + int err = 0; BUG_ON(pud_huge(*pud)); - pmd = pmd_alloc(mm, pud, addr); - if (!pmd) - return -ENOMEM; + if (create) { + pmd = pmd_alloc(mm, pud, addr); + if (!pmd) + return -ENOMEM; + } else { + pmd = pmd_offset(pud, addr); + } do { next = pmd_addr_end(addr, end); - err = apply_to_pte_range(mm, pmd, addr, next, fn, data); - if (err) - break; + if (create || !pmd_none_or_clear_bad(pmd)) { + err = apply_to_pte_range(mm, pmd, addr, next, fn, data, + create); + if (err) + break; + } } while (pmd++, addr = next, addr != end); return err; } static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d, unsigned long addr, unsigned long end, - pte_fn_t fn, void *data) + pte_fn_t fn, void *data, bool create) { pud_t *pud; unsigned long next; - int err; + int err = 0; - pud = pud_alloc(mm, p4d, addr); - if (!pud) - return -ENOMEM; + if (create) { + pud = pud_alloc(mm, p4d, addr); + if (!pud) + return -ENOMEM; + } else { + pud = pud_offset(p4d, addr); + } do { next = pud_addr_end(addr, end); - err = apply_to_pmd_range(mm, pud, addr, next, fn, data); - if (err) - break; + if (create || !pud_none_or_clear_bad(pud)) { + err = apply_to_pmd_range(mm, pud, addr, next, fn, data, + create); + if (err) + break; + } } while (pud++, addr = next, addr != end); return err; } static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, unsigned long addr, unsigned long end, - pte_fn_t fn, void *data) + pte_fn_t fn, void *data, bool create) { p4d_t *p4d; unsigned long next; - int err; + int err = 0; - p4d = p4d_alloc(mm, pgd, addr); - if (!p4d) - return -ENOMEM; + if (create) { + p4d = p4d_alloc(mm, pgd, addr); + if (!p4d) + return -ENOMEM; + } else { + p4d = p4d_offset(pgd, addr); + } do { next = p4d_addr_end(addr, end); - err = apply_to_pud_range(mm, p4d, addr, next, fn, data); - if (err) - break; + if (create || !p4d_none_or_clear_bad(p4d)) { + err = apply_to_pud_range(mm, p4d, addr, next, fn, data, + create); + if (err) + break; + } } while (p4d++, addr = next, addr != end); return err; } -/* - * Scan a region of virtual memory, filling in page tables as necessary - * and calling a provided function on each leaf page table. 
- */ -int apply_to_page_range(struct mm_struct *mm, unsigned long addr, - unsigned long size, pte_fn_t fn, void *data) +static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, + unsigned long size, pte_fn_t fn, + void *data, bool create) { pgd_t *pgd; unsigned long next; unsigned long end = addr + size; - int err; + int err = 0; if (WARN_ON(addr >= end)) return -EINVAL; @@ -2130,16 +2156,42 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr, pgd = pgd_offset(mm, addr); do { next = pgd_addr_end(addr, end); - err = apply_to_p4d_range(mm, pgd, addr, next, fn, data); + if (!create && pgd_none_or_clear_bad(pgd)) + continue; + err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create); if (err) break; } while (pgd++, addr = next, addr != end); return err; } + +/* + * Scan a region of virtual memory, filling in page tables as necessary + * and calling a provided function on each leaf page table. + */ +int apply_to_page_range(struct mm_struct *mm, unsigned long addr, + unsigned long size, pte_fn_t fn, void *data) +{ + return __apply_to_page_range(mm, addr, size, fn, data, true); +} EXPORT_SYMBOL_GPL(apply_to_page_range); /* + * Scan a region of virtual memory, calling a provided function on + * each leaf page table where it exists. + * + * Unlike apply_to_page_range, this does _not_ fill in page tables + * where they are absent. + */ +int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr, + unsigned long size, pte_fn_t fn, void *data) +{ + return __apply_to_page_range(mm, addr, size, fn, data, false); +} +EXPORT_SYMBOL_GPL(apply_to_existing_page_range); + +/* * handle_pte_fault chooses page fault handler according to an entry which was * read non-atomically. Before making any commitment, on those architectures * or configurations (e.g. 
i386 with PAE) which might give a mix of unmatched diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 55ac23ef11c1..a91a072f2b2c 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -483,8 +483,9 @@ static void update_pgdat_span(struct pglist_data *pgdat) pgdat->node_spanned_pages = node_end_pfn - node_start_pfn; } -static void __remove_zone(struct zone *zone, unsigned long start_pfn, - unsigned long nr_pages) +void __ref remove_pfn_range_from_zone(struct zone *zone, + unsigned long start_pfn, + unsigned long nr_pages) { struct pglist_data *pgdat = zone->zone_pgdat; unsigned long flags; @@ -499,28 +500,30 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn, return; #endif + clear_zone_contiguous(zone); + pgdat_resize_lock(zone->zone_pgdat, &flags); shrink_zone_span(zone, start_pfn, start_pfn + nr_pages); update_pgdat_span(pgdat); pgdat_resize_unlock(zone->zone_pgdat, &flags); + + set_zone_contiguous(zone); } -static void __remove_section(struct zone *zone, unsigned long pfn, - unsigned long nr_pages, unsigned long map_offset, - struct vmem_altmap *altmap) +static void __remove_section(unsigned long pfn, unsigned long nr_pages, + unsigned long map_offset, + struct vmem_altmap *altmap) { struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn)); if (WARN_ON_ONCE(!valid_section(ms))) return; - __remove_zone(zone, pfn, nr_pages); sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap); } /** - * __remove_pages() - remove sections of pages from a zone - * @zone: zone from which pages need to be removed + * __remove_pages() - remove sections of pages * @pfn: starting pageframe (must be aligned to start of a section) * @nr_pages: number of pages to remove (must be multiple of section size) * @altmap: alternative device page map or %NULL if default memmap is used @@ -530,16 +533,14 @@ static void __remove_section(struct zone *zone, unsigned long pfn, * sure that pages are marked reserved and zones are adjust properly by * calling offline_pages(). 
*/ -void __remove_pages(struct zone *zone, unsigned long pfn, - unsigned long nr_pages, struct vmem_altmap *altmap) +void __remove_pages(unsigned long pfn, unsigned long nr_pages, + struct vmem_altmap *altmap) { unsigned long map_offset = 0; unsigned long nr, start_sec, end_sec; map_offset = vmem_altmap_offset(altmap); - clear_zone_contiguous(zone); - if (check_pfn_span(pfn, nr_pages, "remove")) return; @@ -551,13 +552,11 @@ void __remove_pages(struct zone *zone, unsigned long pfn, cond_resched(); pfns = min(nr_pages, PAGES_PER_SECTION - (pfn & ~PAGE_SECTION_MASK)); - __remove_section(zone, pfn, pfns, map_offset, altmap); + __remove_section(pfn, pfns, map_offset, altmap); pfn += pfns; nr_pages -= pfns; map_offset = 0; } - - set_zone_contiguous(zone); } int set_online_page_callback(online_page_callback_t callback) @@ -869,6 +868,7 @@ failed_addition: (unsigned long long) pfn << PAGE_SHIFT, (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1); memory_notify(MEM_CANCEL_ONLINE, &arg); + remove_pfn_range_from_zone(zone, pfn, nr_pages); mem_hotplug_done(); return ret; } @@ -1628,6 +1628,7 @@ static int __ref __offline_pages(unsigned long start_pfn, writeback_set_ratelimit(); memory_notify(MEM_OFFLINE, &arg); + remove_pfn_range_from_zone(zone, start_pfn, nr_pages); mem_hotplug_done(); return 0; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 067cf7d3daf5..b2920ae87a61 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2148,18 +2148,22 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, nmask = policy_nodemask(gfp, pol); if (!nmask || node_isset(hpage_node, *nmask)) { mpol_cond_put(pol); + /* + * First, try to allocate THP only on local node, but + * don't reclaim unnecessarily, just compact. + */ page = __alloc_pages_node(hpage_node, - gfp | __GFP_THISNODE, order); + gfp | __GFP_THISNODE | __GFP_NORETRY, order); /* * If hugepage allocations are configured to always * synchronous compact or the vma has been madvised * to prefer hugepage backing, retry allowing remote - * memory as well. + * memory with both reclaim and compact as well. */ if (!page && (gfp & __GFP_DIRECT_RECLAIM)) page = __alloc_pages_node(hpage_node, - gfp | __GFP_NORETRY, order); + gfp, order); goto out; } diff --git a/mm/memremap.c b/mm/memremap.c index 03ccbdfeb697..c51c6bd2fe34 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -120,7 +120,7 @@ void memunmap_pages(struct dev_pagemap *pgmap) mem_hotplug_begin(); if (pgmap->type == MEMORY_DEVICE_PRIVATE) { - __remove_pages(page_zone(first_page), PHYS_PFN(res->start), + __remove_pages(PHYS_PFN(res->start), PHYS_PFN(resource_size(res)), NULL); } else { arch_remove_memory(nid, res->start, resource_size(res), diff --git a/mm/migrate.c b/mm/migrate.c index eae1565285e3..86873b6f38a7 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1512,9 +1512,11 @@ static int do_move_pages_to_node(struct mm_struct *mm, /* * Resolves the given address to a struct page, isolates it from the LRU and * puts it to the given pagelist. 
- * Returns -errno if the page cannot be found/isolated or 0 when it has been - * queued or the page doesn't need to be migrated because it is already on - * the target node + * Returns: + * errno - if the page cannot be found/isolated + * 0 - when it doesn't have to be migrated because it is already on the + * target node + * 1 - when it has been queued */ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, int node, struct list_head *pagelist, bool migrate_all) @@ -1553,7 +1555,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, if (PageHuge(page)) { if (PageHead(page)) { isolate_huge_page(page, pagelist); - err = 0; + err = 1; } } else { struct page *head; @@ -1563,7 +1565,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, if (err) goto out_putpage; - err = 0; + err = 1; list_add_tail(&head->lru, pagelist); mod_node_page_state(page_pgdat(head), NR_ISOLATED_ANON + page_is_file_cache(head), @@ -1640,8 +1642,17 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, */ err = add_page_for_migration(mm, addr, current_node, &pagelist, flags & MPOL_MF_MOVE_ALL); - if (!err) + + if (!err) { + /* The page is already on the target node */ + err = store_status(status, i, current_node, 1); + if (err) + goto out_flush; continue; + } else if (err > 0) { + /* The page is successfully queued for migration */ + continue; + } err = store_status(status, i, err, 1); if (err) diff --git a/mm/mmap.c b/mm/mmap.c index 9c648524e4dc..71e4ffc83bcd 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -90,12 +90,6 @@ static void unmap_region(struct mm_struct *mm, * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes * w: (no) no w: (no) no w: (copy) copy w: (no) no * x: (no) no x: (no) yes x: (no) yes x: (yes) yes - * - * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and - * MAP_PRIVATE: - * r: (no) no - * w: (no) no - * x: (yes) yes */ pgprot_t protection_map[16] __ro_after_init = { __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 71e3acea7817..d58c481b3df8 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -890,7 +890,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message) K(get_mm_counter(mm, MM_FILEPAGES)), K(get_mm_counter(mm, MM_SHMEMPAGES)), from_kuid(&init_user_ns, task_uid(victim)), - mm_pgtables_bytes(mm), victim->signal->oom_score_adj); + mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj); task_unlock(victim); /* diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 50055d2e4ea8..2caf780a42e7 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -201,11 +201,11 @@ static void wb_min_max_ratio(struct bdi_writeback *wb, if (this_bw < tot_bw) { if (min) { min *= this_bw; - do_div(min, tot_bw); + min = div64_ul(min, tot_bw); } if (max < 100) { max *= this_bw; - do_div(max, tot_bw); + max = div64_ul(max, tot_bw); } } @@ -766,7 +766,7 @@ static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc) struct wb_domain *dom = dtc_dom(dtc); unsigned long thresh = dtc->thresh; u64 wb_thresh; - long numerator, denominator; + unsigned long numerator, denominator; unsigned long wb_min_ratio, wb_max_ratio; /* @@ -777,7 +777,7 @@ static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc) wb_thresh = (thresh * (100 - bdi_min_ratio)) / 100; wb_thresh *= numerator; - do_div(wb_thresh, denominator); + wb_thresh = div64_ul(wb_thresh, denominator); 
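/*
 * Illustrative sketch, not part of the patch above: the page-writeback hunks
 * replace do_div() with div64_ul() because do_div(n, base) is a 64-by-32
 * division that silently truncates 'base' to 32 bits, while these divisors
 * are unsigned long (64 bits wide on 64-bit kernels). A minimal hypothetical
 * helper showing the safe form, assuming only <linux/math64.h>:
 */
static inline u64 scale_u64_sketch(u64 value, u64 numerator,
				   unsigned long denominator)
{
	value *= numerator;			/* caller avoids overflow */
	return div64_ul(value, denominator);	/* full-width divisor, unlike do_div() */
}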
wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio); @@ -1102,7 +1102,7 @@ static void wb_update_write_bandwidth(struct bdi_writeback *wb, bw = written - min(written, wb->written_stamp); bw *= HZ; if (unlikely(elapsed > period)) { - do_div(bw, elapsed); + bw = div64_ul(bw, elapsed); avg = bw; goto out; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4785a8a2040e..d047bf7d8fd4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -694,34 +694,27 @@ void prep_compound_page(struct page *page, unsigned int order) #ifdef CONFIG_DEBUG_PAGEALLOC unsigned int _debug_guardpage_minorder; -#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT -DEFINE_STATIC_KEY_TRUE(_debug_pagealloc_enabled); -#else +bool _debug_pagealloc_enabled_early __read_mostly + = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT); +EXPORT_SYMBOL(_debug_pagealloc_enabled_early); DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); -#endif EXPORT_SYMBOL(_debug_pagealloc_enabled); DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled); static int __init early_debug_pagealloc(char *buf) { - bool enable = false; - - if (kstrtobool(buf, &enable)) - return -EINVAL; - - if (enable) - static_branch_enable(&_debug_pagealloc_enabled); - - return 0; + return kstrtobool(buf, &_debug_pagealloc_enabled_early); } early_param("debug_pagealloc", early_debug_pagealloc); -static void init_debug_guardpage(void) +void init_debug_pagealloc(void) { if (!debug_pagealloc_enabled()) return; + static_branch_enable(&_debug_pagealloc_enabled); + if (!debug_guardpage_minorder()) return; @@ -1186,7 +1179,7 @@ static __always_inline bool free_pages_prepare(struct page *page, */ arch_free_page(page, order); - if (debug_pagealloc_enabled()) + if (debug_pagealloc_enabled_static()) kernel_map_pages(page, 1 << order, 0); kasan_free_nondeferred_pages(page, order); @@ -1207,7 +1200,7 @@ static bool free_pcp_prepare(struct page *page) static bool bulkfree_pcp_prepare(struct page *page) { - if (debug_pagealloc_enabled()) + if (debug_pagealloc_enabled_static()) return free_pages_check(page); else return false; @@ -1221,7 +1214,7 @@ static bool bulkfree_pcp_prepare(struct page *page) */ static bool free_pcp_prepare(struct page *page) { - if (debug_pagealloc_enabled()) + if (debug_pagealloc_enabled_static()) return free_pages_prepare(page, 0, true); else return free_pages_prepare(page, 0, false); @@ -1973,10 +1966,6 @@ void __init page_alloc_init_late(void) for_each_populated_zone(zone) set_zone_contiguous(zone); - -#ifdef CONFIG_DEBUG_PAGEALLOC - init_debug_guardpage(); -#endif } #ifdef CONFIG_CMA @@ -2106,7 +2095,7 @@ static inline bool free_pages_prezeroed(void) */ static inline bool check_pcp_refill(struct page *page) { - if (debug_pagealloc_enabled()) + if (debug_pagealloc_enabled_static()) return check_new_page(page); else return false; @@ -2128,7 +2117,7 @@ static inline bool check_pcp_refill(struct page *page) } static inline bool check_new_pcp(struct page *page) { - if (debug_pagealloc_enabled()) + if (debug_pagealloc_enabled_static()) return check_new_page(page); else return false; @@ -2155,7 +2144,7 @@ inline void post_alloc_hook(struct page *page, unsigned int order, set_page_refcounted(page); arch_alloc_page(page, order); - if (debug_pagealloc_enabled()) + if (debug_pagealloc_enabled_static()) kernel_map_pages(page, 1 << order, 1); kasan_alloc_pages(page, order); kernel_poison_pages(page, 1 << order, 1); @@ -4476,8 +4465,11 @@ retry_cpuset: if (page) goto got_pg; - if (order >= pageblock_order && (gfp_mask & __GFP_IO) && - !(gfp_mask & __GFP_RETRY_MAYFAIL)) { + /* 
+ * Checks for costly allocations with __GFP_NORETRY, which + * includes some THP page fault allocations + */ + if (costly_order && (gfp_mask & __GFP_NORETRY)) { /* * If allocating entire pageblock(s) and compaction * failed because all zones are below low watermarks @@ -4498,23 +4490,6 @@ retry_cpuset: if (compact_result == COMPACT_SKIPPED || compact_result == COMPACT_DEFERRED) goto nopage; - } - - /* - * Checks for costly allocations with __GFP_NORETRY, which - * includes THP page fault allocations - */ - if (costly_order && (gfp_mask & __GFP_NORETRY)) { - /* - * If compaction is deferred for high-order allocations, - * it is because sync compaction recently failed. If - * this is the case and the caller requested a THP - * allocation, we do not want to heavily disrupt the - * system, so we fail the allocation instead of entering - * direct reclaim. - */ - if (compact_result == COMPACT_DEFERRED) - goto nopage; /* * Looks like reclaim/compaction is worth trying, but diff --git a/mm/shmem.c b/mm/shmem.c index 165fa6332993..8793e8cc1a48 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2107,9 +2107,10 @@ unsigned long shmem_get_unmapped_area(struct file *file, /* * Our priority is to support MAP_SHARED mapped hugely; * and support MAP_PRIVATE mapped hugely too, until it is COWed. - * But if caller specified an address hint, respect that as before. + * But if caller specified an address hint and we allocated area there + * successfully, respect that as before. */ - if (uaddr) + if (uaddr == addr) return addr; if (shmem_huge != SHMEM_HUGE_FORCE) { @@ -2143,7 +2144,7 @@ unsigned long shmem_get_unmapped_area(struct file *file, if (inflated_len < len) return addr; - inflated_addr = get_area(NULL, 0, inflated_len, 0, flags); + inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags); if (IS_ERR_VALUE(inflated_addr)) return addr; if (inflated_addr & ~PAGE_MASK) diff --git a/mm/slab.c b/mm/slab.c index f1e1840af533..a89633603b2d 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1416,7 +1416,7 @@ static void kmem_rcu_free(struct rcu_head *head) #if DEBUG static bool is_debug_pagealloc_cache(struct kmem_cache *cachep) { - if (debug_pagealloc_enabled() && OFF_SLAB(cachep) && + if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) && (cachep->size % PAGE_SIZE) == 0) return true; @@ -2008,7 +2008,7 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags) * to check size >= 256. It guarantees that all necessary small * sized slab is initialized in current slab initialization sequence. */ - if (debug_pagealloc_enabled() && (flags & SLAB_POISON) && + if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) && size >= 256 && cachep->object_size > cache_line_size()) { if (size < PAGE_SIZE || size % PAGE_SIZE == 0) { size_t tmp_size = ALIGN(size, PAGE_SIZE); diff --git a/mm/slab_common.c b/mm/slab_common.c index f0ab6d4ceb4c..0d95ddea13b0 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -903,7 +903,8 @@ static void flush_memcg_workqueue(struct kmem_cache *s) * deactivates the memcg kmem_caches through workqueue. Make sure all * previous workitems on workqueue are processed. 
*/ - flush_workqueue(memcg_kmem_cache_wq); + if (likely(memcg_kmem_cache_wq)) + flush_workqueue(memcg_kmem_cache_wq); /* * If we're racing with children kmem_cache deactivation, it might diff --git a/mm/slub.c b/mm/slub.c index d11389710b12..8eafccf75940 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -288,7 +288,7 @@ static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) unsigned long freepointer_addr; void *p; - if (!debug_pagealloc_enabled()) + if (!debug_pagealloc_enabled_static()) return get_freepointer(s, object); freepointer_addr = (unsigned long)object + s->offset; diff --git a/mm/sparse.c b/mm/sparse.c index b20ab7cdac86..3822ecbd8a1f 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -777,7 +777,14 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages, if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION)) { unsigned long section_nr = pfn_to_section_nr(pfn); - if (!section_is_early) { + /* + * When removing an early section, the usage map is kept (as the + * usage maps of other sections fall into the same page). It + * will be re-used when re-adding the section - which is then no + * longer an early section. If the usage map is PageReserved, it + * was allocated during boot. + */ + if (!PageReserved(virt_to_page(ms->usage))) { kfree(ms->usage); ms->usage = NULL; } diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 4d3b3d60d893..b29ad17edcf5 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1062,6 +1062,26 @@ __alloc_vmap_area(unsigned long size, unsigned long align, } /* + * Free a region of KVA allocated by alloc_vmap_area + */ +static void free_vmap_area(struct vmap_area *va) +{ + /* + * Remove from the busy tree/list. + */ + spin_lock(&vmap_area_lock); + unlink_va(va, &vmap_area_root); + spin_unlock(&vmap_area_lock); + + /* + * Insert/Merge it back to the free tree/list. + */ + spin_lock(&free_vmap_area_lock); + merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list); + spin_unlock(&free_vmap_area_lock); +} + +/* * Allocate a region of KVA of the specified size and alignment, within the * vstart and vend. */ @@ -1073,6 +1093,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size, struct vmap_area *va, *pva; unsigned long addr; int purged = 0; + int ret; BUG_ON(!size); BUG_ON(offset_in_page(size)); @@ -1139,6 +1160,7 @@ retry: va->va_end = addr + size; va->vm = NULL; + spin_lock(&vmap_area_lock); insert_vmap_area(va, &vmap_area_root, &vmap_area_list); spin_unlock(&vmap_area_lock); @@ -1147,6 +1169,12 @@ retry: BUG_ON(va->va_start < vstart); BUG_ON(va->va_end > vend); + ret = kasan_populate_vmalloc(addr, size); + if (ret) { + free_vmap_area(va); + return ERR_PTR(ret); + } + return va; overflow: @@ -1186,26 +1214,6 @@ int unregister_vmap_purge_notifier(struct notifier_block *nb) EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); /* - * Free a region of KVA allocated by alloc_vmap_area - */ -static void free_vmap_area(struct vmap_area *va) -{ - /* - * Remove from the busy tree/list. - */ - spin_lock(&vmap_area_lock); - unlink_va(va, &vmap_area_root); - spin_unlock(&vmap_area_lock); - - /* - * Insert/Merge it back to the free tree/list. 
- */ - spin_lock(&free_vmap_area_lock); - merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list); - spin_unlock(&free_vmap_area_lock); -} - -/* * Clear the pagetable entries of a given vmap_area */ static void unmap_vmap_area(struct vmap_area *va) @@ -1375,7 +1383,7 @@ static void free_unmap_vmap_area(struct vmap_area *va) { flush_cache_vunmap(va->va_start, va->va_end); unmap_vmap_area(va); - if (debug_pagealloc_enabled()) + if (debug_pagealloc_enabled_static()) flush_tlb_kernel_range(va->va_start, va->va_end); free_vmap_area_noflush(va); @@ -1673,7 +1681,7 @@ static void vb_free(const void *addr, unsigned long size) vunmap_page_range((unsigned long)addr, (unsigned long)addr + size); - if (debug_pagealloc_enabled()) + if (debug_pagealloc_enabled_static()) flush_tlb_kernel_range((unsigned long)addr, (unsigned long)addr + size); @@ -1771,6 +1779,8 @@ void vm_unmap_ram(const void *mem, unsigned int count) BUG_ON(addr > VMALLOC_END); BUG_ON(!PAGE_ALIGNED(addr)); + kasan_poison_vmalloc(mem, size); + if (likely(count <= VMAP_MAX_ALLOC)) { debug_check_no_locks_freed(mem, size); vb_free(mem, size); @@ -1821,6 +1831,9 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro addr = va->va_start; mem = (void *)addr; } + + kasan_unpoison_vmalloc(mem, size); + if (vmap_page_range(addr, addr + size, prot, pages) < 0) { vm_unmap_ram(mem, count); return NULL; @@ -2075,6 +2088,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, { struct vmap_area *va; struct vm_struct *area; + unsigned long requested_size = size; BUG_ON(in_interrupt()); size = PAGE_ALIGN(size); @@ -2098,23 +2112,9 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, return NULL; } - setup_vmalloc_vm(area, va, flags, caller); + kasan_unpoison_vmalloc((void *)va->va_start, requested_size); - /* - * For KASAN, if we are in vmalloc space, we need to cover the shadow - * area with real memory. If we come here through VM_ALLOC, this is - * done by a higher level function that has access to the true size, - * which might not be a full page. - * - * We assume module space comes via VM_ALLOC path. - */ - if (is_vmalloc_addr(area->addr) && !(area->flags & VM_ALLOC)) { - if (kasan_populate_vmalloc(area->size, area)) { - unmap_vmap_area(va); - kfree(area); - return NULL; - } - } + setup_vmalloc_vm(area, va, flags, caller); return area; } @@ -2293,8 +2293,7 @@ static void __vunmap(const void *addr, int deallocate_pages) debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); - if (area->flags & VM_KASAN) - kasan_poison_vmalloc(area->addr, area->size); + kasan_poison_vmalloc(area->addr, area->size); vm_remove_mappings(area, deallocate_pages); @@ -2539,7 +2538,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align, if (!size || (size >> PAGE_SHIFT) > totalram_pages()) goto fail; - area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | + area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED | vm_flags, start, end, node, gfp_mask, caller); if (!area) goto fail; @@ -2548,11 +2547,6 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align, if (!addr) return NULL; - if (is_vmalloc_or_module_addr(area->addr)) { - if (kasan_populate_vmalloc(real_size, area)) - return NULL; - } - /* * In this function, newly allocated vm_struct has VM_UNINITIALIZED * flag. It means that vm_struct is not fully initialized. 
@@ -3294,7 +3288,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, struct vmap_area **vas, *va; struct vm_struct **vms; int area, area2, last_area, term_area; - unsigned long base, start, size, end, last_end; + unsigned long base, start, size, end, last_end, orig_start, orig_end; bool purged = false; enum fit_type type; @@ -3424,6 +3418,15 @@ retry: spin_unlock(&free_vmap_area_lock); + /* populate the kasan shadow space */ + for (area = 0; area < nr_vms; area++) { + if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) + goto err_free_shadow; + + kasan_unpoison_vmalloc((void *)vas[area]->va_start, + sizes[area]); + } + /* insert all vm's */ spin_lock(&vmap_area_lock); for (area = 0; area < nr_vms; area++) { @@ -3434,12 +3437,6 @@ retry: } spin_unlock(&vmap_area_lock); - /* populate the shadow space outside of the lock */ - for (area = 0; area < nr_vms; area++) { - /* assume success here */ - kasan_populate_vmalloc(sizes[area], vms[area]); - } - kfree(vas); return vms; @@ -3451,8 +3448,12 @@ recovery: * and when pcpu_get_vm_areas() is success. */ while (area--) { - merge_or_add_vmap_area(vas[area], &free_vmap_area_root, - &free_vmap_area_list); + orig_start = vas[area]->va_start; + orig_end = vas[area]->va_end; + va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root, + &free_vmap_area_list); + kasan_release_vmalloc(orig_start, orig_end, + va->va_start, va->va_end); vas[area] = NULL; } @@ -3487,6 +3488,28 @@ err_free2: kfree(vas); kfree(vms); return NULL; + +err_free_shadow: + spin_lock(&free_vmap_area_lock); + /* + * We release all the vmalloc shadows, even the ones for regions that + * hadn't been successfully added. This relies on kasan_release_vmalloc + * being able to tolerate this case. + */ + for (area = 0; area < nr_vms; area++) { + orig_start = vas[area]->va_start; + orig_end = vas[area]->va_end; + va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root, + &free_vmap_area_list); + kasan_release_vmalloc(orig_start, orig_end, + va->va_start, va->va_end); + vas[area] = NULL; + kfree(vms[area]); + } + spin_unlock(&free_vmap_area_lock); + kfree(vas); + kfree(vms); + return NULL; } /** diff --git a/mm/vmscan.c b/mm/vmscan.c index 74e8edce83ca..572fb17c6273 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -387,7 +387,7 @@ void register_shrinker_prepared(struct shrinker *shrinker) { down_write(&shrinker_rwsem); list_add_tail(&shrinker->list, &shrinker_list); -#ifdef CONFIG_MEMCG_KMEM +#ifdef CONFIG_MEMCG if (shrinker->flags & SHRINKER_MEMCG_AWARE) idr_replace(&shrinker_idr, shrinker, shrinker->id); #endif diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 2b2b9aae8a3c..22d17ecfe7df 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -2069,6 +2069,11 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage, zs_pool_dec_isolated(pool); } + if (page_zone(newpage) != page_zone(page)) { + dec_zone_page_state(page, NR_ZSPAGES); + inc_zone_page_state(newpage, NR_ZSPAGES); + } + reset_page(page); put_page(page); page = newpage; diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h index c46daf09a501..bb7ec1a3915d 100644 --- a/net/8021q/vlan.h +++ b/net/8021q/vlan.h @@ -126,6 +126,7 @@ int vlan_check_real_dev(struct net_device *real_dev, void vlan_setup(struct net_device *dev); int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack); void unregister_vlan_dev(struct net_device *dev, struct list_head *head); +void vlan_dev_uninit(struct net_device *dev); bool vlan_dev_inherit_address(struct net_device *dev, struct net_device 
*real_dev); diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index e5bff5cc6f97..2a78da4072de 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -586,7 +586,8 @@ static int vlan_dev_init(struct net_device *dev) return 0; } -static void vlan_dev_uninit(struct net_device *dev) +/* Note: this function might be called multiple times for the same device. */ +void vlan_dev_uninit(struct net_device *dev) { struct vlan_priority_tci_mapping *pm; struct vlan_dev_priv *vlan = vlan_dev_priv(dev); diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c index c482a6fe9393..0db85aeb119b 100644 --- a/net/8021q/vlan_netlink.c +++ b/net/8021q/vlan_netlink.c @@ -108,11 +108,13 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[], struct ifla_vlan_flags *flags; struct ifla_vlan_qos_mapping *m; struct nlattr *attr; - int rem; + int rem, err; if (data[IFLA_VLAN_FLAGS]) { flags = nla_data(data[IFLA_VLAN_FLAGS]); - vlan_dev_change_flags(dev, flags->flags, flags->mask); + err = vlan_dev_change_flags(dev, flags->flags, flags->mask); + if (err) + return err; } if (data[IFLA_VLAN_INGRESS_QOS]) { nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) { @@ -123,7 +125,9 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[], if (data[IFLA_VLAN_EGRESS_QOS]) { nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) { m = nla_data(attr); - vlan_dev_set_egress_priority(dev, m->from, m->to); + err = vlan_dev_set_egress_priority(dev, m->from, m->to); + if (err) + return err; } } return 0; @@ -179,10 +183,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev, return -EINVAL; err = vlan_changelink(dev, tb, data, extack); - if (err < 0) - return err; - - return register_vlan_dev(dev, extack); + if (!err) + err = register_vlan_dev(dev, extack); + if (err) + vlan_dev_uninit(dev); + return err; } static inline size_t vlan_qos_map_size(unsigned int n) diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index b0af3a11d406..ec7bf5a4a9fc 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -285,6 +285,7 @@ static u32 batadv_hash_dat(const void *data, u32 size) u32 hash = 0; const struct batadv_dat_entry *dat = data; const unsigned char *key; + __be16 vid; u32 i; key = (const unsigned char *)&dat->ip; @@ -294,7 +295,8 @@ static u32 batadv_hash_dat(const void *data, u32 size) hash ^= (hash >> 6); } - key = (const unsigned char *)&dat->vid; + vid = htons(dat->vid); + key = (__force const unsigned char *)&vid; for (i = 0; i < sizeof(dat->vid); i++) { hash += key[i]; hash += (hash << 10); diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index af7800103e51..59980ecfc962 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -662,6 +662,9 @@ static unsigned int br_nf_forward_arp(void *priv, nf_bridge_pull_encap_header(skb); } + if (unlikely(!pskb_may_pull(skb, sizeof(struct arphdr)))) + return NF_DROP; + if (arp_hdr(skb)->ar_pln != 4) { if (is_vlan_arp(skb, state->net)) nf_bridge_push_encap_header(skb); diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c index 2cdfc5d6c25d..8c69f0c95a8e 100644 --- a/net/bridge/br_nf_core.c +++ b/net/bridge/br_nf_core.c @@ -22,7 +22,8 @@ #endif static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu) + struct sk_buff *skb, u32 mtu, + bool confirm_neigh) { } diff --git a/net/bridge/netfilter/ebtables.c 
b/net/bridge/netfilter/ebtables.c index 4096d8a74a2b..e1256e03a9a8 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1867,7 +1867,7 @@ static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz) } static int ebt_buf_add(struct ebt_entries_buf_state *state, - void *data, unsigned int sz) + const void *data, unsigned int sz) { if (state->buf_kern_start == NULL) goto count_only; @@ -1901,7 +1901,7 @@ enum compat_mwt { EBT_COMPAT_TARGET, }; -static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, +static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt, enum compat_mwt compat_mwt, struct ebt_entries_buf_state *state, const unsigned char *base) @@ -1979,22 +1979,23 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, /* return size of all matches, watchers or target, including necessary * alignment and padding. */ -static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, +static int ebt_size_mwt(const struct compat_ebt_entry_mwt *match32, unsigned int size_left, enum compat_mwt type, struct ebt_entries_buf_state *state, const void *base) { + const char *buf = (const char *)match32; int growth = 0; - char *buf; if (size_left == 0) return 0; - buf = (char *) match32; - - while (size_left >= sizeof(*match32)) { + do { struct ebt_entry_match *match_kern; int ret; + if (size_left < sizeof(*match32)) + return -EINVAL; + match_kern = (struct ebt_entry_match *) state->buf_kern_start; if (match_kern) { char *tmp; @@ -2031,22 +2032,18 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, if (match_kern) match_kern->match_size = ret; - /* rule should have no remaining data after target */ - if (type == EBT_COMPAT_TARGET && size_left) - return -EINVAL; - match32 = (struct compat_ebt_entry_mwt *) buf; - } + } while (size_left); return growth; } /* called for all ebt_entry structures. 
*/ -static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, +static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *base, unsigned int *total, struct ebt_entries_buf_state *state) { - unsigned int i, j, startoff, new_offset = 0; + unsigned int i, j, startoff, next_expected_off, new_offset = 0; /* stores match/watchers/targets & offset of next struct ebt_entry: */ unsigned int offsets[4]; unsigned int *offsets_update = NULL; @@ -2132,11 +2129,13 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, return ret; } - startoff = state->buf_user_offset - startoff; + next_expected_off = state->buf_user_offset - startoff; + if (next_expected_off != entry->next_offset) + return -EINVAL; - if (WARN_ON(*total < startoff)) + if (*total < entry->next_offset) return -EINVAL; - *total -= startoff; + *total -= entry->next_offset; return 0; } diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c index de09b0a65791..f7587428febd 100644 --- a/net/can/j1939/socket.c +++ b/net/can/j1939/socket.c @@ -423,9 +423,9 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len) { struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; struct j1939_sock *jsk = j1939_sk(sock->sk); - struct j1939_priv *priv = jsk->priv; - struct sock *sk = sock->sk; - struct net *net = sock_net(sk); + struct j1939_priv *priv; + struct sock *sk; + struct net *net; int ret = 0; ret = j1939_sk_sanity_check(addr, len); @@ -434,6 +434,10 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len) lock_sock(sock->sk); + priv = jsk->priv; + sk = sock->sk; + net = sock_net(sk); + /* Already bound to an interface? */ if (jsk->state & J1939_SOCK_BOUND) { /* A re-bind() to a different interface is not diff --git a/net/core/dev.c b/net/core/dev.c index 0ad39c87b7fd..7e885d069707 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -9177,22 +9177,10 @@ static void netdev_unregister_lockdep_key(struct net_device *dev) void netdev_update_lockdep_key(struct net_device *dev) { - struct netdev_queue *queue; - int i; - - lockdep_unregister_key(&dev->qdisc_xmit_lock_key); lockdep_unregister_key(&dev->addr_list_lock_key); - - lockdep_register_key(&dev->qdisc_xmit_lock_key); lockdep_register_key(&dev->addr_list_lock_key); lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key); - for (i = 0; i < dev->num_tx_queues; i++) { - queue = netdev_get_tx_queue(dev, i); - - lockdep_set_class(&queue->_xmit_lock, - &dev->qdisc_xmit_lock_key); - } } EXPORT_SYMBOL(netdev_update_lockdep_key); diff --git a/net/core/devlink.c b/net/core/devlink.c index 4c63c9a4c09e..f76219bf0c21 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -6406,7 +6406,7 @@ static bool devlink_port_type_should_warn(struct devlink_port *devlink_port) devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_DSA; } -#define DEVLINK_PORT_TYPE_WARN_TIMEOUT (HZ * 30) +#define DEVLINK_PORT_TYPE_WARN_TIMEOUT (HZ * 3600) static void devlink_port_type_warn_schedule(struct devlink_port *devlink_port) { @@ -7563,7 +7563,7 @@ void devlink_region_destroy(struct devlink_region *region) EXPORT_SYMBOL_GPL(devlink_region_destroy); /** - * devlink_region_shapshot_id_get - get snapshot ID + * devlink_region_snapshot_id_get - get snapshot ID * * This callback should be called when adding a new snapshot, * Driver should use the same id for multiple snapshots taken @@ -7571,7 +7571,7 @@ EXPORT_SYMBOL_GPL(devlink_region_destroy); * * @devlink: devlink */ -u32 
devlink_region_shapshot_id_get(struct devlink *devlink) +u32 devlink_region_snapshot_id_get(struct devlink *devlink) { u32 id; @@ -7581,7 +7581,7 @@ u32 devlink_region_shapshot_id_get(struct devlink *devlink) return id; } -EXPORT_SYMBOL_GPL(devlink_region_shapshot_id_get); +EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_get); /** * devlink_region_snapshot_create - create a new snapshot diff --git a/net/core/filter.c b/net/core/filter.c index c19dd0973e0c..538f6a735a19 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2055,6 +2055,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) } skb->dev = dev; + skb->tstamp = 0; dev_xmit_recursion_inc(); ret = dev_queue_xmit(skb); @@ -2230,10 +2231,10 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start, /* First find the starting scatterlist element */ i = msg->sg.start; do { + offset += len; len = sk_msg_elem(msg, i)->length; if (start < offset + len) break; - offset += len; sk_msg_iter_var_next(i); } while (i != msg->sg.end); @@ -2345,7 +2346,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, u32, len, u64, flags) { struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge; - u32 new, i = 0, l, space, copy = 0, offset = 0; + u32 new, i = 0, l = 0, space, copy = 0, offset = 0; u8 *raw, *to, *from; struct page *page; @@ -2355,11 +2356,11 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, /* First find the starting scatterlist element */ i = msg->sg.start; do { + offset += l; l = sk_msg_elem(msg, i)->length; if (start < offset + l) break; - offset += l; sk_msg_iter_var_next(i); } while (i != msg->sg.end); @@ -2414,6 +2415,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, sk_msg_iter_var_next(i); sg_unmark_end(psge); + sg_unmark_end(&rsge); sk_msg_iter_next(msg, end); } @@ -2505,7 +2507,7 @@ static void sk_msg_shift_right(struct sk_msg *msg, int i) BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, u32, len, u64, flags) { - u32 i = 0, l, space, offset = 0; + u32 i = 0, l = 0, space, offset = 0; u64 last = start + len; int pop; @@ -2515,11 +2517,11 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, /* First find the starting scatterlist element */ i = msg->sg.start; do { + offset += l; l = sk_msg_elem(msg, i)->length; if (start < offset + l) break; - offset += l; sk_msg_iter_var_next(i); } while (i != msg->sg.end); @@ -5317,8 +5319,7 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, if (sk) { sk = sk_to_full_sk(sk); if (!sk_fullsock(sk)) { - if (!sock_flag(sk, SOCK_RCU_FREE)) - sock_gen_put(sk); + sock_gen_put(sk); return NULL; } } @@ -5355,8 +5356,7 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, if (sk) { sk = sk_to_full_sk(sk); if (!sk_fullsock(sk)) { - if (!sock_flag(sk, SOCK_RCU_FREE)) - sock_gen_put(sk); + sock_gen_put(sk); return NULL; } } @@ -5423,7 +5423,8 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = { BPF_CALL_1(bpf_sk_release, struct sock *, sk) { - if (!sock_flag(sk, SOCK_RCU_FREE)) + /* Only full sockets have sk->sk_flags. 
*/ + if (!sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE)) sock_gen_put(sk); return 0; } diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 652da6369037..920784a9b7ff 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -98,9 +98,6 @@ static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb) static void neigh_cleanup_and_release(struct neighbour *neigh) { - if (neigh->parms->neigh_cleanup) - neigh->parms->neigh_cleanup(neigh); - trace_neigh_cleanup_and_release(neigh, 0); __neigh_notify(neigh, RTM_DELNEIGH, 0, 0); call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 5c4624298996..4c826b8bf9b1 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -919,14 +919,17 @@ static int rx_queue_add_kobject(struct net_device *dev, int index) struct kobject *kobj = &queue->kobj; int error = 0; + /* Kobject_put later will trigger rx_queue_release call which + * decreases dev refcount: Take that reference here + */ + dev_hold(queue->dev); + kobj->kset = dev->queues_kset; error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL, "rx-%u", index); if (error) goto err; - dev_hold(queue->dev); - if (dev->sysfs_rx_queue_group) { error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group); if (error) diff --git a/net/core/skmsg.c b/net/core/skmsg.c index ded2d5227678..3866d7e20c07 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -594,6 +594,8 @@ EXPORT_SYMBOL_GPL(sk_psock_destroy); void sk_psock_drop(struct sock *sk, struct sk_psock *psock) { + sock_owned_by_me(sk); + sk_psock_cork_free(psock); sk_psock_zap_ingress(psock); diff --git a/net/core/sock.c b/net/core/sock.c index 043db3ce023e..8459ad579f73 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2916,7 +2916,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) sk->sk_max_pacing_rate = ~0UL; sk->sk_pacing_rate = ~0UL; - sk->sk_pacing_shift = 10; + WRITE_ONCE(sk->sk_pacing_shift, 10); sk->sk_incoming_cpu = -1; sk_rx_queue_clear(sk); diff --git a/net/core/sock_map.c b/net/core/sock_map.c index eb114ee419b6..8998e356f423 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -241,8 +241,11 @@ static void sock_map_free(struct bpf_map *map) struct sock *sk; sk = xchg(psk, NULL); - if (sk) + if (sk) { + lock_sock(sk); sock_map_unref(sk, psk); + release_sock(sk); + } } raw_spin_unlock_bh(&stab->lock); rcu_read_unlock(); @@ -862,7 +865,9 @@ static void sock_hash_free(struct bpf_map *map) raw_spin_lock_bh(&bucket->lock); hlist_for_each_entry_safe(elem, node, &bucket->head, node) { hlist_del_rcu(&elem->node); + lock_sock(elem->sk); sock_map_unref(elem->sk, elem); + release_sock(elem->sk); } raw_spin_unlock_bh(&bucket->lock); } diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index eb29e5adc84d..9f9e00ba3ad7 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -288,6 +288,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write, return ret; } +# ifdef CONFIG_HAVE_EBPF_JIT static int proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, @@ -298,6 +299,7 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write, return proc_dointvec_minmax(table, write, buffer, lenp, ppos); } +# endif /* CONFIG_HAVE_EBPF_JIT */ static int proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write, diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index aea918135ec3..08c3dc45f1a4 100644 --- 
a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -110,7 +110,8 @@ static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how); static struct dst_entry *dn_dst_negative_advice(struct dst_entry *); static void dn_dst_link_failure(struct sk_buff *); static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb , u32 mtu); + struct sk_buff *skb , u32 mtu, + bool confirm_neigh); static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb); static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, @@ -251,7 +252,8 @@ static int dn_dst_gc(struct dst_ops *ops) * advertise to the other end). */ static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu) + struct sk_buff *skb, u32 mtu, + bool confirm_neigh) { struct dn_route *rt = (struct dn_route *) dst; struct neighbour *n = rt->n; diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 9ef2caa13f27..c66abbed4daf 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -124,7 +124,8 @@ static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst, return NULL; } -struct dsa_link *dsa_link_touch(struct dsa_port *dp, struct dsa_port *link_dp) +static struct dsa_link *dsa_link_touch(struct dsa_port *dp, + struct dsa_port *link_dp) { struct dsa_switch *ds = dp->ds; struct dsa_switch_tree *dst; diff --git a/net/dsa/tag_gswip.c b/net/dsa/tag_gswip.c index b678160bbd66..408d4af390a0 100644 --- a/net/dsa/tag_gswip.c +++ b/net/dsa/tag_gswip.c @@ -104,7 +104,7 @@ static struct sk_buff *gswip_tag_rcv(struct sk_buff *skb, } static const struct dsa_device_ops gswip_netdev_ops = { - .name = "gwsip", + .name = "gswip", .proto = DSA_TAG_PROTO_GSWIP, .xmit = gswip_tag_xmit, .rcv = gswip_tag_rcv, diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c index 73605bcbb385..90d055c4df9e 100644 --- a/net/dsa/tag_ksz.c +++ b/net/dsa/tag_ksz.c @@ -84,8 +84,6 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb, * (eg, 0x00=port1, 0x02=port3, 0x06=port7) */ -#define KSZ8795_INGRESS_TAG_LEN 1 - #define KSZ8795_TAIL_TAG_OVERRIDE BIT(6) #define KSZ8795_TAIL_TAG_LOOKUP BIT(7) @@ -96,12 +94,12 @@ static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev) u8 *tag; u8 *addr; - nskb = ksz_common_xmit(skb, dev, KSZ8795_INGRESS_TAG_LEN); + nskb = ksz_common_xmit(skb, dev, KSZ_INGRESS_TAG_LEN); if (!nskb) return NULL; /* Tag encoding */ - tag = skb_put(nskb, KSZ8795_INGRESS_TAG_LEN); + tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN); addr = skb_mac_header(nskb); *tag = 1 << dp->index; @@ -124,7 +122,7 @@ static const struct dsa_device_ops ksz8795_netdev_ops = { .proto = DSA_TAG_PROTO_KSZ8795, .xmit = ksz8795_xmit, .rcv = ksz8795_rcv, - .overhead = KSZ8795_INGRESS_TAG_LEN, + .overhead = KSZ_INGRESS_TAG_LEN, }; DSA_TAG_DRIVER(ksz8795_netdev_ops); diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c index c95885215525..c8a128c9e5e0 100644 --- a/net/dsa/tag_qca.c +++ b/net/dsa/tag_qca.c @@ -33,9 +33,6 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev) struct dsa_port *dp = dsa_slave_to_port(dev); u16 *phdr, hdr; - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - if (skb_cow_head(skb, 0) < 0) return NULL; diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c index 94447974a3c0..d5f709b940ff 100644 --- a/net/hsr/hsr_debugfs.c +++ b/net/hsr/hsr_debugfs.c @@ -20,6 +20,8 @@ #include "hsr_main.h" #include "hsr_framereg.h" +static struct dentry *hsr_debugfs_root_dir; + static void 
print_mac_address(struct seq_file *sfp, unsigned char *mac) { seq_printf(sfp, "%02x:%02x:%02x:%02x:%02x:%02x:", @@ -63,8 +65,20 @@ hsr_node_table_open(struct inode *inode, struct file *filp) return single_open(filp, hsr_node_table_show, inode->i_private); } +void hsr_debugfs_rename(struct net_device *dev) +{ + struct hsr_priv *priv = netdev_priv(dev); + struct dentry *d; + + d = debugfs_rename(hsr_debugfs_root_dir, priv->node_tbl_root, + hsr_debugfs_root_dir, dev->name); + if (IS_ERR(d)) + netdev_warn(dev, "failed to rename\n"); + else + priv->node_tbl_root = d; +} + static const struct file_operations hsr_fops = { - .owner = THIS_MODULE, .open = hsr_node_table_open, .read = seq_read, .llseek = seq_lseek, @@ -78,15 +92,14 @@ static const struct file_operations hsr_fops = { * When debugfs is configured this routine sets up the node_table file per * hsr device for dumping the node_table entries */ -int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev) +void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev) { - int rc = -1; struct dentry *de = NULL; - de = debugfs_create_dir(hsr_dev->name, NULL); - if (!de) { - pr_err("Cannot create hsr debugfs root\n"); - return rc; + de = debugfs_create_dir(hsr_dev->name, hsr_debugfs_root_dir); + if (IS_ERR(de)) { + pr_err("Cannot create hsr debugfs directory\n"); + return; } priv->node_tbl_root = de; @@ -94,13 +107,13 @@ int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev) de = debugfs_create_file("node_table", S_IFREG | 0444, priv->node_tbl_root, priv, &hsr_fops); - if (!de) { - pr_err("Cannot create hsr node_table directory\n"); - return rc; + if (IS_ERR(de)) { + pr_err("Cannot create hsr node_table file\n"); + debugfs_remove(priv->node_tbl_root); + priv->node_tbl_root = NULL; + return; } priv->node_tbl_file = de; - - return 0; } /* hsr_debugfs_term - Tear down debugfs intrastructure @@ -117,3 +130,18 @@ hsr_debugfs_term(struct hsr_priv *priv) debugfs_remove(priv->node_tbl_root); priv->node_tbl_root = NULL; } + +void hsr_debugfs_create_root(void) +{ + hsr_debugfs_root_dir = debugfs_create_dir("hsr", NULL); + if (IS_ERR(hsr_debugfs_root_dir)) { + pr_err("Cannot create hsr debugfs root directory\n"); + hsr_debugfs_root_dir = NULL; + } +} + +void hsr_debugfs_remove_root(void) +{ + /* debugfs_remove() internally checks NULL and ERROR */ + debugfs_remove(hsr_debugfs_root_dir); +} diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index b01e1bae4ddc..c7bd6c49fadf 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -272,6 +272,8 @@ static void send_hsr_supervision_frame(struct hsr_port *master, skb->dev->dev_addr, skb->len) <= 0) goto out; skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); if (hsr_ver > 0) { hsr_tag = skb_put(skb, sizeof(struct hsr_tag)); @@ -368,7 +370,7 @@ static void hsr_dev_destroy(struct net_device *hsr_dev) del_timer_sync(&hsr->prune_timer); del_timer_sync(&hsr->announce_timer); - hsr_del_self_node(&hsr->self_node_db); + hsr_del_self_node(hsr); hsr_del_nodes(&hsr->node_db); } @@ -440,11 +442,12 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], INIT_LIST_HEAD(&hsr->ports); INIT_LIST_HEAD(&hsr->node_db); INIT_LIST_HEAD(&hsr->self_node_db); + spin_lock_init(&hsr->list_lock); ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr); /* Make sure we recognize frames from ourselves in hsr_rcv() */ - res = hsr_create_self_node(&hsr->self_node_db, hsr_dev->dev_addr, + res = hsr_create_self_node(hsr, 
hsr_dev->dev_addr, slave[1]->dev_addr); if (res < 0) return res; @@ -477,31 +480,32 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER); if (res) - goto err_add_port; + goto err_add_master; res = register_netdevice(hsr_dev); if (res) - goto fail; + goto err_unregister; res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A); if (res) - goto fail; + goto err_add_slaves; + res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B); if (res) - goto fail; + goto err_add_slaves; + hsr_debugfs_init(hsr, hsr_dev); mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD)); - res = hsr_debugfs_init(hsr, hsr_dev); - if (res) - goto fail; return 0; -fail: +err_add_slaves: + unregister_netdevice(hsr_dev); +err_unregister: list_for_each_entry_safe(port, tmp, &hsr->ports, port_list) hsr_del_port(port); -err_add_port: - hsr_del_self_node(&hsr->self_node_db); +err_add_master: + hsr_del_self_node(hsr); return res; } diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 292be446007b..27dc65d7de67 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -75,10 +75,11 @@ static struct hsr_node *find_node_by_addr_A(struct list_head *node_db, /* Helper for device init; the self_node_db is used in hsr_rcv() to recognize * frames from self that's been looped over the HSR ring. */ -int hsr_create_self_node(struct list_head *self_node_db, +int hsr_create_self_node(struct hsr_priv *hsr, unsigned char addr_a[ETH_ALEN], unsigned char addr_b[ETH_ALEN]) { + struct list_head *self_node_db = &hsr->self_node_db; struct hsr_node *node, *oldnode; node = kmalloc(sizeof(*node), GFP_KERNEL); @@ -88,33 +89,33 @@ int hsr_create_self_node(struct list_head *self_node_db, ether_addr_copy(node->macaddress_A, addr_a); ether_addr_copy(node->macaddress_B, addr_b); - rcu_read_lock(); + spin_lock_bh(&hsr->list_lock); oldnode = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list); if (oldnode) { list_replace_rcu(&oldnode->mac_list, &node->mac_list); - rcu_read_unlock(); - synchronize_rcu(); - kfree(oldnode); + spin_unlock_bh(&hsr->list_lock); + kfree_rcu(oldnode, rcu_head); } else { - rcu_read_unlock(); list_add_tail_rcu(&node->mac_list, self_node_db); + spin_unlock_bh(&hsr->list_lock); } return 0; } -void hsr_del_self_node(struct list_head *self_node_db) +void hsr_del_self_node(struct hsr_priv *hsr) { + struct list_head *self_node_db = &hsr->self_node_db; struct hsr_node *node; - rcu_read_lock(); + spin_lock_bh(&hsr->list_lock); node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list); - rcu_read_unlock(); if (node) { list_del_rcu(&node->mac_list); - kfree(node); + kfree_rcu(node, rcu_head); } + spin_unlock_bh(&hsr->list_lock); } void hsr_del_nodes(struct list_head *node_db) @@ -130,30 +131,43 @@ void hsr_del_nodes(struct list_head *node_db) * seq_out is used to initialize filtering of outgoing duplicate frames * originating from the newly added node. 
*/ -struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[], - u16 seq_out) +static struct hsr_node *hsr_add_node(struct hsr_priv *hsr, + struct list_head *node_db, + unsigned char addr[], + u16 seq_out) { - struct hsr_node *node; + struct hsr_node *new_node, *node; unsigned long now; int i; - node = kzalloc(sizeof(*node), GFP_ATOMIC); - if (!node) + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) return NULL; - ether_addr_copy(node->macaddress_A, addr); + ether_addr_copy(new_node->macaddress_A, addr); /* We are only interested in time diffs here, so use current jiffies * as initialization. (0 could trigger an spurious ring error warning). */ now = jiffies; for (i = 0; i < HSR_PT_PORTS; i++) - node->time_in[i] = now; + new_node->time_in[i] = now; for (i = 0; i < HSR_PT_PORTS; i++) - node->seq_out[i] = seq_out; - - list_add_tail_rcu(&node->mac_list, node_db); + new_node->seq_out[i] = seq_out; + spin_lock_bh(&hsr->list_lock); + list_for_each_entry_rcu(node, node_db, mac_list) { + if (ether_addr_equal(node->macaddress_A, addr)) + goto out; + if (ether_addr_equal(node->macaddress_B, addr)) + goto out; + } + list_add_tail_rcu(&new_node->mac_list, node_db); + spin_unlock_bh(&hsr->list_lock); + return new_node; +out: + spin_unlock_bh(&hsr->list_lock); + kfree(new_node); return node; } @@ -163,6 +177,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, bool is_sup) { struct list_head *node_db = &port->hsr->node_db; + struct hsr_priv *hsr = port->hsr; struct hsr_node *node; struct ethhdr *ethhdr; u16 seq_out; @@ -196,7 +211,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, seq_out = HSR_SEQNR_START; } - return hsr_add_node(node_db, ethhdr->h_source, seq_out); + return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out); } /* Use the Supervision frame's info about an eventual macaddress_B for merging @@ -206,10 +221,11 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, struct hsr_port *port_rcv) { - struct ethhdr *ethhdr; - struct hsr_node *node_real; + struct hsr_priv *hsr = port_rcv->hsr; struct hsr_sup_payload *hsr_sp; + struct hsr_node *node_real; struct list_head *node_db; + struct ethhdr *ethhdr; int i; ethhdr = (struct ethhdr *)skb_mac_header(skb); @@ -231,7 +247,7 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A); if (!node_real) /* No frame received from AddrA of this node yet */ - node_real = hsr_add_node(node_db, hsr_sp->macaddress_A, + node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A, HSR_SEQNR_START - 1); if (!node_real) goto done; /* No mem */ @@ -252,7 +268,9 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, } node_real->addr_B_port = port_rcv->type; + spin_lock_bh(&hsr->list_lock); list_del_rcu(&node_curr->mac_list); + spin_unlock_bh(&hsr->list_lock); kfree_rcu(node_curr, rcu_head); done: @@ -368,12 +386,13 @@ void hsr_prune_nodes(struct timer_list *t) { struct hsr_priv *hsr = from_timer(hsr, t, prune_timer); struct hsr_node *node; + struct hsr_node *tmp; struct hsr_port *port; unsigned long timestamp; unsigned long time_a, time_b; - rcu_read_lock(); - list_for_each_entry_rcu(node, &hsr->node_db, mac_list) { + spin_lock_bh(&hsr->list_lock); + list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) { /* Don't prune own node. 
Neither time_in[HSR_PT_SLAVE_A] * nor time_in[HSR_PT_SLAVE_B], will ever be updated for * the master port. Thus the master node will be repeatedly @@ -421,7 +440,7 @@ void hsr_prune_nodes(struct timer_list *t) kfree_rcu(node, rcu_head); } } - rcu_read_unlock(); + spin_unlock_bh(&hsr->list_lock); /* Restart timer */ mod_timer(&hsr->prune_timer, diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h index 89a3ce38151d..0f0fa12b4329 100644 --- a/net/hsr/hsr_framereg.h +++ b/net/hsr/hsr_framereg.h @@ -12,10 +12,8 @@ struct hsr_node; -void hsr_del_self_node(struct list_head *self_node_db); +void hsr_del_self_node(struct hsr_priv *hsr); void hsr_del_nodes(struct list_head *node_db); -struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[], - u16 seq_out); struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, bool is_sup); void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, @@ -33,7 +31,7 @@ int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node, void hsr_prune_nodes(struct timer_list *t); -int hsr_create_self_node(struct list_head *self_node_db, +int hsr_create_self_node(struct hsr_priv *hsr, unsigned char addr_a[ETH_ALEN], unsigned char addr_b[ETH_ALEN]); diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c index b9988a662ee1..9e389accbfc7 100644 --- a/net/hsr/hsr_main.c +++ b/net/hsr/hsr_main.c @@ -45,6 +45,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event, case NETDEV_CHANGE: /* Link (carrier) state changes */ hsr_check_carrier_and_operstate(hsr); break; + case NETDEV_CHANGENAME: + if (is_hsr_master(dev)) + hsr_debugfs_rename(dev); + break; case NETDEV_CHANGEADDR: if (port->type == HSR_PT_MASTER) { /* This should not happen since there's no @@ -64,7 +68,7 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event, /* Make sure we recognize frames from ourselves in hsr_rcv() */ port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B); - res = hsr_create_self_node(&hsr->self_node_db, + res = hsr_create_self_node(hsr, master->dev->dev_addr, port ? port->dev->dev_addr : @@ -123,6 +127,7 @@ static void __exit hsr_exit(void) { unregister_netdevice_notifier(&hsr_nb); hsr_netlink_exit(); + hsr_debugfs_remove_root(); } module_init(hsr_init); diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h index 96fac696a1e1..d40de84a637f 100644 --- a/net/hsr/hsr_main.h +++ b/net/hsr/hsr_main.h @@ -160,8 +160,9 @@ struct hsr_priv { int announce_count; u16 sequence_nr; u16 sup_sequence_nr; /* For HSRv1 separate seq_nr for supervision */ - u8 prot_version; /* Indicate if HSRv0 or HSRv1. */ - spinlock_t seqnr_lock; /* locking for sequence_nr */ + u8 prot_version; /* Indicate if HSRv0 or HSRv1. 
*/ + spinlock_t seqnr_lock; /* locking for sequence_nr */ + spinlock_t list_lock; /* locking for node list */ unsigned char sup_multicast_addr[ETH_ALEN]; #ifdef CONFIG_DEBUG_FS struct dentry *node_tbl_root; @@ -184,17 +185,24 @@ static inline u16 hsr_get_skb_sequence_nr(struct sk_buff *skb) } #if IS_ENABLED(CONFIG_DEBUG_FS) -int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev); +void hsr_debugfs_rename(struct net_device *dev); +void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev); void hsr_debugfs_term(struct hsr_priv *priv); +void hsr_debugfs_create_root(void); +void hsr_debugfs_remove_root(void); #else -static inline int hsr_debugfs_init(struct hsr_priv *priv, - struct net_device *hsr_dev) +static inline void void hsr_debugfs_rename(struct net_device *dev) { - return 0; } - +static inline void hsr_debugfs_init(struct hsr_priv *priv, + struct net_device *hsr_dev) +{} static inline void hsr_debugfs_term(struct hsr_priv *priv) {} +static inline void hsr_debugfs_create_root(void) +{} +static inline void hsr_debugfs_remove_root(void) +{} #endif #endif /* __HSR_PRIVATE_H */ diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index 8f8337f893ba..8dc0547f01d0 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c @@ -476,6 +476,7 @@ int __init hsr_netlink_init(void) if (rc) goto fail_genl_register_family; + hsr_debugfs_create_root(); return 0; fail_genl_register_family: diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index b9df9c09b84e..195469a13371 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -2193,6 +2193,12 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb, int count = cb->args[2]; t_key key = cb->args[3]; + /* First time here, count and key are both always 0. Count > 0 + * and key == 0 means the dump has wrapped around and we are done. 
+ */ + if (count && !key) + return skb->len; + while ((l = leaf_walk_rcu(&tp, key)) != NULL) { int err; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index e4c6e8b40490..18c0d5bffe12 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -1086,7 +1086,7 @@ struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu) if (!dst) goto out; } - dst->ops->update_pmtu(dst, sk, NULL, mtu); + dst->ops->update_pmtu(dst, sk, NULL, mtu, true); dst = __sk_dst_check(sk, 0); if (!dst) diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index af154977904c..f11e997e517b 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -911,11 +911,12 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, for (i = s_i; i < INET_LHTABLE_SIZE; i++) { struct inet_listen_hashbucket *ilb; + struct hlist_nulls_node *node; num = 0; ilb = &hashinfo->listening_hash[i]; spin_lock(&ilb->lock); - sk_for_each(sk, &ilb->head) { + sk_nulls_for_each(sk, node, &ilb->nulls_head) { struct inet_sock *inet = inet_sk(sk); if (!net_eq(sock_net(sk), net)) diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 83fb00153018..2bbaaf0c7176 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -516,10 +516,11 @@ static int inet_reuseport_add_sock(struct sock *sk, struct inet_listen_hashbucket *ilb) { struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash; + const struct hlist_nulls_node *node; struct sock *sk2; kuid_t uid = sock_i_uid(sk); - sk_for_each_rcu(sk2, &ilb->head) { + sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) { if (sk2 != sk && sk2->sk_family == sk->sk_family && ipv6_only_sock(sk2) == ipv6_only_sock(sk) && @@ -555,9 +556,9 @@ int __inet_hash(struct sock *sk, struct sock *osk) } if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && sk->sk_family == AF_INET6) - hlist_add_tail_rcu(&sk->sk_node, &ilb->head); + __sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head); else - hlist_add_head_rcu(&sk->sk_node, &ilb->head); + __sk_nulls_add_node_rcu(sk, &ilb->nulls_head); inet_hash2(hashinfo, sk); ilb->count++; sock_set_flag(sk, SOCK_RCU_FREE); @@ -606,11 +607,9 @@ void inet_unhash(struct sock *sk) reuseport_detach_sock(sk); if (ilb) { inet_unhash2(hashinfo, sk); - __sk_del_node_init(sk); - ilb->count--; - } else { - __sk_nulls_del_node_init_rcu(sk); + ilb->count--; } + __sk_nulls_del_node_init_rcu(sk); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); unlock: spin_unlock_bh(lock); @@ -750,7 +749,8 @@ void inet_hashinfo_init(struct inet_hashinfo *h) for (i = 0; i < INET_LHTABLE_SIZE; i++) { spin_lock_init(&h->listening_hash[i].lock); - INIT_HLIST_HEAD(&h->listening_hash[i].head); + INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].nulls_head, + i + LISTENING_NULLS_BASE); h->listening_hash[i].count = 0; } diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 38c02bb62e2c..0fe2a5d3e258 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -505,7 +505,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, mtu = skb_valid_dst(skb) ? 
dst_mtu(skb_dst(skb)) : dev->mtu; if (skb_valid_dst(skb)) - skb_dst_update_pmtu(skb, mtu); + skb_dst_update_pmtu_no_confirm(skb, mtu); if (skb->protocol == htons(ETH_P_IP)) { if (!skb_is_gso(skb) && diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 9b153c7fcbb4..e90b600c7a25 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -214,7 +214,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, mtu = dst_mtu(dst); if (skb->len > mtu) { - skb_dst_update_pmtu(skb, mtu); + skb_dst_update_pmtu_no_confirm(skb, mtu); if (skb->protocol == htons(ETH_P_IP)) { icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 214154b47d56..f1f78a742b36 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -384,10 +384,11 @@ next: ; return 1; } -static inline int check_target(struct arpt_entry *e, const char *name) +static int check_target(struct arpt_entry *e, struct net *net, const char *name) { struct xt_entry_target *t = arpt_get_target(e); struct xt_tgchk_param par = { + .net = net, .table = name, .entryinfo = e, .target = t->u.kernel.target, @@ -399,8 +400,9 @@ static inline int check_target(struct arpt_entry *e, const char *name) return xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false); } -static inline int -find_check_entry(struct arpt_entry *e, const char *name, unsigned int size, +static int +find_check_entry(struct arpt_entry *e, struct net *net, const char *name, + unsigned int size, struct xt_percpu_counter_alloc_state *alloc_state) { struct xt_entry_target *t; @@ -419,7 +421,7 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size, } t->u.kernel.target = target; - ret = check_target(e, name); + ret = check_target(e, net, name); if (ret) goto err; return 0; @@ -494,12 +496,13 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e, return 0; } -static inline void cleanup_entry(struct arpt_entry *e) +static void cleanup_entry(struct arpt_entry *e, struct net *net) { struct xt_tgdtor_param par; struct xt_entry_target *t; t = arpt_get_target(e); + par.net = net; par.target = t->u.kernel.target; par.targinfo = t->data; par.family = NFPROTO_ARP; @@ -512,7 +515,9 @@ static inline void cleanup_entry(struct arpt_entry *e) /* Checks and translates the user-supplied table segment (held in * newinfo). 
*/ -static int translate_table(struct xt_table_info *newinfo, void *entry0, +static int translate_table(struct net *net, + struct xt_table_info *newinfo, + void *entry0, const struct arpt_replace *repl) { struct xt_percpu_counter_alloc_state alloc_state = { 0 }; @@ -569,7 +574,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, /* Finally, each sanity check must pass */ i = 0; xt_entry_foreach(iter, entry0, newinfo->size) { - ret = find_check_entry(iter, repl->name, repl->size, + ret = find_check_entry(iter, net, repl->name, repl->size, &alloc_state); if (ret != 0) break; @@ -580,7 +585,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, xt_entry_foreach(iter, entry0, newinfo->size) { if (i-- == 0) break; - cleanup_entry(iter); + cleanup_entry(iter, net); } return ret; } @@ -923,7 +928,7 @@ static int __do_replace(struct net *net, const char *name, /* Decrease module usage counts and free resource */ loc_cpu_old_entry = oldinfo->entries; xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size) - cleanup_entry(iter); + cleanup_entry(iter, net); xt_free_table_info(oldinfo); if (copy_to_user(counters_ptr, counters, @@ -974,7 +979,7 @@ static int do_replace(struct net *net, const void __user *user, goto free_newinfo; } - ret = translate_table(newinfo, loc_cpu_entry, &tmp); + ret = translate_table(net, newinfo, loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; @@ -986,7 +991,7 @@ static int do_replace(struct net *net, const void __user *user, free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) - cleanup_entry(iter); + cleanup_entry(iter, net); free_newinfo: xt_free_table_info(newinfo); return ret; @@ -1149,7 +1154,8 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, } } -static int translate_compat_table(struct xt_table_info **pinfo, +static int translate_compat_table(struct net *net, + struct xt_table_info **pinfo, void **pentry0, const struct compat_arpt_replace *compatr) { @@ -1217,7 +1223,7 @@ static int translate_compat_table(struct xt_table_info **pinfo, repl.num_counters = 0; repl.counters = NULL; repl.size = newinfo->size; - ret = translate_table(newinfo, entry1, &repl); + ret = translate_table(net, newinfo, entry1, &repl); if (ret) goto free_newinfo; @@ -1270,7 +1276,7 @@ static int compat_do_replace(struct net *net, void __user *user, goto free_newinfo; } - ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp); + ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; @@ -1282,7 +1288,7 @@ static int compat_do_replace(struct net *net, void __user *user, free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) - cleanup_entry(iter); + cleanup_entry(iter, net); free_newinfo: xt_free_table_info(newinfo); return ret; @@ -1509,7 +1515,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len return ret; } -static void __arpt_unregister_table(struct xt_table *table) +static void __arpt_unregister_table(struct net *net, struct xt_table *table) { struct xt_table_info *private; void *loc_cpu_entry; @@ -1521,7 +1527,7 @@ static void __arpt_unregister_table(struct xt_table *table) /* Decrease module usage counts and free resources */ loc_cpu_entry = private->entries; xt_entry_foreach(iter, loc_cpu_entry, private->size) - cleanup_entry(iter); + cleanup_entry(iter, net); if (private->number > private->initial_entries) module_put(table_owner); xt_free_table_info(private); @@ -1546,7 +1552,7 @@ int 
arpt_register_table(struct net *net, loc_cpu_entry = newinfo->entries; memcpy(loc_cpu_entry, repl->entries, repl->size); - ret = translate_table(newinfo, loc_cpu_entry, repl); + ret = translate_table(net, newinfo, loc_cpu_entry, repl); if (ret != 0) goto out_free; @@ -1561,7 +1567,7 @@ int arpt_register_table(struct net *net, ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); if (ret != 0) { - __arpt_unregister_table(new_table); + __arpt_unregister_table(net, new_table); *res = NULL; } @@ -1576,7 +1582,7 @@ void arpt_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops) { nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); - __arpt_unregister_table(table); + __arpt_unregister_table(net, table); } /* The built-in targets: standard (NULL) and error. */ diff --git a/net/ipv4/route.c b/net/ipv4/route.c index f88c93c38f11..87e979f2b74a 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -139,7 +139,8 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst); static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst); static void ipv4_link_failure(struct sk_buff *skb); static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu); + struct sk_buff *skb, u32 mtu, + bool confirm_neigh); static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb); static void ipv4_dst_destroy(struct dst_entry *dst); @@ -1043,7 +1044,8 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) } static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu) + struct sk_buff *skb, u32 mtu, + bool confirm_neigh) { struct rtable *rt = (struct rtable *) dst; struct flowi4 fl4; @@ -2687,7 +2689,8 @@ static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst) } static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu) + struct sk_buff *skb, u32 mtu, + bool confirm_neigh) { } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 3e50ac24fe41..d885ba868822 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1087,8 +1087,7 @@ do_error: goto out; out_err: /* make sure we wake any epoll edge trigger waiter */ - if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && - err == -EAGAIN)) { + if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) { sk->sk_write_space(sk); tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); } @@ -1419,8 +1418,7 @@ out_err: sock_zerocopy_put_abort(uarg, true); err = sk_stream_error(sk, flags, err); /* make sure we wake any epoll edge trigger waiter */ - if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && - err == -EAGAIN)) { + if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) { sk->sk_write_space(sk); tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); } diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 32772d6ded4e..a6545ef0d27b 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -306,7 +306,8 @@ static u32 bbr_tso_segs_goal(struct sock *sk) /* Sort of tcp_tso_autosize() but ignoring * driver provided sk_gso_max_size. 
*/ - bytes = min_t(unsigned long, sk->sk_pacing_rate >> sk->sk_pacing_shift, + bytes = min_t(unsigned long, + sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift), GSO_MAX_SIZE - 1 - MAX_TCP_HEADER); segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk)); diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index e38705165ac9..8a01428f80c1 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -121,14 +121,14 @@ int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, struct sk_psock *psock; int copied, ret; - if (unlikely(flags & MSG_ERRQUEUE)) - return inet_recv_error(sk, msg, len, addr_len); - if (!skb_queue_empty(&sk->sk_receive_queue)) - return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); - psock = sk_psock_get(sk); if (unlikely(!psock)) return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); + if (unlikely(flags & MSG_ERRQUEUE)) + return inet_recv_error(sk, msg, len, addr_len); + if (!skb_queue_empty(&sk->sk_receive_queue) && + sk_psock_queue_empty(psock)) + return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); lock_sock(sk); msg_bytes_ready: copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags); @@ -139,7 +139,7 @@ msg_bytes_ready: timeo = sock_rcvtimeo(sk, nonblock); data = tcp_bpf_wait_data(sk, psock, flags, timeo, &err); if (data) { - if (skb_queue_empty(&sk->sk_receive_queue)) + if (!sk_psock_queue_empty(psock)) goto msg_bytes_ready; release_sock(sk); sk_psock_put(sk, psock); @@ -315,10 +315,7 @@ more_data: */ delta = msg->sg.size; psock->eval = sk_psock_msg_verdict(sk, psock, msg); - if (msg->sg.size < delta) - delta -= msg->sg.size; - else - delta = 0; + delta -= msg->sg.size; } if (msg->cork_bytes && diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 88b987ca9ebb..5347ab2c9c58 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -915,9 +915,10 @@ static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq, /* This must be called before lost_out is incremented */ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) { - if (!tp->retransmit_skb_hint || - before(TCP_SKB_CB(skb)->seq, - TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) + if ((!tp->retransmit_skb_hint && tp->retrans_out >= tp->lost_out) || + (tp->retransmit_skb_hint && + before(TCP_SKB_CB(skb)->seq, + TCP_SKB_CB(tp->retransmit_skb_hint)->seq))) tp->retransmit_skb_hint = skb; } @@ -1727,8 +1728,11 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, } /* Ignore very old stuff early */ - if (!after(sp[used_sacks].end_seq, prior_snd_una)) + if (!after(sp[used_sacks].end_seq, prior_snd_una)) { + if (i == 0) + first_sack_index = -1; continue; + } used_sacks++; } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 92282f98dc82..1c7326e04f9b 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2147,13 +2147,14 @@ static void *listening_get_next(struct seq_file *seq, void *cur) struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); struct inet_listen_hashbucket *ilb; + struct hlist_nulls_node *node; struct sock *sk = cur; if (!sk) { get_head: ilb = &tcp_hashinfo.listening_hash[st->bucket]; spin_lock(&ilb->lock); - sk = sk_head(&ilb->head); + sk = sk_nulls_head(&ilb->nulls_head); st->offset = 0; goto get_sk; } @@ -2161,9 +2162,9 @@ get_head: ++st->num; ++st->offset; - sk = sk_next(sk); + sk = sk_nulls_next(sk); get_sk: - sk_for_each_from(sk) { + sk_nulls_for_each_from(sk, node) { if (!net_eq(sock_net(sk), net)) continue; if (sk->sk_family == afinfo->family) 
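The inet_diag.c, inet_hashtables.c and tcp_ipv4.c hunks above convert the TCP listening hash buckets from a plain hlist to an hlist_nulls list: sockets are added with __sk_nulls_add_node_rcu() (or at the tail for IPv6 reuseport listeners), each bucket is initialised with its own marker via INIT_HLIST_NULLS_HEAD(..., i + LISTENING_NULLS_BASE), and the iterators switch to sk_nulls_for_each(). The point of a nulls list is that the terminating "pointer" is an odd value encoding which chain it ends, so a lockless RCU reader that races with a socket being freed and reused on another chain can notice it finished on the wrong terminator and restart. The user-space sketch below models only that terminator check; struct node, make_nulls(), is_nulls(), nulls_value() and walk_bucket() are invented names, not the kernel's hlist_nulls API.

    #include <stdio.h>
    #include <stdint.h>

    struct node {
            struct node *next;
            int data;
    };

    /* Encode a bucket number as an odd "pointer" so it can never be confused
     * with a real node address (real nodes are at least 2-byte aligned). */
    static struct node *make_nulls(unsigned long bucket)
    {
            return (struct node *)((bucket << 1) | 1UL);
    }

    static int is_nulls(const struct node *p)
    {
            return (uintptr_t)p & 1UL;
    }

    static unsigned long nulls_value(const struct node *p)
    {
            return (uintptr_t)p >> 1;
    }

    static void walk_bucket(struct node *head, unsigned long bucket)
    {
            struct node *p;

            for (p = head; !is_nulls(p); p = p->next)
                    printf("visited socket %d\n", p->data);

            if (nulls_value(p) != bucket)
                    printf("ended on bucket %lu's terminator, not %lu: restart\n",
                           nulls_value(p), bucket);
    }

    int main(void)
    {
            struct node b = { make_nulls(7), 2 };
            struct node a = { &b, 1 };

            walk_bucket(&a, 7);     /* clean walk of bucket 7                      */
            walk_bucket(&a, 3);     /* wrong terminator: a real reader would retry */
            return 0;
    }

In the kernel the restart is the caller's job: lockless lookups such as __inet_lookup_established() compare the nulls value they ended on against the slot they started from and redo the walk when the two differ.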
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index b184f03d7437..58c92a7d671c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -72,6 +72,9 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb) __skb_unlink(skb, &sk->sk_write_queue); tcp_rbtree_insert(&sk->tcp_rtx_queue, skb); + if (tp->highest_sack == NULL) + tp->highest_sack = skb; + tp->packets_out += tcp_skb_pcount(skb); if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) tcp_rearm_rto(sk); @@ -1725,7 +1728,7 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now, u32 bytes, segs; bytes = min_t(unsigned long, - sk->sk_pacing_rate >> sk->sk_pacing_shift, + sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift), sk->sk_gso_max_size - 1 - MAX_TCP_HEADER); /* Goal is to send at least one packet per ms, @@ -2260,7 +2263,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb, limit = max_t(unsigned long, 2 * skb->truesize, - sk->sk_pacing_rate >> sk->sk_pacing_shift); + sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift)); if (sk->sk_pacing_status == SK_PACING_NONE) limit = min_t(unsigned long, limit, sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes); @@ -2438,6 +2441,14 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, if (tcp_small_queue_check(sk, skb, 0)) break; + /* Argh, we hit an empty skb(), presumably a thread + * is sleeping in sendmsg()/sk_stream_wait_memory(). + * We do not want to send a pure-ack packet and have + * a strange looking rtx queue with empty packet(s). + */ + if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) + break; + if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) break; @@ -3121,7 +3132,7 @@ void sk_forced_mem_schedule(struct sock *sk, int size) */ void tcp_send_fin(struct sock *sk) { - struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk); + struct sk_buff *skb, *tskb, *tail = tcp_write_queue_tail(sk); struct tcp_sock *tp = tcp_sk(sk); /* Optimization, tack on the FIN if we have one skb in write queue and @@ -3129,6 +3140,7 @@ void tcp_send_fin(struct sock *sk) * Note: in the latter case, FIN packet will be sent after a timeout, * as TCP stack thinks it has already been transmitted. */ + tskb = tail; if (!tskb && tcp_under_memory_pressure(sk)) tskb = skb_rb_last(&sk->tcp_rtx_queue); @@ -3136,7 +3148,7 @@ void tcp_send_fin(struct sock *sk) TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN; TCP_SKB_CB(tskb)->end_seq++; tp->write_seq++; - if (tcp_write_queue_empty(sk)) { + if (!tail) { /* This means tskb was already sent. * Pretend we included the FIN on previous transmit. 
* We need to set tp->snd_nxt to the value it would have diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c index 12ab5db2b71c..38d3ad141161 100644 --- a/net/ipv4/tcp_ulp.c +++ b/net/ipv4/tcp_ulp.c @@ -99,17 +99,19 @@ void tcp_get_available_ulp(char *buf, size_t maxlen) rcu_read_unlock(); } -void tcp_update_ulp(struct sock *sk, struct proto *proto) +void tcp_update_ulp(struct sock *sk, struct proto *proto, + void (*write_space)(struct sock *sk)) { struct inet_connection_sock *icsk = inet_csk(sk); if (!icsk->icsk_ulp_ops) { + sk->sk_write_space = write_space; sk->sk_prot = proto; return; } if (icsk->icsk_ulp_ops->update) - icsk->icsk_ulp_ops->update(sk, proto); + icsk->icsk_ulp_ops->update(sk, proto, write_space); } void tcp_cleanup_ulp(struct sock *sk) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 4da5758cc718..93a355b6b092 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1475,7 +1475,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb) * queue contains some other skb */ rmem = atomic_add_return(size, &sk->sk_rmem_alloc); - if (rmem > (size + sk->sk_rcvbuf)) + if (rmem > (size + (unsigned int)sk->sk_rcvbuf)) goto uncharge_drop; spin_lock(&list->lock); diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 35b84b52b702..9ebd54752e03 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -100,12 +100,13 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, } static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu) + struct sk_buff *skb, u32 mtu, + bool confirm_neigh) { struct xfrm_dst *xdst = (struct xfrm_dst *)dst; struct dst_entry *path = xdst->route; - path->ops->update_pmtu(path, sk, skb, mtu); + path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh); } static void xfrm4_redirect(struct dst_entry *dst, struct sock *sk, diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 98d82305d6de..39d861d00377 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -5231,16 +5231,16 @@ static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb, return -EINVAL; } + if (!netlink_strict_get_check(skb)) + return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX, + ifa_ipv6_policy, extack); + ifm = nlmsg_data(nlh); if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) { NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get address request"); return -EINVAL; } - if (!netlink_strict_get_check(skb)) - return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX, - ifa_ipv6_policy, extack); - err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy, extack); if (err) diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index fe9cb8d1adca..e315526fa244 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -146,7 +146,7 @@ struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu) if (IS_ERR(dst)) return NULL; - dst->ops->update_pmtu(dst, sk, NULL, mtu); + dst->ops->update_pmtu(dst, sk, NULL, mtu, true); dst = inet6_csk_route_socket(sk, &fl6); return IS_ERR(dst) ? 
NULL : dst; diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 9d0965252ddf..ee968d980746 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1040,7 +1040,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, /* TooBig packet may have updated dst->dev's mtu */ if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu) - dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu); + dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false); err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, NEXTHDR_GRE); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 754a484d35df..2f376dbc37d5 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -640,7 +640,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (rel_info > dst_mtu(skb_dst(skb2))) goto out; - skb_dst_update_pmtu(skb2, rel_info); + skb_dst_update_pmtu_no_confirm(skb2, rel_info); } icmp_send(skb2, rel_type, rel_code, htonl(rel_info)); @@ -1132,7 +1132,7 @@ route_lookup: mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ? IPV6_MIN_MTU : IPV4_MIN_MTU); - skb_dst_update_pmtu(skb, mtu); + skb_dst_update_pmtu_no_confirm(skb, mtu); if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) { *pmtu = mtu; err = -EMSGSIZE; diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 024db17386d2..6f08b760c2a7 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -479,7 +479,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) mtu = dst_mtu(dst); if (skb->len > mtu) { - skb_dst_update_pmtu(skb, mtu); + skb_dst_update_pmtu_no_confirm(skb, mtu); if (skb->protocol == htons(ETH_P_IPV6)) { if (mtu < IPV6_MIN_MTU) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index b59940416cb5..affb51c11a25 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -95,7 +95,8 @@ static int ip6_pkt_prohibit(struct sk_buff *skb); static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb); static void ip6_link_failure(struct sk_buff *skb); static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu); + struct sk_buff *skb, u32 mtu, + bool confirm_neigh); static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb); static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif, @@ -264,7 +265,8 @@ static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst) } static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu) + struct sk_buff *skb, u32 mtu, + bool confirm_neigh) { } @@ -2692,7 +2694,8 @@ static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt) } static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, - const struct ipv6hdr *iph, u32 mtu) + const struct ipv6hdr *iph, u32 mtu, + bool confirm_neigh) { const struct in6_addr *daddr, *saddr; struct rt6_info *rt6 = (struct rt6_info *)dst; @@ -2710,7 +2713,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, daddr = NULL; saddr = NULL; } - dst_confirm_neigh(dst, daddr); + + if (confirm_neigh) + dst_confirm_neigh(dst, daddr); + mtu = max_t(u32, mtu, IPV6_MIN_MTU); if (mtu >= dst_mtu(dst)) return; @@ -2764,9 +2770,11 @@ out_unlock: } static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu) + struct sk_buff *skb, u32 mtu, + bool confirm_neigh) { - __ip6_rt_update_pmtu(dst, sk, skb ? 
ipv6_hdr(skb) : NULL, mtu); + __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu, + confirm_neigh); } void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, @@ -2785,7 +2793,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, dst = ip6_route_output(net, NULL, &fl6); if (!dst->error) - __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu)); + __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true); dst_release(dst); } EXPORT_SYMBOL_GPL(ip6_update_pmtu); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index b2ccbc473127..98954830c40b 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -944,7 +944,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, } if (tunnel->parms.iph.daddr) - skb_dst_update_pmtu(skb, mtu); + skb_dst_update_pmtu_no_confirm(skb, mtu); if (skb->len > mtu && !skb_is_gso(skb)) { icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 699e0730ce8e..af7a4b8b1e9c 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -98,12 +98,13 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, } static void xfrm6_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu) + struct sk_buff *skb, u32 mtu, + bool confirm_neigh) { struct xfrm_dst *xdst = (struct xfrm_dst *)dst; struct dst_entry *path = xdst->route; - path->ops->update_pmtu(path, sk, skb, mtu); + path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh); } static void xfrm6_redirect(struct dst_entry *dst, struct sock *sk, diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c index 204a8351efff..c29170e767a8 100644 --- a/net/llc/llc_station.c +++ b/net/llc/llc_station.c @@ -32,7 +32,7 @@ static int llc_stat_ev_rx_null_dsap_xid_c(struct sk_buff *skb) return LLC_PDU_IS_CMD(pdu) && /* command PDU */ LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */ LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID && - !pdu->dsap ? 0 : 1; /* NULL DSAP value */ + !pdu->dsap; /* NULL DSAP value */ } static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb) @@ -42,7 +42,7 @@ static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb) return LLC_PDU_IS_CMD(pdu) && /* command PDU */ LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */ LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST && - !pdu->dsap ? 
0 : 1; /* NULL DSAP */ + !pdu->dsap; /* NULL DSAP */ } static int llc_station_ac_send_xid_r(struct sk_buff *skb) diff --git a/net/mac80211/airtime.c b/net/mac80211/airtime.c index 63cb0028b02d..9fc2968856c0 100644 --- a/net/mac80211/airtime.c +++ b/net/mac80211/airtime.c @@ -442,7 +442,7 @@ u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw, return 0; sband = hw->wiphy->bands[status->band]; - if (!sband || status->rate_idx > sband->n_bitrates) + if (!sband || status->rate_idx >= sband->n_bitrates) return 0; rate = &sband->bitrates[status->rate_idx]; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 4fb7f1f12109..000c742d0527 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2954,6 +2954,28 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy, return err; } +static void ieee80211_end_cac(struct wiphy *wiphy, + struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + + mutex_lock(&local->mtx); + list_for_each_entry(sdata, &local->interfaces, list) { + /* it might be waiting for the local->mtx, but then + * by the time it gets it, sdata->wdev.cac_started + * will no longer be true + */ + cancel_delayed_work(&sdata->dfs_cac_timer_work); + + if (sdata->wdev.cac_started) { + ieee80211_vif_release_channel(sdata); + sdata->wdev.cac_started = false; + } + } + mutex_unlock(&local->mtx); +} + static struct cfg80211_beacon_data * cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon) { @@ -4023,6 +4045,7 @@ const struct cfg80211_ops mac80211_config_ops = { #endif .get_channel = ieee80211_cfg_get_channel, .start_radar_detection = ieee80211_start_radar_detection, + .end_cac = ieee80211_end_cac, .channel_switch = ieee80211_channel_switch, .set_qos_map = ieee80211_set_qos_map, .set_ap_chanwidth = ieee80211_set_ap_chanwidth, diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index b3c9001d1f43..c80b1e163ea4 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -201,8 +201,6 @@ static ssize_t sta_airtime_read(struct file *file, char __user *userbuf, char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf; u64 rx_airtime = 0, tx_airtime = 0; s64 deficit[IEEE80211_NUM_ACS]; - u32 q_depth[IEEE80211_NUM_ACS]; - u32 q_limit_l[IEEE80211_NUM_ACS], q_limit_h[IEEE80211_NUM_ACS]; ssize_t rv; int ac; @@ -214,6 +212,56 @@ static ssize_t sta_airtime_read(struct file *file, char __user *userbuf, rx_airtime += sta->airtime[ac].rx_airtime; tx_airtime += sta->airtime[ac].tx_airtime; deficit[ac] = sta->airtime[ac].deficit; + spin_unlock_bh(&local->active_txq_lock[ac]); + } + + p += scnprintf(p, bufsz + buf - p, + "RX: %llu us\nTX: %llu us\nWeight: %u\n" + "Deficit: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n", + rx_airtime, tx_airtime, sta->airtime_weight, + deficit[0], deficit[1], deficit[2], deficit[3]); + + rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + kfree(buf); + return rv; +} + +static ssize_t sta_airtime_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct sta_info *sta = file->private_data; + struct ieee80211_local *local = sta->sdata->local; + int ac; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + spin_lock_bh(&local->active_txq_lock[ac]); + sta->airtime[ac].rx_airtime = 0; + sta->airtime[ac].tx_airtime = 0; + sta->airtime[ac].deficit = sta->airtime_weight; + spin_unlock_bh(&local->active_txq_lock[ac]); + } + + return count; +} +STA_OPS_RW(airtime); + +static ssize_t 
sta_aql_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct sta_info *sta = file->private_data; + struct ieee80211_local *local = sta->sdata->local; + size_t bufsz = 400; + char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf; + u32 q_depth[IEEE80211_NUM_ACS]; + u32 q_limit_l[IEEE80211_NUM_ACS], q_limit_h[IEEE80211_NUM_ACS]; + ssize_t rv; + int ac; + + if (!buf) + return -ENOMEM; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + spin_lock_bh(&local->active_txq_lock[ac]); q_limit_l[ac] = sta->airtime[ac].aql_limit_low; q_limit_h[ac] = sta->airtime[ac].aql_limit_high; spin_unlock_bh(&local->active_txq_lock[ac]); @@ -221,12 +269,8 @@ static ssize_t sta_airtime_read(struct file *file, char __user *userbuf, } p += scnprintf(p, bufsz + buf - p, - "RX: %llu us\nTX: %llu us\nWeight: %u\n" - "Deficit: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n" "Q depth: VO: %u us VI: %u us BE: %u us BK: %u us\n" "Q limit[low/high]: VO: %u/%u VI: %u/%u BE: %u/%u BK: %u/%u\n", - rx_airtime, tx_airtime, sta->airtime_weight, - deficit[0], deficit[1], deficit[2], deficit[3], q_depth[0], q_depth[1], q_depth[2], q_depth[3], q_limit_l[0], q_limit_h[0], q_limit_l[1], q_limit_h[1], q_limit_l[2], q_limit_h[2], q_limit_l[3], q_limit_h[3]), @@ -236,11 +280,10 @@ static ssize_t sta_airtime_read(struct file *file, char __user *userbuf, return rv; } -static ssize_t sta_airtime_write(struct file *file, const char __user *userbuf, +static ssize_t sta_aql_write(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct sta_info *sta = file->private_data; - struct ieee80211_local *local = sta->sdata->local; u32 ac, q_limit_l, q_limit_h; char _buf[100] = {}, *buf = _buf; @@ -251,7 +294,7 @@ static ssize_t sta_airtime_write(struct file *file, const char __user *userbuf, return -EFAULT; buf[sizeof(_buf) - 1] = '\0'; - if (sscanf(buf, "queue limit %u %u %u", &ac, &q_limit_l, &q_limit_h) + if (sscanf(buf, "limit %u %u %u", &ac, &q_limit_l, &q_limit_h) != 3) return -EINVAL; @@ -261,17 +304,10 @@ static ssize_t sta_airtime_write(struct file *file, const char __user *userbuf, sta->airtime[ac].aql_limit_low = q_limit_l; sta->airtime[ac].aql_limit_high = q_limit_h; - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - spin_lock_bh(&local->active_txq_lock[ac]); - sta->airtime[ac].rx_airtime = 0; - sta->airtime[ac].tx_airtime = 0; - sta->airtime[ac].deficit = sta->airtime_weight; - spin_unlock_bh(&local->active_txq_lock[ac]); - } - return count; } -STA_OPS_RW(airtime); +STA_OPS_RW(aql); + static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) @@ -996,6 +1032,10 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) DEBUGFS_ADD(airtime); + if (wiphy_ext_feature_isset(local->hw.wiphy, + NL80211_EXT_FEATURE_AQL)) + DEBUGFS_ADD(aql); + debugfs_create_xul("driver_buffered_tids", 0400, sta->debugfs_dir, &sta->driver_buffered_tids); diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 6cca0853f183..4c2b5ba3ac09 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -672,9 +672,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H; } - local->airtime_flags = AIRTIME_USE_TX | - AIRTIME_USE_RX | - AIRTIME_USE_AQL; + local->airtime_flags = AIRTIME_USE_TX | AIRTIME_USE_RX; local->aql_threshold = IEEE80211_AQL_THRESHOLD; atomic_set(&local->aql_total_pending_airtime, 0); diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 
68af62306385..d69983370381 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -328,6 +328,9 @@ u32 airtime_link_metric_get(struct ieee80211_local *local, unsigned long fail_avg = ewma_mesh_fail_avg_read(&sta->mesh->fail_avg); + if (sta->mesh->plink_state != NL80211_PLINK_ESTAB) + return MAX_METRIC; + /* Try to get rate based on HW/SW RC algorithm. * Rate is returned in units of Kbps, correct this * to comply with airtime calculation units diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 8eafd81e97b4..0f5f40678885 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1916,6 +1916,9 @@ void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local, { int tx_pending; + if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) + return; + if (!tx_completed) { if (sta) atomic_add(tx_airtime, diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index ad5d8a4ae56d..c00e28585f9d 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -127,7 +127,6 @@ enum ieee80211_agg_stop_reason { /* Debugfs flags to enable/disable use of RX/TX airtime in scheduler */ #define AIRTIME_USE_TX BIT(0) #define AIRTIME_USE_RX BIT(1) -#define AIRTIME_USE_AQL BIT(2) struct airtime_info { u64 rx_airtime; diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c index 727dc9f3f3b3..e7f57bb18f6e 100644 --- a/net/mac80211/tkip.c +++ b/net/mac80211/tkip.c @@ -263,9 +263,21 @@ int ieee80211_tkip_decrypt_data(struct arc4_ctx *ctx, if ((keyid >> 6) != key->conf.keyidx) return TKIP_DECRYPT_INVALID_KEYIDX; - if (rx_ctx->ctx.state != TKIP_STATE_NOT_INIT && - (iv32 < rx_ctx->iv32 || - (iv32 == rx_ctx->iv32 && iv16 <= rx_ctx->iv16))) + /* Reject replays if the received TSC is smaller than or equal to the + * last received value in a valid message, but with an exception for + * the case where a new key has been set and no valid frame using that + * key has yet received and the local RSC was initialized to 0. This + * exception allows the very first frame sent by the transmitter to be + * accepted even if that transmitter were to use TSC 0 (IEEE 802.11 + * described TSC to be initialized to 1 whenever a new key is taken into + * use). + */ + if (iv32 < rx_ctx->iv32 || + (iv32 == rx_ctx->iv32 && + (iv16 < rx_ctx->iv16 || + (iv16 == rx_ctx->iv16 && + (rx_ctx->iv32 || rx_ctx->iv16 || + rx_ctx->ctx.state != TKIP_STATE_NOT_INIT))))) return TKIP_DECRYPT_REPLAY; if (only_iv) { diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index b696b9136f4c..a8a7306a1f56 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2256,6 +2256,15 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, payload[7]); } + /* + * Initialize skb->priority for QoS frames. This is put in the TID field + * of the frame before passing it to the driver. 
+ */ + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 *p = ieee80211_get_qos_ctl(hdr); + skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK; + } + memset(info, 0, sizeof(*info)); info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS | @@ -3668,7 +3677,7 @@ begin: IEEE80211_SKB_CB(skb)->control.vif = vif; - if (local->airtime_flags & AIRTIME_USE_AQL) { + if (wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) { u32 airtime; airtime = ieee80211_calc_expected_tx_airtime(hw, vif, txq->sta, @@ -3790,7 +3799,7 @@ bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw, struct sta_info *sta; struct ieee80211_local *local = hw_to_local(hw); - if (!(local->airtime_flags & AIRTIME_USE_AQL)) + if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) return true; if (!txq->sta) diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h index 1abd6f0dc227..077a2cb65fcb 100644 --- a/net/netfilter/ipset/ip_set_bitmap_gen.h +++ b/net/netfilter/ipset/ip_set_bitmap_gen.h @@ -60,9 +60,9 @@ mtype_destroy(struct ip_set *set) if (SET_WITH_TIMEOUT(set)) del_timer_sync(&map->gc); - ip_set_free(map->members); if (set->dsize && set->extensions & IPSET_EXT_DESTROY) mtype_ext_cleanup(set); + ip_set_free(map->members); ip_set_free(map); set->data = NULL; diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 169e0a04f814..cf895bc80871 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -1848,6 +1848,7 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb, struct ip_set *set; struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {}; int ret = 0; + u32 lineno; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_SETNAME] || @@ -1864,7 +1865,7 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb, return -IPSET_ERR_PROTOCOL; rcu_read_lock_bh(); - ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0, 0); + ret = set->variant->uadt(set, tb, IPSET_TEST, &lineno, 0, 0); rcu_read_unlock_bh(); /* Userspace can't trigger element to be re-added */ if (ret == -EAGAIN) diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index b1e300f8881b..b00866d777fe 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -208,7 +208,7 @@ static inline void maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu) struct rtable *ort = skb_rtable(skb); if (!skb->dev && sk && sk_fullsock(sk)) - ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu); + ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu, true); } static inline bool ensure_mtu_is_adequate(struct netns_ipvs *ipvs, int skb_af, diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 0af1898af2b8..f475fec84536 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -895,9 +895,10 @@ static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo, } /* Resolve race on insertion if this protocol allows this. */ -static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb, - enum ip_conntrack_info ctinfo, - struct nf_conntrack_tuple_hash *h) +static __cold noinline int +nf_ct_resolve_clash(struct net *net, struct sk_buff *skb, + enum ip_conntrack_info ctinfo, + struct nf_conntrack_tuple_hash *h) { /* This is the conntrack entry already in hashes that won race. 
*/ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index d8d33ef52ce0..6a1c8f1f6171 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -3626,6 +3626,9 @@ static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list) list_for_each_entry(net, net_exit_list, exit_list) ctnetlink_net_exit(net); + + /* wait for other cpus until they are done with ctnl_notifiers */ + synchronize_rcu(); } static struct pernet_operations ctnetlink_net_ops = { diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index b6b14db3955b..b3f4a334f9d7 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -677,6 +677,9 @@ static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[], unsigned int *timeouts = data; int i; + if (!timeouts) + timeouts = dn->dccp_timeout; + /* set default DCCP timeouts. */ for (i=0; i<CT_DCCP_MAX; i++) timeouts[i] = dn->dccp_timeout[i]; diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index fce3d93f1541..0399ae8f1188 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -594,6 +594,9 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[], struct nf_sctp_net *sn = nf_sctp_pernet(net); int i; + if (!timeouts) + timeouts = sn->timeouts; + /* set default SCTP timeouts. */ for (i=0; i<SCTP_CONNTRACK_MAX; i++) timeouts[i] = sn->timeouts[i]; diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index 9889d52eda82..e33a73cb1f42 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -134,11 +134,6 @@ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp) #define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ) #define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ) -static inline __s32 nf_flow_timeout_delta(unsigned int timeout) -{ - return (__s32)(timeout - (u32)jiffies); -} - static void flow_offload_fixup_ct_timeout(struct nf_conn *ct) { const struct nf_conntrack_l4proto *l4proto; @@ -232,7 +227,7 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow) { int err; - flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; + flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT; err = rhashtable_insert_fast(&flow_table->rhashtable, &flow->tuplehash[0].node, diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c index b9e7dd6e60ce..7ea2ddc2aa93 100644 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@ -280,7 +280,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0) return NF_DROP; - flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; + flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT; iph = ip_hdr(skb); ip_decrease_ttl(iph); skb->tstamp = 0; @@ -509,7 +509,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, if (nf_flow_nat_ipv6(flow, skb, dir) < 0) return NF_DROP; - flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; + flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT; ip6h = ipv6_hdr(skb); ip6h->hop_limit--; skb->tstamp = 0; diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index c54c9a6cc981..d06969af1085 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -28,6 +28,7 
@@ struct nf_flow_key { struct flow_dissector_key_basic basic; union { struct flow_dissector_key_ipv4_addrs ipv4; + struct flow_dissector_key_ipv6_addrs ipv6; }; struct flow_dissector_key_tcp tcp; struct flow_dissector_key_ports tp; @@ -57,6 +58,7 @@ static int nf_flow_rule_match(struct nf_flow_match *match, NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control); NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic); NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4); + NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6); NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp); NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp); @@ -69,15 +71,24 @@ static int nf_flow_rule_match(struct nf_flow_match *match, key->ipv4.dst = tuple->dst_v4.s_addr; mask->ipv4.dst = 0xffffffff; break; + case AF_INET6: + key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; + key->basic.n_proto = htons(ETH_P_IPV6); + key->ipv6.src = tuple->src_v6; + memset(&mask->ipv6.src, 0xff, sizeof(mask->ipv6.src)); + key->ipv6.dst = tuple->dst_v6; + memset(&mask->ipv6.dst, 0xff, sizeof(mask->ipv6.dst)); + break; default: return -EOPNOTSUPP; } + match->dissector.used_keys |= BIT(key->control.addr_type); mask->basic.n_proto = 0xffff; switch (tuple->l4proto) { case IPPROTO_TCP: key->tcp.flags = 0; - mask->tcp.flags = TCP_FLAG_RST | TCP_FLAG_FIN; + mask->tcp.flags = cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16); match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP); break; case IPPROTO_UDP: @@ -96,14 +107,13 @@ static int nf_flow_rule_match(struct nf_flow_match *match, match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT(FLOW_DISSECTOR_KEY_PORTS); return 0; } static void flow_offload_mangle(struct flow_action_entry *entry, - enum flow_action_mangle_base htype, - u32 offset, u8 *value, u8 *mask) + enum flow_action_mangle_base htype, u32 offset, + const __be32 *value, const __be32 *mask) { entry->id = FLOW_ACTION_MANGLE; entry->mangle.htype = htype; @@ -140,12 +150,12 @@ static int flow_offload_eth_src(struct net *net, memcpy(&val16, dev->dev_addr, 2); val = val16 << 16; flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4, - (u8 *)&val, (u8 *)&mask); + &val, &mask); mask = ~0xffffffff; memcpy(&val, dev->dev_addr + 2, 4); flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8, - (u8 *)&val, (u8 *)&mask); + &val, &mask); dev_put(dev); return 0; @@ -156,27 +166,41 @@ static int flow_offload_eth_dst(struct net *net, enum flow_offload_tuple_dir dir, struct nf_flow_rule *flow_rule) { - const struct flow_offload_tuple *tuple = &flow->tuplehash[dir].tuple; struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule); struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule); + const void *daddr = &flow->tuplehash[!dir].tuple.src_v4; + const struct dst_entry *dst_cache; + unsigned char ha[ETH_ALEN]; struct neighbour *n; u32 mask, val; + u8 nud_state; u16 val16; - n = dst_neigh_lookup(tuple->dst_cache, &tuple->dst_v4); + dst_cache = flow->tuplehash[dir].tuple.dst_cache; + n = dst_neigh_lookup(dst_cache, daddr); if (!n) return -ENOENT; + read_lock_bh(&n->lock); + nud_state = n->nud_state; + ether_addr_copy(ha, n->ha); + read_unlock_bh(&n->lock); + + if (!(nud_state & NUD_VALID)) { + neigh_release(n); + return -ENOENT; + } + mask = ~0xffffffff; - memcpy(&val, n->ha, 4); + memcpy(&val, ha, 4); flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0, - (u8 *)&val, (u8 
*)&mask); + &val, &mask); mask = ~0x0000ffff; - memcpy(&val16, n->ha + 4, 2); + memcpy(&val16, ha + 4, 2); val = val16; flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4, - (u8 *)&val, (u8 *)&mask); + &val, &mask); neigh_release(n); return 0; @@ -206,7 +230,7 @@ static void flow_offload_ipv4_snat(struct net *net, } flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset, - (u8 *)&addr, (u8 *)&mask); + &addr, &mask); } static void flow_offload_ipv4_dnat(struct net *net, @@ -233,12 +257,12 @@ static void flow_offload_ipv4_dnat(struct net *net, } flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset, - (u8 *)&addr, (u8 *)&mask); + &addr, &mask); } static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule, unsigned int offset, - u8 *addr, u8 *mask) + const __be32 *addr, const __be32 *mask) { struct flow_action_entry *entry; int i; @@ -246,8 +270,7 @@ static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule, for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32)) { entry = flow_action_entry_next(flow_rule); flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6, - offset + i, - &addr[i], mask); + offset + i, &addr[i], mask); } } @@ -257,23 +280,23 @@ static void flow_offload_ipv6_snat(struct net *net, struct nf_flow_rule *flow_rule) { u32 mask = ~htonl(0xffffffff); - const u8 *addr; + const __be32 *addr; u32 offset; switch (dir) { case FLOW_OFFLOAD_DIR_ORIGINAL: - addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr; + addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr32; offset = offsetof(struct ipv6hdr, saddr); break; case FLOW_OFFLOAD_DIR_REPLY: - addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr; + addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr32; offset = offsetof(struct ipv6hdr, daddr); break; default: return; } - flow_offload_ipv6_mangle(flow_rule, offset, (u8 *)addr, (u8 *)&mask); + flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask); } static void flow_offload_ipv6_dnat(struct net *net, @@ -282,23 +305,23 @@ static void flow_offload_ipv6_dnat(struct net *net, struct nf_flow_rule *flow_rule) { u32 mask = ~htonl(0xffffffff); - const u8 *addr; + const __be32 *addr; u32 offset; switch (dir) { case FLOW_OFFLOAD_DIR_ORIGINAL: - addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr; + addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr32; offset = offsetof(struct ipv6hdr, daddr); break; case FLOW_OFFLOAD_DIR_REPLY: - addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr; + addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr32; offset = offsetof(struct ipv6hdr, saddr); break; default: return; } - flow_offload_ipv6_mangle(flow_rule, offset, (u8 *)addr, (u8 *)&mask); + flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask); } static int flow_offload_l4proto(const struct flow_offload *flow) @@ -326,25 +349,28 @@ static void flow_offload_port_snat(struct net *net, struct nf_flow_rule *flow_rule) { struct flow_action_entry *entry = flow_action_entry_next(flow_rule); - u32 mask = ~htonl(0xffff0000); - __be16 port; + u32 mask, port; u32 offset; switch (dir) { case FLOW_OFFLOAD_DIR_ORIGINAL: - port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port; + port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port); offset = 0; /* offsetof(struct tcphdr, source); */ + port = htonl(port << 16); + mask = ~htonl(0xffff0000); break; case FLOW_OFFLOAD_DIR_REPLY: - port = 
flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port; + port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port); offset = 0; /* offsetof(struct tcphdr, dest); */ + port = htonl(port); + mask = ~htonl(0xffff); break; default: - break; + return; } flow_offload_mangle(entry, flow_offload_l4proto(flow), offset, - (u8 *)&port, (u8 *)&mask); + &port, &mask); } static void flow_offload_port_dnat(struct net *net, @@ -353,25 +379,28 @@ static void flow_offload_port_dnat(struct net *net, struct nf_flow_rule *flow_rule) { struct flow_action_entry *entry = flow_action_entry_next(flow_rule); - u32 mask = ~htonl(0xffff); - __be16 port; + u32 mask, port; u32 offset; switch (dir) { case FLOW_OFFLOAD_DIR_ORIGINAL: - port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port; - offset = 0; /* offsetof(struct tcphdr, source); */ + port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port); + offset = 0; /* offsetof(struct tcphdr, dest); */ + port = htonl(port); + mask = ~htonl(0xffff); break; case FLOW_OFFLOAD_DIR_REPLY: - port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port; - offset = 0; /* offsetof(struct tcphdr, dest); */ + port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port); + offset = 0; /* offsetof(struct tcphdr, source); */ + port = htonl(port << 16); + mask = ~htonl(0xffff0000); break; default: - break; + return; } flow_offload_mangle(entry, flow_offload_l4proto(flow), offset, - (u8 *)&port, (u8 *)&mask); + &port, &mask); } static void flow_offload_ipv4_checksum(struct net *net, @@ -574,7 +603,7 @@ static int flow_offload_tuple_add(struct flow_offload_work *offload, cls_flow.rule = flow_rule->rule; list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list) { - err = block_cb->cb(TC_SETUP_FT, &cls_flow, + err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv); if (err < 0) continue; @@ -599,7 +628,7 @@ static void flow_offload_tuple_del(struct flow_offload_work *offload, &offload->flow->tuplehash[dir].tuple, &extack); list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list) - block_cb->cb(TC_SETUP_FT, &cls_flow, block_cb->cb_priv); + block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv); offload->flow->flags |= FLOW_OFFLOAD_HW_DEAD; } @@ -656,7 +685,7 @@ static void flow_offload_tuple_stats(struct flow_offload_work *offload, &offload->flow->tuplehash[dir].tuple, &extack); list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list) - block_cb->cb(TC_SETUP_FT, &cls_flow, block_cb->cb_priv); + block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv); memcpy(stats, &cls_flow.stats, sizeof(*stats)); } @@ -752,9 +781,9 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable, struct flow_offload *flow) { struct flow_offload_work *offload; - s64 delta; + __s32 delta; - delta = flow->timeout - jiffies; + delta = nf_flow_timeout_delta(flow->timeout); if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10) || flow->flags & FLOW_OFFLOAD_HW_DYING) return; @@ -822,7 +851,7 @@ int nf_flow_table_offload_setup(struct nf_flowtable *flowtable, bo.extack = &extack; INIT_LIST_HEAD(&bo.cb_list); - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo); + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, &bo); if (err < 0) return err; diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c index 0a59c14b5177..64eedc17037a 100644 --- a/net/netfilter/nf_nat_proto.c +++ b/net/netfilter/nf_nat_proto.c @@ -233,6 +233,19 @@ icmp_manip_pkt(struct sk_buff *skb, return false; hdr = (struct 
icmphdr *)(skb->data + hdroff); + switch (hdr->type) { + case ICMP_ECHO: + case ICMP_ECHOREPLY: + case ICMP_TIMESTAMP: + case ICMP_TIMESTAMPREPLY: + case ICMP_INFO_REQUEST: + case ICMP_INFO_REPLY: + case ICMP_ADDRESS: + case ICMP_ADDRESSREPLY: + break; + default: + return true; + } inet_proto_csum_replace2(&hdr->checksum, skb, hdr->un.echo.id, tuple->src.u.icmp.id, false); hdr->un.echo.id = tuple->src.u.icmp.id; diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index a2b58de82600..f8f52ff99cfb 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -189,7 +189,7 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state, goto err; } - if (!skb_dst_force(skb) && state->hook != NF_INET_PRE_ROUTING) { + if (skb_dst(skb) && !skb_dst_force(skb)) { status = -ENETDOWN; goto err; } diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index c26a5663795e..65f51a2e9c2a 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -22,6 +22,8 @@ #include <net/net_namespace.h> #include <net/sock.h> +#define NFT_MODULE_AUTOLOAD_LIMIT (MODULE_NAME_LEN - sizeof("nft-expr-255-")) + static LIST_HEAD(nf_tables_expressions); static LIST_HEAD(nf_tables_objects); static LIST_HEAD(nf_tables_flowtables); @@ -564,33 +566,34 @@ __nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family) } /* - * Loading a module requires dropping mutex that guards the - * transaction. - * We first need to abort any pending transactions as once - * mutex is unlocked a different client could start a new - * transaction. It must not see any 'future generation' - * changes * as these changes will never happen. + * Loading a module requires dropping mutex that guards the transaction. + * A different client might race to start a new transaction meanwhile. Zap the + * list of pending transaction and then restore it once the mutex is grabbed + * again. Users of this function return EAGAIN which implicitly triggers the + * transaction abort path to clean up the list of pending transactions. */ #ifdef CONFIG_MODULES -static int __nf_tables_abort(struct net *net); - static void nft_request_module(struct net *net, const char *fmt, ...) 
{ char module_name[MODULE_NAME_LEN]; + LIST_HEAD(commit_list); va_list args; int ret; - __nf_tables_abort(net); + list_splice_init(&net->nft.commit_list, &commit_list); va_start(args, fmt); ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); va_end(args); - if (WARN(ret >= MODULE_NAME_LEN, "truncated: '%s' (len %d)", module_name, ret)) + if (ret >= MODULE_NAME_LEN) return; mutex_unlock(&net->nft.commit_mutex); request_module("%s", module_name); mutex_lock(&net->nft.commit_mutex); + + WARN_ON_ONCE(!list_empty(&net->nft.commit_list)); + list_splice(&commit_list, &net->nft.commit_list); } #endif @@ -1045,12 +1048,18 @@ static int nft_flush_table(struct nft_ctx *ctx) } list_for_each_entry_safe(flowtable, nft, &ctx->table->flowtables, list) { + if (!nft_is_active_next(ctx->net, flowtable)) + continue; + err = nft_delflowtable(ctx, flowtable); if (err < 0) goto out; } list_for_each_entry_safe(obj, ne, &ctx->table->objects, list) { + if (!nft_is_active_next(ctx->net, obj)) + continue; + err = nft_delobj(ctx, obj); if (err < 0) goto out; @@ -1241,7 +1250,8 @@ static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = { .len = NFT_CHAIN_MAXNAMELEN - 1 }, [NFTA_CHAIN_HOOK] = { .type = NLA_NESTED }, [NFTA_CHAIN_POLICY] = { .type = NLA_U32 }, - [NFTA_CHAIN_TYPE] = { .type = NLA_STRING }, + [NFTA_CHAIN_TYPE] = { .type = NLA_STRING, + .len = NFT_MODULE_AUTOLOAD_LIMIT }, [NFTA_CHAIN_COUNTERS] = { .type = NLA_NESTED }, [NFTA_CHAIN_FLAGS] = { .type = NLA_U32 }, }; @@ -1676,6 +1686,7 @@ static int nf_tables_parse_netdev_hooks(struct net *net, goto err_hook; } if (nft_hook_list_find(hook_list, hook)) { + kfree(hook); err = -EEXIST; goto err_hook; } @@ -2355,7 +2366,8 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net, } static const struct nla_policy nft_expr_policy[NFTA_EXPR_MAX + 1] = { - [NFTA_EXPR_NAME] = { .type = NLA_STRING }, + [NFTA_EXPR_NAME] = { .type = NLA_STRING, + .len = NFT_MODULE_AUTOLOAD_LIMIT }, [NFTA_EXPR_DATA] = { .type = NLA_NESTED }, }; @@ -4198,7 +4210,8 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = { [NFTA_SET_ELEM_USERDATA] = { .type = NLA_BINARY, .len = NFT_USERDATA_MAXLEN }, [NFTA_SET_ELEM_EXPR] = { .type = NLA_NESTED }, - [NFTA_SET_ELEM_OBJREF] = { .type = NLA_STRING }, + [NFTA_SET_ELEM_OBJREF] = { .type = NLA_STRING, + .len = NFT_OBJ_MAXNAMELEN - 1 }, }; static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = { @@ -4519,8 +4532,10 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set, return err; err = -EINVAL; - if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) + if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) { + nft_data_release(&elem.key.val, desc.type); return err; + } priv = set->ops->get(ctx->net, set, &elem, flags); if (IS_ERR(priv)) @@ -4756,14 +4771,20 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, if (nla[NFTA_SET_ELEM_DATA] == NULL && !(flags & NFT_SET_ELEM_INTERVAL_END)) return -EINVAL; - if (nla[NFTA_SET_ELEM_DATA] != NULL && - flags & NFT_SET_ELEM_INTERVAL_END) - return -EINVAL; } else { if (nla[NFTA_SET_ELEM_DATA] != NULL) return -EINVAL; } + if ((flags & NFT_SET_ELEM_INTERVAL_END) && + (nla[NFTA_SET_ELEM_DATA] || + nla[NFTA_SET_ELEM_OBJREF] || + nla[NFTA_SET_ELEM_TIMEOUT] || + nla[NFTA_SET_ELEM_EXPIRATION] || + nla[NFTA_SET_ELEM_USERDATA] || + nla[NFTA_SET_ELEM_EXPR])) + return -EINVAL; + timeout = 0; if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) { if (!(set->flags & NFT_SET_TIMEOUT)) @@ -5476,7 +5497,7 @@ 
static int nf_tables_newobj(struct net *net, struct sock *nlsk, if (nlh->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP; - type = nft_obj_type_get(net, objtype); + type = __nft_obj_type_get(objtype); nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla); return nf_tables_updobj(&ctx, type, nla[NFTA_OBJ_DATA], obj); @@ -5976,6 +5997,7 @@ nft_flowtable_type_get(struct net *net, u8 family) return ERR_PTR(-ENOENT); } +/* Only called from error and netdev event paths. */ static void nft_unregister_flowtable_hook(struct net *net, struct nft_flowtable *flowtable, struct nft_hook *hook) @@ -5991,7 +6013,7 @@ static void nft_unregister_flowtable_net_hooks(struct net *net, struct nft_hook *hook; list_for_each_entry(hook, &flowtable->hook_list, list) - nft_unregister_flowtable_hook(net, flowtable, hook); + nf_unregister_net_hook(net, &hook->ops); } static int nft_register_flowtable_net_hooks(struct net *net, @@ -6440,12 +6462,14 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable) { struct nft_hook *hook, *next; + flowtable->data.type->free(&flowtable->data); list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) { + flowtable->data.type->setup(&flowtable->data, hook->ops.dev, + FLOW_BLOCK_UNBIND); list_del_rcu(&hook->list); kfree(hook); } kfree(flowtable->name); - flowtable->data.type->free(&flowtable->data); module_put(flowtable->data.type->owner); kfree(flowtable); } @@ -6489,6 +6513,7 @@ static void nft_flowtable_event(unsigned long event, struct net_device *dev, if (hook->ops.dev != dev) continue; + /* flow_offload_netdev_event() cleans up entries for us. */ nft_unregister_flowtable_hook(dev_net(dev), flowtable, hook); list_del_rcu(&hook->list); kfree_rcu(hook, rcu); diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c index 431f3b803bfb..a9ea29afb09f 100644 --- a/net/netfilter/nf_tables_offload.c +++ b/net/netfilter/nf_tables_offload.c @@ -44,6 +44,9 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net, expr = nft_expr_next(expr); } + if (num_actions == 0) + return ERR_PTR(-EOPNOTSUPP); + flow = nft_flow_rule_alloc(num_actions); if (!flow) return ERR_PTR(-ENOMEM); @@ -577,6 +580,9 @@ static int nft_offload_netdev_event(struct notifier_block *this, struct net *net = dev_net(dev); struct nft_chain *chain; + if (event != NETDEV_UNREGISTER) + return NOTIFY_DONE; + mutex_lock(&net->nft.commit_mutex); chain = __nft_offload_get_chain(dev); if (chain) diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c index 02afa752dd2e..10e9d50e4e19 100644 --- a/net/netfilter/nft_bitwise.c +++ b/net/netfilter/nft_bitwise.c @@ -80,7 +80,7 @@ static int nft_bitwise_init(const struct nft_ctx *ctx, tb[NFTA_BITWISE_MASK]); if (err < 0) return err; - if (d1.len != priv->len) { + if (d1.type != NFT_DATA_VALUE || d1.len != priv->len) { err = -EINVAL; goto err1; } @@ -89,7 +89,7 @@ static int nft_bitwise_init(const struct nft_ctx *ctx, tb[NFTA_BITWISE_XOR]); if (err < 0) goto err1; - if (d2.len != priv->len) { + if (d2.type != NFT_DATA_VALUE || d2.len != priv->len) { err = -EINVAL; goto err2; } diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c index b8092069f868..8a28c127effc 100644 --- a/net/netfilter/nft_cmp.c +++ b/net/netfilter/nft_cmp.c @@ -81,6 +81,12 @@ static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr, if (err < 0) return err; + if (desc.type != NFT_DATA_VALUE) { + err = -EINVAL; + nft_data_release(&priv->data, desc.type); + return err; + } + priv->sreg = 
nft_parse_register(tb[NFTA_CMP_SREG]); err = nft_validate_register_load(priv->sreg, desc.len); if (err < 0) diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c index dd82ff2ee19f..b70b48996801 100644 --- a/net/netfilter/nft_flow_offload.c +++ b/net/netfilter/nft_flow_offload.c @@ -200,9 +200,6 @@ static void nft_flow_offload_activate(const struct nft_ctx *ctx, static void nft_flow_offload_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { - struct nft_flow_offload *priv = nft_expr_priv(expr); - - priv->flowtable->use--; nf_ct_netns_put(ctx->net, ctx->family); } diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c index 4701fa8a45e7..89efcc5a533d 100644 --- a/net/netfilter/nft_range.c +++ b/net/netfilter/nft_range.c @@ -66,11 +66,21 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr if (err < 0) return err; + if (desc_from.type != NFT_DATA_VALUE) { + err = -EINVAL; + goto err1; + } + err = nft_data_init(NULL, &priv->data_to, sizeof(priv->data_to), &desc_to, tb[NFTA_RANGE_TO_DATA]); if (err < 0) goto err1; + if (desc_to.type != NFT_DATA_VALUE) { + err = -EINVAL; + goto err2; + } + if (desc_from.len != desc_to.len) { err = -EINVAL; goto err2; diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 57123259452f..a9f804f7a04a 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -74,8 +74,13 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set parent = rcu_dereference_raw(parent->rb_left); continue; } - if (nft_rbtree_interval_end(rbe)) - goto out; + if (nft_rbtree_interval_end(rbe)) { + if (nft_set_is_anonymous(set)) + return false; + parent = rcu_dereference_raw(parent->rb_left); + interval = NULL; + continue; + } *ext = &rbe->ext; return true; @@ -88,7 +93,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set *ext = &interval->ext; return true; } -out: + return false; } @@ -139,8 +144,10 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set, if (flags & NFT_SET_ELEM_INTERVAL_END) interval = rbe; } else { - if (!nft_set_elem_active(&rbe->ext, genmask)) + if (!nft_set_elem_active(&rbe->ext, genmask)) { parent = rcu_dereference_raw(parent->rb_left); + continue; + } if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) || (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) == @@ -148,7 +155,11 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set, *elem = rbe; return true; } - return false; + + if (nft_rbtree_interval_end(rbe)) + interval = NULL; + + parent = rcu_dereference_raw(parent->rb_left); } } diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c index 4c33dfc9dab5..d67f83a0958d 100644 --- a/net/netfilter/nft_tproxy.c +++ b/net/netfilter/nft_tproxy.c @@ -50,7 +50,7 @@ static void nft_tproxy_eval_v4(const struct nft_expr *expr, taddr = nf_tproxy_laddr4(skb, taddr, iph->daddr); if (priv->sreg_port) - tport = regs->data[priv->sreg_port]; + tport = nft_reg_load16(®s->data[priv->sreg_port]); if (!tport) tport = hp->dest; @@ -117,7 +117,7 @@ static void nft_tproxy_eval_v6(const struct nft_expr *expr, taddr = *nf_tproxy_laddr6(skb, &taddr, &iph->daddr); if (priv->sreg_port) - tport = regs->data[priv->sreg_port]; + tport = nft_reg_load16(®s->data[priv->sreg_port]); if (!tport) tport = hp->dest; diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c index 3d4c2ae605a8..5284fcf16be7 100644 --- 
a/net/netfilter/nft_tunnel.c +++ b/net/netfilter/nft_tunnel.c @@ -76,7 +76,7 @@ static int nft_tunnel_get_init(const struct nft_ctx *ctx, struct nft_tunnel *priv = nft_expr_priv(expr); u32 len; - if (!tb[NFTA_TUNNEL_KEY] && + if (!tb[NFTA_TUNNEL_KEY] || !tb[NFTA_TUNNEL_DREG]) return -EINVAL; @@ -266,6 +266,9 @@ static int nft_tunnel_obj_erspan_init(const struct nlattr *attr, if (err < 0) return err; + if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]) + return -EINVAL; + version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])); switch (version) { case ERSPAN_VERSION: diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c index 78fe622eba65..11b554ce07ff 100644 --- a/net/nfc/nci/uart.c +++ b/net/nfc/nci/uart.c @@ -346,7 +346,7 @@ static int nci_uart_default_recv_buf(struct nci_uart *nu, const u8 *data, nu->rx_packet_len = -1; nu->rx_skb = nci_skb_alloc(nu->ndev, NCI_MAX_PACKET_SIZE, - GFP_KERNEL); + GFP_ATOMIC); if (!nu->rx_skb) return -ENOMEM; } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 53c1d41fb1c9..118cd66b7516 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -544,7 +544,8 @@ static int prb_calc_retire_blk_tmo(struct packet_sock *po, msec = 1; div = ecmd.base.speed / 1000; } - } + } else + return DEFAULT_PRB_RETIRE_TOV; mbits = (blk_size_in_bytes * 8) / (1024 * 1024); diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 88f98f27ad88..3d24d45be5f4 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -196,7 +196,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, hdr->size = cpu_to_le32(len); hdr->confirm_rx = 0; - skb_put_padto(skb, ALIGN(len, 4)); + skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr)); mutex_lock(&node->ep_lock); if (node->ep) diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 461d75274fb3..971c73c7d34c 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -1002,10 +1002,13 @@ static void rfkill_sync_work(struct work_struct *work) int __must_check rfkill_register(struct rfkill *rfkill) { static unsigned long rfkill_no; - struct device *dev = &rfkill->dev; + struct device *dev; int error; - BUG_ON(!rfkill); + if (!rfkill) + return -EINVAL; + + dev = &rfkill->dev; mutex_lock(&rfkill_global_mutex); diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 7c7d10f2e0c1..5e99df80e80a 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -209,6 +209,7 @@ struct rxrpc_skb_priv { struct rxrpc_security { const char *name; /* name of this service */ u8 security_index; /* security type provided */ + u32 no_key_abort; /* Abort code indicating no key */ /* Initialise a security service */ int (*init)(void); @@ -977,8 +978,9 @@ static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn, struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *, struct sk_buff *); struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t); -void rxrpc_new_incoming_connection(struct rxrpc_sock *, - struct rxrpc_connection *, struct sk_buff *); +void rxrpc_new_incoming_connection(struct rxrpc_sock *, struct rxrpc_connection *, + const struct rxrpc_security *, struct key *, + struct sk_buff *); void rxrpc_unpublish_service_conn(struct rxrpc_connection *); /* @@ -1103,7 +1105,9 @@ extern const struct rxrpc_security rxkad; int __init rxrpc_init_security(void); void rxrpc_exit_security(void); int rxrpc_init_client_conn_security(struct rxrpc_connection *); -int rxrpc_init_server_conn_security(struct rxrpc_connection *); +bool 
rxrpc_look_up_server_security(struct rxrpc_local *, struct rxrpc_sock *, + const struct rxrpc_security **, struct key **, + struct sk_buff *); /* * sendmsg.c diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 135bf5cd8dd5..70e44abf106c 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -240,6 +240,22 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx) } /* + * Ping the other end to fill our RTT cache and to retrieve the rwind + * and MTU parameters. + */ +static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb) +{ + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + ktime_t now = skb->tstamp; + + if (call->peer->rtt_usage < 3 || + ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now)) + rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial, + true, true, + rxrpc_propose_ack_ping_for_params); +} + +/* * Allocate a new incoming call from the prealloc pool, along with a connection * and a peer as necessary. */ @@ -247,6 +263,8 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, struct rxrpc_local *local, struct rxrpc_peer *peer, struct rxrpc_connection *conn, + const struct rxrpc_security *sec, + struct key *key, struct sk_buff *skb) { struct rxrpc_backlog *b = rx->backlog; @@ -294,7 +312,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, conn->params.local = rxrpc_get_local(local); conn->params.peer = peer; rxrpc_see_connection(conn); - rxrpc_new_incoming_connection(rx, conn, skb); + rxrpc_new_incoming_connection(rx, conn, sec, key, skb); } else { rxrpc_get_connection(conn); } @@ -333,9 +351,11 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + const struct rxrpc_security *sec = NULL; struct rxrpc_connection *conn; struct rxrpc_peer *peer = NULL; - struct rxrpc_call *call; + struct rxrpc_call *call = NULL; + struct key *key = NULL; _enter(""); @@ -346,9 +366,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN); skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; skb->priority = RX_INVALID_OPERATION; - _leave(" = NULL [close]"); - call = NULL; - goto out; + goto no_call; } /* The peer, connection and call may all have sprung into existence due @@ -358,29 +376,19 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, */ conn = rxrpc_find_connection_rcu(local, skb, &peer); - call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb); + if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb)) + goto no_call; + + call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb); + key_put(key); if (!call) { skb->mark = RXRPC_SKB_MARK_REJECT_BUSY; - _leave(" = NULL [busy]"); - call = NULL; - goto out; + goto no_call; } trace_rxrpc_receive(call, rxrpc_receive_incoming, sp->hdr.serial, sp->hdr.seq); - /* Lock the call to prevent rxrpc_kernel_send/recv_data() and - * sendmsg()/recvmsg() inconveniently stealing the mutex once the - * notification is generated. - * - * The BUG should never happen because the kernel should be well - * behaved enough not to access the call before the first notification - * event and userspace is prevented from doing so until the state is - * appropriate. - */ - if (!mutex_trylock(&call->user_mutex)) - BUG(); - /* Make the call live. 
*/ rxrpc_incoming_call(rx, call, skb); conn = call->conn; @@ -421,6 +429,9 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, BUG(); } spin_unlock(&conn->state_lock); + spin_unlock(&rx->incoming_lock); + + rxrpc_send_ping(call, skb); if (call->state == RXRPC_CALL_SERVER_ACCEPTING) rxrpc_notify_socket(call); @@ -433,9 +444,12 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, rxrpc_put_call(call, rxrpc_call_put); _leave(" = %p{%d}", call, call->debug_id); -out: - spin_unlock(&rx->incoming_lock); return call; + +no_call: + spin_unlock(&rx->incoming_lock); + _leave(" = NULL [%u]", skb->mark); + return NULL; } /* diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index a1ceef4f5cd0..808a4723f868 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -376,21 +376,7 @@ static void rxrpc_secure_connection(struct rxrpc_connection *conn) _enter("{%d}", conn->debug_id); ASSERT(conn->security_ix != 0); - - if (!conn->params.key) { - _debug("set up security"); - ret = rxrpc_init_server_conn_security(conn); - switch (ret) { - case 0: - break; - case -ENOENT: - abort_code = RX_CALL_DEAD; - goto abort; - default: - abort_code = RXKADNOAUTH; - goto abort; - } - } + ASSERT(conn->server_key); if (conn->security->issue_challenge(conn) < 0) { abort_code = RX_CALL_DEAD; diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c index 123d6ceab15c..21da48e3d2e5 100644 --- a/net/rxrpc/conn_service.c +++ b/net/rxrpc/conn_service.c @@ -148,6 +148,8 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn */ void rxrpc_new_incoming_connection(struct rxrpc_sock *rx, struct rxrpc_connection *conn, + const struct rxrpc_security *sec, + struct key *key, struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); @@ -160,6 +162,8 @@ void rxrpc_new_incoming_connection(struct rxrpc_sock *rx, conn->service_id = sp->hdr.serviceId; conn->security_ix = sp->hdr.securityIndex; conn->out_clientflag = 0; + conn->security = sec; + conn->server_key = key_get(key); if (conn->security_ix) conn->state = RXRPC_CONN_SERVICE_UNSECURED; else diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 157be1ff8697..86bd133b4fa0 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -193,22 +193,6 @@ send_extra_data: } /* - * Ping the other end to fill our RTT cache and to retrieve the rwind - * and MTU parameters. - */ -static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb) -{ - struct rxrpc_skb_priv *sp = rxrpc_skb(skb); - ktime_t now = skb->tstamp; - - if (call->peer->rtt_usage < 3 || - ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now)) - rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial, - true, true, - rxrpc_propose_ack_ping_for_params); -} - -/* * Apply a hard ACK by advancing the Tx window. 
*/ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, @@ -1396,8 +1380,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) call = rxrpc_new_incoming_call(local, rx, skb); if (!call) goto reject_packet; - rxrpc_send_ping(call, skb); - mutex_unlock(&call->user_mutex); } /* Process a call packet; this either discards or passes on the ref diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 8d8aa3c230b5..098f1f9ec53b 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -648,9 +648,9 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) u32 serial; int ret; - _enter("{%d,%x}", conn->debug_id, key_serial(conn->params.key)); + _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key)); - ret = key_validate(conn->params.key); + ret = key_validate(conn->server_key); if (ret < 0) return ret; @@ -1293,6 +1293,7 @@ static void rxkad_exit(void) const struct rxrpc_security rxkad = { .name = "rxkad", .security_index = RXRPC_SECURITY_RXKAD, + .no_key_abort = RXKADUNKNOWNKEY, .init = rxkad_init, .exit = rxkad_exit, .init_connection_security = rxkad_init_connection_security, diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c index a4c47d2b7054..9b1fb9ed0717 100644 --- a/net/rxrpc/security.c +++ b/net/rxrpc/security.c @@ -101,62 +101,58 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn) } /* - * initialise the security on a server connection + * Find the security key for a server connection. */ -int rxrpc_init_server_conn_security(struct rxrpc_connection *conn) +bool rxrpc_look_up_server_security(struct rxrpc_local *local, struct rxrpc_sock *rx, + const struct rxrpc_security **_sec, + struct key **_key, + struct sk_buff *skb) { const struct rxrpc_security *sec; - struct rxrpc_local *local = conn->params.local; - struct rxrpc_sock *rx; - struct key *key; - key_ref_t kref; + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + key_ref_t kref = NULL; char kdesc[5 + 1 + 3 + 1]; _enter(""); - sprintf(kdesc, "%u:%u", conn->service_id, conn->security_ix); + sprintf(kdesc, "%u:%u", sp->hdr.serviceId, sp->hdr.securityIndex); - sec = rxrpc_security_lookup(conn->security_ix); + sec = rxrpc_security_lookup(sp->hdr.securityIndex); if (!sec) { - _leave(" = -ENOKEY [lookup]"); - return -ENOKEY; + trace_rxrpc_abort(0, "SVS", + sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, + RX_INVALID_OPERATION, EKEYREJECTED); + skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; + skb->priority = RX_INVALID_OPERATION; + return false; } - /* find the service */ - read_lock(&local->services_lock); - rx = rcu_dereference_protected(local->service, - lockdep_is_held(&local->services_lock)); - if (rx && (rx->srx.srx_service == conn->service_id || - rx->second_service == conn->service_id)) - goto found_service; + if (sp->hdr.securityIndex == RXRPC_SECURITY_NONE) + goto out; - /* the service appears to have died */ - read_unlock(&local->services_lock); - _leave(" = -ENOENT"); - return -ENOENT; - -found_service: if (!rx->securities) { - read_unlock(&local->services_lock); - _leave(" = -ENOKEY"); - return -ENOKEY; + trace_rxrpc_abort(0, "SVR", + sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, + RX_INVALID_OPERATION, EKEYREJECTED); + skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; + skb->priority = RX_INVALID_OPERATION; + return false; } /* look through the service's keyring */ kref = keyring_search(make_key_ref(rx->securities, 1UL), &key_type_rxrpc_s, kdesc, true); if (IS_ERR(kref)) { - read_unlock(&local->services_lock); - _leave(" = %ld [search]", PTR_ERR(kref)); - return 
PTR_ERR(kref); + trace_rxrpc_abort(0, "SVK", + sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, + sec->no_key_abort, EKEYREJECTED); + skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; + skb->priority = sec->no_key_abort; + return false; } - key = key_ref_to_ptr(kref); - read_unlock(&local->services_lock); - - conn->server_key = key; - conn->security = sec; - - _leave(" = 0"); - return 0; +out: + *_sec = sec; + *_key = key_ref_to_ptr(kref); + return true; } diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c index 40038c321b4a..19649623493b 100644 --- a/net/sched/act_ctinfo.c +++ b/net/sched/act_ctinfo.c @@ -360,6 +360,16 @@ static int tcf_ctinfo_search(struct net *net, struct tc_action **a, u32 index) return tcf_idr_search(tn, a, index); } +static void tcf_ctinfo_cleanup(struct tc_action *a) +{ + struct tcf_ctinfo *ci = to_ctinfo(a); + struct tcf_ctinfo_params *cp; + + cp = rcu_dereference_protected(ci->params, 1); + if (cp) + kfree_rcu(cp, rcu); +} + static struct tc_action_ops act_ctinfo_ops = { .kind = "ctinfo", .id = TCA_ID_CTINFO, @@ -367,6 +377,7 @@ static struct tc_action_ops act_ctinfo_ops = { .act = tcf_ctinfo_act, .dump = tcf_ctinfo_dump, .init = tcf_ctinfo_init, + .cleanup= tcf_ctinfo_cleanup, .walk = tcf_ctinfo_walker, .lookup = tcf_ctinfo_search, .size = sizeof(struct tcf_ctinfo), diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 5e6379028fc3..c1fcd85719d6 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -537,6 +537,9 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, } ife = to_ife(*a); + if (ret == ACT_P_CREATED) + INIT_LIST_HEAD(&ife->metalist); + err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); if (err < 0) goto release_idr; @@ -566,10 +569,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, p->eth_type = ife_type; } - - if (ret == ACT_P_CREATED) - INIT_LIST_HEAD(&ife->metalist); - if (tb[TCA_IFE_METALST]) { err = nla_parse_nested_deprecated(tb2, IFE_META_MAX, tb[TCA_IFE_METALST], NULL, diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 1e3eb3a97532..1ad300e6dbc0 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -219,8 +219,10 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, bool use_reinsert; bool want_ingress; bool is_redirect; + bool expects_nh; int m_eaction; int mac_len; + bool at_nh; rec_level = __this_cpu_inc_return(mirred_rec_level); if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) { @@ -261,19 +263,19 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, goto out; } - /* If action's target direction differs than filter's direction, - * and devices expect a mac header on xmit, then mac push/pull is - * needed. - */ want_ingress = tcf_mirred_act_wants_ingress(m_eaction); - if (skb_at_tc_ingress(skb) != want_ingress && m_mac_header_xmit) { - if (!skb_at_tc_ingress(skb)) { - /* caught at egress, act ingress: pull mac */ - mac_len = skb_network_header(skb) - skb_mac_header(skb); + + expects_nh = want_ingress || !m_mac_header_xmit; + at_nh = skb->data == skb_network_header(skb); + if (at_nh != expects_nh) { + mac_len = skb_at_tc_ingress(skb) ? 
skb->mac_len : + skb_network_header(skb) - skb_mac_header(skb); + if (expects_nh) { + /* target device/action expect data at nh */ skb_pull_rcsum(skb2, mac_len); } else { - /* caught at ingress, act egress: push mac */ - skb_push_rcsum(skb2, skb->mac_len); + /* target device/action expect data at mac */ + skb_push_rcsum(skb2, mac_len); } } diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 6a0eacafdb19..76e0d122616a 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -308,33 +308,12 @@ static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held, tcf_proto_destroy(tp, rtnl_held, true, extack); } -static int walker_check_empty(struct tcf_proto *tp, void *fh, - struct tcf_walker *arg) +static bool tcf_proto_check_delete(struct tcf_proto *tp) { - if (fh) { - arg->nonempty = true; - return -1; - } - return 0; -} - -static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held) -{ - struct tcf_walker walker = { .fn = walker_check_empty, }; - - if (tp->ops->walk) { - tp->ops->walk(tp, &walker, rtnl_held); - return !walker.nonempty; - } - return true; -} + if (tp->ops->delete_empty) + return tp->ops->delete_empty(tp); -static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held) -{ - spin_lock(&tp->lock); - if (tcf_proto_is_empty(tp, rtnl_held)) - tp->deleting = true; - spin_unlock(&tp->lock); + tp->deleting = true; return tp->deleting; } @@ -1751,7 +1730,7 @@ static void tcf_chain_tp_delete_empty(struct tcf_chain *chain, * concurrently. * Mark tp for deletion if it is empty. */ - if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) { + if (!tp_iter || !tcf_proto_check_delete(tp)) { mutex_unlock(&chain->filter_chain_lock); return; } diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 0d125de54285..b0f42e62dd76 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -2773,6 +2773,17 @@ static void fl_bind_class(void *fh, u32 classid, unsigned long cl) f->res.class = cl; } +static bool fl_delete_empty(struct tcf_proto *tp) +{ + struct cls_fl_head *head = fl_head_dereference(tp); + + spin_lock(&tp->lock); + tp->deleting = idr_is_empty(&head->handle_idr); + spin_unlock(&tp->lock); + + return tp->deleting; +} + static struct tcf_proto_ops cls_fl_ops __read_mostly = { .kind = "flower", .classify = fl_classify, @@ -2782,6 +2793,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = { .put = fl_put, .change = fl_change, .delete = fl_delete, + .delete_empty = fl_delete_empty, .walk = fl_walk, .reoffload = fl_reoffload, .hw_add = fl_hw_add, diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index e0f40400f679..2277369feae5 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -1769,7 +1769,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, q->avg_window_begin)); u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC; - do_div(b, window_interval); + b = div64_u64(b, window_interval); q->avg_peak_bandwidth = cake_ewma(q->avg_peak_bandwidth, b, b > q->avg_peak_bandwidth ? 
2 : 8); diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index b1c7e726ce5d..a5a295477ecc 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -301,6 +301,9 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q) f->socket_hash != sk->sk_hash)) { f->credit = q->initial_quantum; f->socket_hash = sk->sk_hash; + if (q->rate_enable) + smp_store_release(&sk->sk_pacing_status, + SK_PACING_FQ); if (fq_flow_is_throttled(f)) fq_flow_unset_throttled(q, f); f->time_next_packet = 0ULL; @@ -322,8 +325,12 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q) fq_flow_set_detached(f); f->sk = sk; - if (skb->sk == sk) + if (skb->sk == sk) { f->socket_hash = sk->sk_hash; + if (q->rate_enable) + smp_store_release(&sk->sk_pacing_status, + SK_PACING_FQ); + } f->credit = q->initial_quantum; rb_link_node(&f->fq_node, parent, p); @@ -428,17 +435,9 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch, f->qlen++; qdisc_qstats_backlog_inc(sch, skb); if (fq_flow_is_detached(f)) { - struct sock *sk = skb->sk; - fq_flow_add_tail(&q->new_flows, f); if (time_after(jiffies, f->age + q->flow_refill_delay)) f->credit = max_t(u32, f->credit, q->quantum); - if (sk && q->rate_enable) { - if (unlikely(smp_load_acquire(&sk->sk_pacing_status) != - SK_PACING_FQ)) - smp_store_release(&sk->sk_pacing_status, - SK_PACING_FQ); - } q->inactive_flows--; } @@ -787,10 +786,12 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt, if (tb[TCA_FQ_QUANTUM]) { u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); - if (quantum > 0) + if (quantum > 0 && quantum <= (1 << 20)) { q->quantum = quantum; - else + } else { + NL_SET_ERR_MSG_MOD(extack, "invalid quantum"); err = -EINVAL; + } } if (tb[TCA_FQ_INITIAL_QUANTUM]) diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 18b884cfdfe8..647941702f9f 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -292,8 +292,14 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct tc_prio_qopt_offload graft_offload; unsigned long band = arg - 1; - if (new == NULL) - new = &noop_qdisc; + if (!new) { + new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, + TC_H_MAKE(sch->handle, arg), extack); + if (!new) + new = &noop_qdisc; + else + qdisc_hash_add(new, true); + } *old = qdisc_replace(sch, new, &q->queues[band]); diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index fbbf19128c2d..78af2fcf90cc 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -227,6 +227,7 @@ static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb, sa->sin_port = sh->dest; sa->sin_addr.s_addr = ip_hdr(skb)->daddr; } + memset(sa->sin_zero, 0, sizeof(sa->sin_zero)); } /* Initialize an sctp_addr from a socket. */ @@ -235,6 +236,7 @@ static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk) addr->v4.sin_family = AF_INET; addr->v4.sin_port = 0; addr->v4.sin_addr.s_addr = inet_sk(sk)->inet_rcv_saddr; + memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero)); } /* Initialize sk->sk_rcv_saddr from sctp_addr. 
*/ @@ -257,6 +259,7 @@ static void sctp_v4_from_addr_param(union sctp_addr *addr, addr->v4.sin_family = AF_INET; addr->v4.sin_port = port; addr->v4.sin_addr.s_addr = param->v4.addr.s_addr; + memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero)); } /* Initialize an address parameter from a sctp_addr and return the length @@ -281,6 +284,7 @@ static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct flowi4 *fl4, saddr->v4.sin_family = AF_INET; saddr->v4.sin_port = port; saddr->v4.sin_addr.s_addr = fl4->saddr; + memset(saddr->v4.sin_zero, 0, sizeof(saddr->v4.sin_zero)); } /* Compare two addresses exactly. */ @@ -303,6 +307,7 @@ static void sctp_v4_inaddr_any(union sctp_addr *addr, __be16 port) addr->v4.sin_family = AF_INET; addr->v4.sin_addr.s_addr = htonl(INADDR_ANY); addr->v4.sin_port = port; + memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero)); } /* Is this a wildcard address? */ diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index acd737d4c0e0..834e9f82afed 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -1363,8 +1363,10 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type, /* Generate an INIT ACK chunk. */ new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC, 0); - if (!new_obj) - goto nomem; + if (!new_obj) { + error = -ENOMEM; + break; + } sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(new_obj)); @@ -1386,7 +1388,8 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type, if (!new_obj) { if (cmd->obj.chunk) sctp_chunk_free(cmd->obj.chunk); - goto nomem; + error = -ENOMEM; + break; } sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(new_obj)); @@ -1433,8 +1436,10 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type, /* Generate a SHUTDOWN chunk. */ new_obj = sctp_make_shutdown(asoc, chunk); - if (!new_obj) - goto nomem; + if (!new_obj) { + error = -ENOMEM; + break; + } sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(new_obj)); break; @@ -1770,11 +1775,17 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type, break; } - if (error) + if (error) { + cmd = sctp_next_cmd(commands); + while (cmd) { + if (cmd->verb == SCTP_CMD_REPLY) + sctp_chunk_free(cmd->obj.chunk); + cmd = sctp_next_cmd(commands); + } break; + } } -out: /* If this is in response to a received chunk, wait until * we are done with the packet to open the queue so that we don't * send multiple packets in response to a single request. @@ -1789,7 +1800,4 @@ out: sp->data_ready_signalled = 0; return error; -nomem: - error = -ENOMEM; - goto out; } diff --git a/net/sctp/stream.c b/net/sctp/stream.c index e83cdaa2ab76..c1a100d2fed3 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -119,7 +119,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt, * a new one with new outcnt to save memory if needed. 
*/ if (outcnt == stream->outcnt) - goto in; + goto handle_in; /* Filter out chunks queued on streams that won't exist anymore */ sched->unsched_all(stream); @@ -128,24 +128,28 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt, ret = sctp_stream_alloc_out(stream, outcnt, gfp); if (ret) - goto out; + goto out_err; for (i = 0; i < stream->outcnt; i++) SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; -in: +handle_in: sctp_stream_interleave_init(stream); if (!incnt) goto out; ret = sctp_stream_alloc_in(stream, incnt, gfp); - if (ret) { - sched->free(stream); - genradix_free(&stream->out); - stream->outcnt = 0; - goto out; - } + if (ret) + goto in_err; + goto out; + +in_err: + sched->free(stream); + genradix_free(&stream->in); +out_err: + genradix_free(&stream->out); + stream->outcnt = 0; out: return ret; } diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 7235a6032671..3bbe1a58ec87 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -263,7 +263,7 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) pf->af->from_sk(&addr, sk); pf->to_sk_daddr(&t->ipaddr, sk); - dst->ops->update_pmtu(dst, sk, NULL, pmtu); + dst->ops->update_pmtu(dst, sk, NULL, pmtu, true); pf->to_sk_daddr(&addr, sk); dst = sctp_transport_dst_check(t); diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index b997072c72e5..cee5bf4a9bb9 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -857,6 +857,8 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr, goto out; sock_hold(&smc->sk); /* sock put in passive closing */ + if (smc->use_fallback) + goto out; if (flags & O_NONBLOCK) { if (schedule_work(&smc->connect_work)) smc->connect_nonblock = 1; @@ -1721,8 +1723,6 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, sk->sk_err = smc->clcsock->sk->sk_err; sk->sk_error_report(sk); } - if (rc) - return rc; if (optlen < sizeof(int)) return -EINVAL; @@ -1730,6 +1730,8 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, return -EFAULT; lock_sock(sk); + if (rc || smc->use_fallback) + goto out; switch (optname) { case TCP_ULP: case TCP_FASTOPEN: @@ -1741,15 +1743,14 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, smc_switch_to_fallback(smc); smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; } else { - if (!smc->use_fallback) - rc = -EINVAL; + rc = -EINVAL; } break; case TCP_NODELAY: if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_CLOSED) { - if (val && !smc->use_fallback) + if (val) mod_delayed_work(system_wq, &smc->conn.tx_work, 0); } @@ -1758,7 +1759,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_CLOSED) { - if (!val && !smc->use_fallback) + if (!val) mod_delayed_work(system_wq, &smc->conn.tx_work, 0); } @@ -1769,6 +1770,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, default: break; } +out: release_sock(sk); return rc; diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index bb92c7c6214c..e419ff277e55 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -1287,7 +1287,7 @@ static int smc_core_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) { smc_lgrs_shutdown(); - + smc_ib_unregister_client(); return 0; } diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 77c7dd7f05e8..fda3889993cb 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -77,7 
+77,7 @@ static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt, struct rpcrdma_sendctx *sc); static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt); -static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf); +static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt); static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt); static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt); static struct rpcrdma_regbuf * @@ -244,6 +244,7 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) ia->ri_id->device->name, rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt)); #endif + init_completion(&ia->ri_remove_done); set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags); ep->rep_connected = -ENODEV; xprt_force_disconnect(xprt); @@ -297,7 +298,6 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia) int rc; init_completion(&ia->ri_done); - init_completion(&ia->ri_remove_done); id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler, xprt, RDMA_PS_TCP, IB_QPT_RC); @@ -421,7 +421,7 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia) /* The ULP is responsible for ensuring all DMA * mappings and MRs are gone. */ - rpcrdma_reps_destroy(buf); + rpcrdma_reps_unmap(r_xprt); list_for_each_entry(req, &buf->rb_allreqs, rl_all) { rpcrdma_regbuf_dma_unmap(req->rl_rdmabuf); rpcrdma_regbuf_dma_unmap(req->rl_sendbuf); @@ -599,6 +599,7 @@ static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt, struct ib_qp_init_attr *qp_init_attr) { struct rpcrdma_ia *ia = &r_xprt->rx_ia; + struct rpcrdma_ep *ep = &r_xprt->rx_ep; int rc, err; trace_xprtrdma_reinsert(r_xprt); @@ -613,6 +614,7 @@ static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt, pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err); goto out2; } + memcpy(qp_init_attr, &ep->rep_attr, sizeof(*qp_init_attr)); rc = -ENETUNREACH; err = rdma_create_qp(ia->ri_id, ia->ri_pd, qp_init_attr); @@ -1090,6 +1092,7 @@ static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov; rep->rr_recv_wr.num_sge = 1; rep->rr_temp = temp; + list_add(&rep->rr_all, &r_xprt->rx_buf.rb_all_reps); return rep; out_free: @@ -1100,6 +1103,7 @@ out: static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep) { + list_del(&rep->rr_all); rpcrdma_regbuf_free(rep->rr_rdmabuf); kfree(rep); } @@ -1118,10 +1122,16 @@ static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf) static void rpcrdma_rep_put(struct rpcrdma_buffer *buf, struct rpcrdma_rep *rep) { - if (!rep->rr_temp) - llist_add(&rep->rr_node, &buf->rb_free_reps); - else - rpcrdma_rep_destroy(rep); + llist_add(&rep->rr_node, &buf->rb_free_reps); +} + +static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt) +{ + struct rpcrdma_buffer *buf = &r_xprt->rx_buf; + struct rpcrdma_rep *rep; + + list_for_each_entry(rep, &buf->rb_all_reps, rr_all) + rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf); } static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf) @@ -1152,6 +1162,7 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt) INIT_LIST_HEAD(&buf->rb_send_bufs); INIT_LIST_HEAD(&buf->rb_allreqs); + INIT_LIST_HEAD(&buf->rb_all_reps); rc = -ENOMEM; for (i = 0; i < buf->rb_max_requests; i++) { @@ -1504,6 +1515,10 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp) wr = NULL; while (needed) { rep = rpcrdma_rep_get_locked(buf); + if (rep && rep->rr_temp) { + rpcrdma_rep_destroy(rep); + continue; + } if (!rep) rep = rpcrdma_rep_create(r_xprt, temp); if (!rep) diff --git 
a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 5d15140a0266..d796d68609ed 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -203,6 +203,7 @@ struct rpcrdma_rep { struct xdr_stream rr_stream; struct llist_node rr_node; struct ib_recv_wr rr_recv_wr; + struct list_head rr_all; }; /* To reduce the rate at which a transport invokes ib_post_recv @@ -368,6 +369,7 @@ struct rpcrdma_buffer { struct list_head rb_allreqs; struct list_head rb_all_mrs; + struct list_head rb_all_reps; struct llist_head rb_free_reps; diff --git a/net/tipc/Makefile b/net/tipc/Makefile index 11255e970dd4..ee49a9f1dd4f 100644 --- a/net/tipc/Makefile +++ b/net/tipc/Makefile @@ -9,7 +9,7 @@ tipc-y += addr.o bcast.o bearer.o \ core.o link.o discover.o msg.o \ name_distr.o subscr.o monitor.o name_table.o net.o \ netlink.o netlink_compat.o node.o socket.o eth_media.o \ - topsrv.o socket.o group.o trace.o + topsrv.o group.o trace.o CFLAGS_trace.o += -I$(src) @@ -20,5 +20,3 @@ tipc-$(CONFIG_TIPC_CRYPTO) += crypto.o obj-$(CONFIG_TIPC_DIAG) += diag.o - -tipc_diag-y := diag.o diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 55aeba681cf4..656ebc79c64e 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -305,17 +305,17 @@ static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts, * @skb: socket buffer to copy * @method: send method to be used * @dests: destination nodes for message. - * @cong_link_cnt: returns number of encountered congested destination links * Returns 0 if success, otherwise errno */ static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb, struct tipc_mc_method *method, - struct tipc_nlist *dests, - u16 *cong_link_cnt) + struct tipc_nlist *dests) { struct tipc_msg *hdr, *_hdr; struct sk_buff_head tmpq; struct sk_buff *_skb; + u16 cong_link_cnt; + int rc = 0; /* Is a cluster supporting with new capabilities ? */ if (!(tipc_net(net)->capabilities & TIPC_MCAST_RBCTL)) @@ -343,18 +343,19 @@ static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb, _hdr = buf_msg(_skb); msg_set_size(_hdr, MCAST_H_SIZE); msg_set_is_rcast(_hdr, !msg_is_rcast(hdr)); + msg_set_errcode(_hdr, TIPC_ERR_NO_PORT); __skb_queue_head_init(&tmpq); __skb_queue_tail(&tmpq, _skb); if (method->rcast) - tipc_bcast_xmit(net, &tmpq, cong_link_cnt); + rc = tipc_bcast_xmit(net, &tmpq, &cong_link_cnt); else - tipc_rcast_xmit(net, &tmpq, dests, cong_link_cnt); + rc = tipc_rcast_xmit(net, &tmpq, dests, &cong_link_cnt); /* This queue should normally be empty by now */ __skb_queue_purge(&tmpq); - return 0; + return rc; } /* tipc_mcast_xmit - deliver message to indicated destination nodes @@ -396,9 +397,14 @@ int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, msg_set_is_rcast(hdr, method->rcast); /* Switch method ? 
*/ - if (rcast != method->rcast) - tipc_mcast_send_sync(net, skb, method, - dests, cong_link_cnt); + if (rcast != method->rcast) { + rc = tipc_mcast_send_sync(net, skb, method, dests); + if (unlikely(rc)) { + pr_err("Unable to send SYN: method %d, rc %d\n", + rcast, rc); + goto exit; + } + } if (method->rcast) rc = tipc_rcast_xmit(net, pkts, dests, cong_link_cnt); diff --git a/net/tipc/discover.c b/net/tipc/discover.c index b043e8c6397a..bfe43da127c0 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c @@ -194,6 +194,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb, { struct tipc_net *tn = tipc_net(net); struct tipc_msg *hdr = buf_msg(skb); + u32 pnet_hash = msg_peer_net_hash(hdr); u16 caps = msg_node_capabilities(hdr); bool legacy = tn->legacy_addr_format; u32 sugg = msg_sugg_node_addr(hdr); @@ -242,9 +243,8 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb, return; if (!tipc_in_scope(legacy, b->domain, src)) return; - tipc_node_check_dest(net, src, peer_id, b, caps, signature, - msg_peer_net_hash(hdr), &maddr, &respond, - &dupl_addr); + tipc_node_check_dest(net, src, peer_id, b, caps, signature, pnet_hash, + &maddr, &respond, &dupl_addr); if (dupl_addr) disc_dupl_alert(b, src, &maddr); if (!respond) diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 92d04dc2a44b..359b2bc888cf 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -36,6 +36,7 @@ #include <net/sock.h> #include <linux/list_sort.h> +#include <linux/rbtree_augmented.h> #include "core.h" #include "netlink.h" #include "name_table.h" @@ -51,6 +52,7 @@ * @lower: service range lower bound * @upper: service range upper bound * @tree_node: member of service range RB tree + * @max: largest 'upper' in this node subtree * @local_publ: list of identical publications made from this node * Used by closest_first lookup and multicast lookup algorithm * @all_publ: all publications identical to this one, whatever node and scope @@ -60,6 +62,7 @@ struct service_range { u32 lower; u32 upper; struct rb_node tree_node; + u32 max; struct list_head local_publ; struct list_head all_publ; }; @@ -84,6 +87,130 @@ struct tipc_service { struct rcu_head rcu; }; +#define service_range_upper(sr) ((sr)->upper) +RB_DECLARE_CALLBACKS_MAX(static, sr_callbacks, + struct service_range, tree_node, u32, max, + service_range_upper) + +#define service_range_entry(rbtree_node) \ + (container_of(rbtree_node, struct service_range, tree_node)) + +#define service_range_overlap(sr, start, end) \ + ((sr)->lower <= (end) && (sr)->upper >= (start)) + +/** + * service_range_foreach_match - iterate over tipc service rbtree for each + * range match + * @sr: the service range pointer as a loop cursor + * @sc: the pointer to tipc service which holds the service range rbtree + * @start, end: the range (end >= start) for matching + */ +#define service_range_foreach_match(sr, sc, start, end) \ + for (sr = service_range_match_first((sc)->ranges.rb_node, \ + start, \ + end); \ + sr; \ + sr = service_range_match_next(&(sr)->tree_node, \ + start, \ + end)) + +/** + * service_range_match_first - find first service range matching a range + * @n: the root node of service range rbtree for searching + * @start, end: the range (end >= start) for matching + * + * Return: the leftmost service range node in the rbtree that overlaps the + * specific range if any. Otherwise, returns NULL. 
+ */ +static struct service_range *service_range_match_first(struct rb_node *n, + u32 start, u32 end) +{ + struct service_range *sr; + struct rb_node *l, *r; + + /* Non overlaps in tree at all? */ + if (!n || service_range_entry(n)->max < start) + return NULL; + + while (n) { + l = n->rb_left; + if (l && service_range_entry(l)->max >= start) { + /* A leftmost overlap range node must be one in the left + * subtree. If not, it has lower > end, then nodes on + * the right side cannot satisfy the condition either. + */ + n = l; + continue; + } + + /* No one in the left subtree can match, return if this node is + * an overlap i.e. leftmost. + */ + sr = service_range_entry(n); + if (service_range_overlap(sr, start, end)) + return sr; + + /* Ok, try to lookup on the right side */ + r = n->rb_right; + if (sr->lower <= end && + r && service_range_entry(r)->max >= start) { + n = r; + continue; + } + break; + } + + return NULL; +} + +/** + * service_range_match_next - find next service range matching a range + * @n: a node in service range rbtree from which the searching starts + * @start, end: the range (end >= start) for matching + * + * Return: the next service range node to the given node in the rbtree that + * overlaps the specific range if any. Otherwise, returns NULL. + */ +static struct service_range *service_range_match_next(struct rb_node *n, + u32 start, u32 end) +{ + struct service_range *sr; + struct rb_node *p, *r; + + while (n) { + r = n->rb_right; + if (r && service_range_entry(r)->max >= start) + /* A next overlap range node must be one in the right + * subtree. If not, it has lower > end, then any next + * successor (- an ancestor) of this node cannot + * satisfy the condition either. + */ + return service_range_match_first(r, start, end); + + /* No one in the right subtree can match, go up to find an + * ancestor of this node which is parent of a left-hand child. 
+ */ + while ((p = rb_parent(n)) && n == p->rb_right) + n = p; + if (!p) + break; + + /* Return if this ancestor is an overlap */ + sr = service_range_entry(p); + if (service_range_overlap(sr, start, end)) + return sr; + + /* Ok, try to lookup more from this ancestor */ + if (sr->lower <= end) { + n = p; + continue; + } + break; + } + + return NULL; +} + static int hash(int x) { return x & (TIPC_NAMETBL_SIZE - 1); @@ -139,84 +266,51 @@ static struct tipc_service *tipc_service_create(u32 type, struct hlist_head *hd) return service; } -/** - * tipc_service_first_range - find first service range in tree matching instance - * - * Very time-critical, so binary search through range rb tree - */ -static struct service_range *tipc_service_first_range(struct tipc_service *sc, - u32 instance) -{ - struct rb_node *n = sc->ranges.rb_node; - struct service_range *sr; - - while (n) { - sr = container_of(n, struct service_range, tree_node); - if (sr->lower > instance) - n = n->rb_left; - else if (sr->upper < instance) - n = n->rb_right; - else - return sr; - } - return NULL; -} - /* tipc_service_find_range - find service range matching publication parameters */ static struct service_range *tipc_service_find_range(struct tipc_service *sc, u32 lower, u32 upper) { - struct rb_node *n = sc->ranges.rb_node; struct service_range *sr; - sr = tipc_service_first_range(sc, lower); - if (!sr) - return NULL; - - /* Look for exact match */ - for (n = &sr->tree_node; n; n = rb_next(n)) { - sr = container_of(n, struct service_range, tree_node); - if (sr->upper == upper) - break; + service_range_foreach_match(sr, sc, lower, upper) { + /* Look for exact match */ + if (sr->lower == lower && sr->upper == upper) + return sr; } - if (!n || sr->lower != lower || sr->upper != upper) - return NULL; - return sr; + return NULL; } static struct service_range *tipc_service_create_range(struct tipc_service *sc, u32 lower, u32 upper) { struct rb_node **n, *parent = NULL; - struct service_range *sr, *tmp; + struct service_range *sr; n = &sc->ranges.rb_node; while (*n) { - tmp = container_of(*n, struct service_range, tree_node); parent = *n; - tmp = container_of(parent, struct service_range, tree_node); - if (lower < tmp->lower) - n = &(*n)->rb_left; - else if (lower > tmp->lower) - n = &(*n)->rb_right; - else if (upper < tmp->upper) - n = &(*n)->rb_left; - else if (upper > tmp->upper) - n = &(*n)->rb_right; + sr = service_range_entry(parent); + if (lower == sr->lower && upper == sr->upper) + return sr; + if (sr->max < upper) + sr->max = upper; + if (lower <= sr->lower) + n = &parent->rb_left; else - return tmp; + n = &parent->rb_right; } sr = kzalloc(sizeof(*sr), GFP_ATOMIC); if (!sr) return NULL; sr->lower = lower; sr->upper = upper; + sr->max = upper; INIT_LIST_HEAD(&sr->local_publ); INIT_LIST_HEAD(&sr->all_publ); rb_link_node(&sr->tree_node, parent, n); - rb_insert_color(&sr->tree_node, &sc->ranges); + rb_insert_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks); return sr; } @@ -310,7 +404,6 @@ static void tipc_service_subscribe(struct tipc_service *service, struct list_head publ_list; struct service_range *sr; struct tipc_name_seq ns; - struct rb_node *n; u32 filter; ns.type = tipc_sub_read(sb, seq.type); @@ -325,13 +418,7 @@ static void tipc_service_subscribe(struct tipc_service *service, return; INIT_LIST_HEAD(&publ_list); - for (n = rb_first(&service->ranges); n; n = rb_next(n)) { - sr = container_of(n, struct service_range, tree_node); - if (sr->lower > ns.upper) - break; - if (!tipc_sub_check_overlap(&ns, sr->lower, 
sr->upper)) - continue; - + service_range_foreach_match(sr, service, ns.lower, ns.upper) { first = NULL; list_for_each_entry(p, &sr->all_publ, all_publ) { if (filter & TIPC_SUB_PORTS) @@ -425,7 +512,7 @@ struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type, /* Remove service range item if this was its last publication */ if (list_empty(&sr->all_publ)) { - rb_erase(&sr->tree_node, &sc->ranges); + rb_erase_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks); kfree(sr); } @@ -473,34 +560,39 @@ u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance, u32 *dnode) rcu_read_lock(); sc = tipc_service_find(net, type); if (unlikely(!sc)) - goto not_found; + goto exit; spin_lock_bh(&sc->lock); - sr = tipc_service_first_range(sc, instance); - if (unlikely(!sr)) - goto no_match; - - /* Select lookup algorithm: local, closest-first or round-robin */ - if (*dnode == self) { - list = &sr->local_publ; - if (list_empty(list)) - goto no_match; - p = list_first_entry(list, struct publication, local_publ); - list_move_tail(&p->local_publ, &sr->local_publ); - } else if (legacy && !*dnode && !list_empty(&sr->local_publ)) { - list = &sr->local_publ; - p = list_first_entry(list, struct publication, local_publ); - list_move_tail(&p->local_publ, &sr->local_publ); - } else { - list = &sr->all_publ; - p = list_first_entry(list, struct publication, all_publ); - list_move_tail(&p->all_publ, &sr->all_publ); + service_range_foreach_match(sr, sc, instance, instance) { + /* Select lookup algo: local, closest-first or round-robin */ + if (*dnode == self) { + list = &sr->local_publ; + if (list_empty(list)) + continue; + p = list_first_entry(list, struct publication, + local_publ); + list_move_tail(&p->local_publ, &sr->local_publ); + } else if (legacy && !*dnode && !list_empty(&sr->local_publ)) { + list = &sr->local_publ; + p = list_first_entry(list, struct publication, + local_publ); + list_move_tail(&p->local_publ, &sr->local_publ); + } else { + list = &sr->all_publ; + p = list_first_entry(list, struct publication, + all_publ); + list_move_tail(&p->all_publ, &sr->all_publ); + } + port = p->port; + node = p->node; + /* Todo: as for legacy, pick the first matching range only, a + * "true" round-robin will be performed as needed. + */ + break; } - port = p->port; - node = p->node; -no_match: spin_unlock_bh(&sc->lock); -not_found: + +exit: rcu_read_unlock(); *dnode = node; return port; @@ -523,7 +615,8 @@ bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 scope, spin_lock_bh(&sc->lock); - sr = tipc_service_first_range(sc, instance); + /* Todo: a full search i.e. service_range_foreach_match() instead? 
*/ + sr = service_range_match_first(sc->ranges.rb_node, instance, instance); if (!sr) goto no_match; @@ -552,7 +645,6 @@ void tipc_nametbl_mc_lookup(struct net *net, u32 type, u32 lower, u32 upper, struct service_range *sr; struct tipc_service *sc; struct publication *p; - struct rb_node *n; rcu_read_lock(); sc = tipc_service_find(net, type); @@ -560,13 +652,7 @@ void tipc_nametbl_mc_lookup(struct net *net, u32 type, u32 lower, u32 upper, goto exit; spin_lock_bh(&sc->lock); - - for (n = rb_first(&sc->ranges); n; n = rb_next(n)) { - sr = container_of(n, struct service_range, tree_node); - if (sr->upper < lower) - continue; - if (sr->lower > upper) - break; + service_range_foreach_match(sr, sc, lower, upper) { list_for_each_entry(p, &sr->local_publ, local_publ) { if (p->scope == scope || (!exact && p->scope < scope)) tipc_dest_push(dports, 0, p->port); @@ -587,7 +673,6 @@ void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower, struct service_range *sr; struct tipc_service *sc; struct publication *p; - struct rb_node *n; rcu_read_lock(); sc = tipc_service_find(net, type); @@ -595,13 +680,7 @@ void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower, goto exit; spin_lock_bh(&sc->lock); - - for (n = rb_first(&sc->ranges); n; n = rb_next(n)) { - sr = container_of(n, struct service_range, tree_node); - if (sr->upper < lower) - continue; - if (sr->lower > upper) - break; + service_range_foreach_match(sr, sc, lower, upper) { list_for_each_entry(p, &sr->all_publ, all_publ) { tipc_nlist_add(nodes, p->node); } @@ -799,7 +878,7 @@ static void tipc_service_delete(struct net *net, struct tipc_service *sc) tipc_service_remove_publ(sr, p->node, p->key); kfree_rcu(p, rcu); } - rb_erase(&sr->tree_node, &sc->ranges); + rb_erase_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks); kfree(sr); } hlist_del_init_rcu(&sc->service_list); diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 0254bb7e418b..217516357ef2 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -204,8 +204,8 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, return -ENOMEM; } - attrbuf = kmalloc_array(tipc_genl_family.maxattr + 1, - sizeof(struct nlattr *), GFP_KERNEL); + attrbuf = kcalloc(tipc_genl_family.maxattr + 1, + sizeof(struct nlattr *), GFP_KERNEL); if (!attrbuf) { err = -ENOMEM; goto err_out; diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 41688da233ab..f9b4fb92c0b1 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -287,12 +287,12 @@ static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err) * * Caller must hold socket lock */ -static void tsk_rej_rx_queue(struct sock *sk) +static void tsk_rej_rx_queue(struct sock *sk, int error) { struct sk_buff *skb; while ((skb = __skb_dequeue(&sk->sk_receive_queue))) - tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT); + tipc_sk_respond(sk, skb, error); } static bool tipc_sk_connected(struct sock *sk) @@ -545,34 +545,45 @@ static void __tipc_shutdown(struct socket *sock, int error) /* Remove pending SYN */ __skb_queue_purge(&sk->sk_write_queue); - /* Reject all unreceived messages, except on an active connection - * (which disconnects locally & sends a 'FIN+' to peer). 
- */ - while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { - if (TIPC_SKB_CB(skb)->bytes_read) { - kfree_skb(skb); - continue; - } - if (!tipc_sk_type_connectionless(sk) && - sk->sk_state != TIPC_DISCONNECTING) { - tipc_set_sk_state(sk, TIPC_DISCONNECTING); - tipc_node_remove_conn(net, dnode, tsk->portid); - } - tipc_sk_respond(sk, skb, error); + /* Remove partially received buffer if any */ + skb = skb_peek(&sk->sk_receive_queue); + if (skb && TIPC_SKB_CB(skb)->bytes_read) { + __skb_unlink(skb, &sk->sk_receive_queue); + kfree_skb(skb); } - if (tipc_sk_type_connectionless(sk)) + /* Reject all unreceived messages if connectionless */ + if (tipc_sk_type_connectionless(sk)) { + tsk_rej_rx_queue(sk, error); return; + } - if (sk->sk_state != TIPC_DISCONNECTING) { + switch (sk->sk_state) { + case TIPC_CONNECTING: + case TIPC_ESTABLISHED: + tipc_set_sk_state(sk, TIPC_DISCONNECTING); + tipc_node_remove_conn(net, dnode, tsk->portid); + /* Send a FIN+/- to its peer */ + skb = __skb_dequeue(&sk->sk_receive_queue); + if (skb) { + __skb_queue_purge(&sk->sk_receive_queue); + tipc_sk_respond(sk, skb, error); + break; + } skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode, tsk_own_node(tsk), tsk_peer_port(tsk), tsk->portid, error); if (skb) tipc_node_xmit_skb(net, skb, dnode, tsk->portid); - tipc_node_remove_conn(net, dnode, tsk->portid); - tipc_set_sk_state(sk, TIPC_DISCONNECTING); + break; + case TIPC_LISTEN: + /* Reject all SYN messages */ + tsk_rej_rx_queue(sk, error); + break; + default: + __skb_queue_purge(&sk->sk_receive_queue); + break; } } @@ -1364,8 +1375,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) struct tipc_msg *hdr = &tsk->phdr; struct tipc_name_seq *seq; struct sk_buff_head pkts; - u32 dport, dnode = 0; - u32 type, inst; + u32 dport = 0, dnode = 0; + u32 type = 0, inst = 0; int mtu, rc; if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE)) @@ -1418,23 +1429,11 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) type = dest->addr.name.name.type; inst = dest->addr.name.name.instance; dnode = dest->addr.name.domain; - msg_set_type(hdr, TIPC_NAMED_MSG); - msg_set_hdr_sz(hdr, NAMED_H_SIZE); - msg_set_nametype(hdr, type); - msg_set_nameinst(hdr, inst); - msg_set_lookup_scope(hdr, tipc_node2scope(dnode)); dport = tipc_nametbl_translate(net, type, inst, &dnode); - msg_set_destnode(hdr, dnode); - msg_set_destport(hdr, dport); if (unlikely(!dport && !dnode)) return -EHOSTUNREACH; } else if (dest->addrtype == TIPC_ADDR_ID) { dnode = dest->addr.id.node; - msg_set_type(hdr, TIPC_DIRECT_MSG); - msg_set_lookup_scope(hdr, 0); - msg_set_destnode(hdr, dnode); - msg_set_destport(hdr, dest->addr.id.ref); - msg_set_hdr_sz(hdr, BASIC_H_SIZE); } else { return -EINVAL; } @@ -1445,6 +1444,22 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) if (unlikely(rc)) return rc; + if (dest->addrtype == TIPC_ADDR_NAME) { + msg_set_type(hdr, TIPC_NAMED_MSG); + msg_set_hdr_sz(hdr, NAMED_H_SIZE); + msg_set_nametype(hdr, type); + msg_set_nameinst(hdr, inst); + msg_set_lookup_scope(hdr, tipc_node2scope(dnode)); + msg_set_destnode(hdr, dnode); + msg_set_destport(hdr, dport); + } else { /* TIPC_ADDR_ID */ + msg_set_type(hdr, TIPC_DIRECT_MSG); + msg_set_lookup_scope(hdr, 0); + msg_set_destnode(hdr, dnode); + msg_set_destport(hdr, dest->addr.id.ref); + msg_set_hdr_sz(hdr, BASIC_H_SIZE); + } + __skb_queue_head_init(&pkts); mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false); rc = 
tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); @@ -2428,8 +2443,8 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p) return sock_intr_errno(*timeo_p); add_wait_queue(sk_sleep(sk), &wait); - done = sk_wait_event(sk, timeo_p, - sk->sk_state != TIPC_CONNECTING, &wait); + done = sk_wait_event(sk, timeo_p, tipc_sk_connected(sk), + &wait); remove_wait_queue(sk_sleep(sk), &wait); } while (!done); return 0; @@ -2639,7 +2654,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags, * Reject any stray messages received by new socket * before the socket lock was taken (very, very unlikely) */ - tsk_rej_rx_queue(new_sk); + tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT); /* Connect new socket to it's peer */ tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg)); diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index dac24c7aa7d4..94774c0e5ff3 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -732,15 +732,19 @@ out: return rc; } -static void tls_update(struct sock *sk, struct proto *p) +static void tls_update(struct sock *sk, struct proto *p, + void (*write_space)(struct sock *sk)) { struct tls_context *ctx; ctx = tls_get_ctx(sk); - if (likely(ctx)) + if (likely(ctx)) { + ctx->sk_write_space = write_space; ctx->sk_proto = p; - else + } else { sk->sk_prot = p; + sk->sk_write_space = write_space; + } } static int tls_get_info(const struct sock *sk, struct sk_buff *skb) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index c6803a82b769..c98e602a1a2d 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -256,8 +256,6 @@ static int tls_do_decryption(struct sock *sk, return ret; ret = crypto_wait_req(ret, &ctx->async_wait); - } else if (ret == -EBADMSG) { - TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR); } if (async) @@ -682,12 +680,32 @@ static int tls_push_record(struct sock *sk, int flags, split_point = msg_pl->apply_bytes; split = split_point && split_point < msg_pl->sg.size; + if (unlikely((!split && + msg_pl->sg.size + + prot->overhead_size > msg_en->sg.size) || + (split && + split_point + + prot->overhead_size > msg_en->sg.size))) { + split = true; + split_point = msg_en->sg.size; + } if (split) { rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en, split_point, prot->overhead_size, &orig_end); if (rc < 0) return rc; + /* This can happen if above tls_split_open_record allocates + * a single large encryption buffer instead of two smaller + * ones. In this case adjust pointers and continue without + * split. 
+ */ + if (!msg_pl->sg.size) { + tls_merge_open_record(sk, rec, tmp, orig_end); + msg_pl = &rec->msg_plaintext; + msg_en = &rec->msg_encrypted; + split = false; + } sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size); } @@ -709,6 +727,12 @@ static int tls_push_record(struct sock *sk, int flags, sg_mark_end(sk_msg_elem(msg_pl, i)); } + if (msg_pl->sg.end < msg_pl->sg.start) { + sg_chain(&msg_pl->sg.data[msg_pl->sg.start], + MAX_SKB_FRAGS - msg_pl->sg.start + 1, + msg_pl->sg.data); + } + i = msg_pl->sg.start; sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]); @@ -772,7 +796,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, psock = sk_psock_get(sk); if (!psock || !policy) { err = tls_push_record(sk, flags, record_type); - if (err) { + if (err && err != -EINPROGRESS) { *copied -= sk_msg_free(sk, msg); tls_free_open_rec(sk); } @@ -783,10 +807,7 @@ more_data: if (psock->eval == __SK_NONE) { delta = msg->sg.size; psock->eval = sk_psock_msg_verdict(sk, psock, msg); - if (delta < msg->sg.size) - delta -= msg->sg.size; - else - delta = 0; + delta -= msg->sg.size; } if (msg->cork_bytes && msg->cork_bytes > msg->sg.size && !enospc && !full_record) { @@ -801,7 +822,7 @@ more_data: switch (psock->eval) { case __SK_PASS: err = tls_push_record(sk, flags, record_type); - if (err < 0) { + if (err && err != -EINPROGRESS) { *copied -= sk_msg_free(sk, msg); tls_free_open_rec(sk); goto out_err; @@ -1515,7 +1536,9 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb, if (err == -EINPROGRESS) tls_advance_record_sn(sk, prot, &tls_ctx->rx); - + else if (err == -EBADMSG) + TLS_INC_STATS(sock_net(sk), + LINUX_MIB_TLSDECRYPTERROR); return err; } } else { diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c index b3bdae74c243..3492c021925f 100644 --- a/net/vmw_vsock/hyperv_transport.c +++ b/net/vmw_vsock/hyperv_transport.c @@ -138,28 +138,15 @@ struct hvsock { **************************************************************************** * The only valid Service GUIDs, from the perspectives of both the host and * * Linux VM, that can be connected by the other end, must conform to this * - * format: <port>-facb-11e6-bd58-64006a7986d3, and the "port" must be in * - * this range [0, 0x7FFFFFFF]. * + * format: <port>-facb-11e6-bd58-64006a7986d3. * **************************************************************************** * * When we write apps on the host to connect(), the GUID ServiceID is used. * When we write apps in Linux VM to connect(), we only need to specify the * port and the driver will form the GUID and use that to request the host. * - * From the perspective of Linux VM: - * 1. the local ephemeral port (i.e. the local auto-bound port when we call - * connect() without explicit bind()) is generated by __vsock_bind_stream(), - * and the range is [1024, 0xFFFFFFFF). - * 2. the remote ephemeral port (i.e. the auto-generated remote port for - * a connect request initiated by the host's connect()) is generated by - * hvs_remote_addr_init() and the range is [0x80000000, 0xFFFFFFFF). 
*/ -#define MAX_LISTEN_PORT ((u32)0x7FFFFFFF) -#define MAX_VM_LISTEN_PORT MAX_LISTEN_PORT -#define MAX_HOST_LISTEN_PORT MAX_LISTEN_PORT -#define MIN_HOST_EPHEMERAL_PORT (MAX_HOST_LISTEN_PORT + 1) - /* 00000000-facb-11e6-bd58-64006a7986d3 */ static const guid_t srv_id_template = GUID_INIT(0x00000000, 0xfacb, 0x11e6, 0xbd, 0x58, @@ -184,34 +171,6 @@ static void hvs_addr_init(struct sockaddr_vm *addr, const guid_t *svr_id) vsock_addr_init(addr, VMADDR_CID_ANY, port); } -static void hvs_remote_addr_init(struct sockaddr_vm *remote, - struct sockaddr_vm *local) -{ - static u32 host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT; - struct sock *sk; - - /* Remote peer is always the host */ - vsock_addr_init(remote, VMADDR_CID_HOST, VMADDR_PORT_ANY); - - while (1) { - /* Wrap around ? */ - if (host_ephemeral_port < MIN_HOST_EPHEMERAL_PORT || - host_ephemeral_port == VMADDR_PORT_ANY) - host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT; - - remote->svm_port = host_ephemeral_port++; - - sk = vsock_find_connected_socket(remote, local); - if (!sk) { - /* Found an available ephemeral port */ - return; - } - - /* Release refcnt got in vsock_find_connected_socket */ - sock_put(sk); - } -} - static void hvs_set_channel_pending_send_size(struct vmbus_channel *chan) { set_channel_pending_send_size(chan, @@ -341,12 +300,7 @@ static void hvs_open_connection(struct vmbus_channel *chan) if_type = &chan->offermsg.offer.if_type; if_instance = &chan->offermsg.offer.if_instance; conn_from_host = chan->offermsg.offer.u.pipe.user_def[0]; - - /* The host or the VM should only listen on a port in - * [0, MAX_LISTEN_PORT] - */ - if (!is_valid_srv_id(if_type) || - get_port_by_srv_id(if_type) > MAX_LISTEN_PORT) + if (!is_valid_srv_id(if_type)) return; hvs_addr_init(&addr, conn_from_host ? if_type : if_instance); @@ -371,8 +325,11 @@ static void hvs_open_connection(struct vmbus_channel *chan) vnew = vsock_sk(new); hvs_addr_init(&vnew->local_addr, if_type); - hvs_remote_addr_init(&vnew->remote_addr, &vnew->local_addr); + /* Remote peer is always the host */ + vsock_addr_init(&vnew->remote_addr, + VMADDR_CID_HOST, VMADDR_PORT_ANY); + vnew->remote_addr.svm_port = get_port_by_srv_id(if_instance); ret = vsock_assign_transport(vnew, vsock_sk(sk)); /* Transport assigned (looking at remote_addr) must be the * same where we received the request. @@ -766,16 +723,6 @@ static bool hvs_stream_is_active(struct vsock_sock *vsk) static bool hvs_stream_allow(u32 cid, u32 port) { - /* The host's port range [MIN_HOST_EPHEMERAL_PORT, 0xFFFFFFFF) is - * reserved as ephemeral ports, which are used as the host's ports - * when the host initiates connections. - * - * Perform this check in the guest so an immediate error is produced - * instead of a timeout. 
- */ - if (port > MAX_HOST_LISTEN_PORT) - return false; - if (cid == VMADDR_CID_HOST) return true; diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index e5ea29c6bca7..6abec3fc81d1 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -34,6 +34,9 @@ virtio_transport_get_ops(struct vsock_sock *vsk) { const struct vsock_transport *t = vsock_core_get_transport(vsk); + if (WARN_ON(!t)) + return NULL; + return container_of(t, struct virtio_transport, transport); } @@ -161,15 +164,25 @@ void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt) } EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt); +/* This function can only be used on connecting/connected sockets, + * since a socket assigned to a transport is required. + * + * Do not use on listener sockets! + */ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk, struct virtio_vsock_pkt_info *info) { u32 src_cid, src_port, dst_cid, dst_port; + const struct virtio_transport *t_ops; struct virtio_vsock_sock *vvs; struct virtio_vsock_pkt *pkt; u32 pkt_len = info->pkt_len; - src_cid = virtio_transport_get_ops(vsk)->transport.get_local_cid(); + t_ops = virtio_transport_get_ops(vsk); + if (unlikely(!t_ops)) + return -EFAULT; + + src_cid = t_ops->transport.get_local_cid(); src_port = vsk->local_addr.svm_port; if (!info->remote_cid) { dst_cid = vsk->remote_addr.svm_cid; @@ -202,7 +215,7 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk, virtio_transport_inc_tx_pkt(vvs, pkt); - return virtio_transport_get_ops(vsk)->send_pkt(pkt); + return t_ops->send_pkt(pkt); } static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs, @@ -1021,18 +1034,18 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt, int ret; if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_REQUEST) { - virtio_transport_reset(vsk, pkt); + virtio_transport_reset_no_sock(t, pkt); return -EINVAL; } if (sk_acceptq_is_full(sk)) { - virtio_transport_reset(vsk, pkt); + virtio_transport_reset_no_sock(t, pkt); return -ENOMEM; } child = vsock_create_connected(sk); if (!child) { - virtio_transport_reset(vsk, pkt); + virtio_transport_reset_no_sock(t, pkt); return -ENOMEM; } @@ -1054,7 +1067,7 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt, */ if (ret || vchild->transport != &t->transport) { release_sock(child); - virtio_transport_reset(vsk, pkt); + virtio_transport_reset_no_sock(t, pkt); sock_put(child); return ret; } diff --git a/net/wireless/core.c b/net/wireless/core.c index 350513744575..3e25229a059d 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1102,6 +1102,7 @@ static void __cfg80211_unregister_wdev(struct wireless_dev *wdev, bool sync) #ifdef CONFIG_CFG80211_WEXT kzfree(wdev->wext.keys); + wdev->wext.keys = NULL; #endif /* only initialized if we have a netdev */ if (wdev->netdev) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index da5262b2298b..1e97ac5435b2 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -10843,6 +10843,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev, if (err) return err; + cfg80211_sinfo_release_content(&sinfo); if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG)) wdev->cqm_config->last_rssi_event_value = (s8) sinfo.rx_beacon_signal_avg; @@ -13796,6 +13797,8 @@ static int nl80211_probe_mesh_link(struct sk_buff *skb, struct genl_info *info) if (err) return err; + 
cfg80211_sinfo_release_content(&sinfo); + return rdev_probe_mesh_link(rdev, dev, dest, buf, len); } diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index e853a4fe6f97..e0d34f796d0b 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -538,6 +538,10 @@ static inline int rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u32 changed) { int ret; + + if (!rdev->ops->set_wiphy_params) + return -EOPNOTSUPP; + trace_rdev_set_wiphy_params(&rdev->wiphy, changed); ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed); trace_rdev_return_int(&rdev->wiphy, ret); @@ -1167,6 +1171,16 @@ rdev_start_radar_detection(struct cfg80211_registered_device *rdev, return ret; } +static inline void +rdev_end_cac(struct cfg80211_registered_device *rdev, + struct net_device *dev) +{ + trace_rdev_end_cac(&rdev->wiphy, dev); + if (rdev->ops->end_cac) + rdev->ops->end_cac(&rdev->wiphy, dev); + trace_rdev_return_void(&rdev->wiphy); +} + static inline int rdev_set_mcast_rate(struct cfg80211_registered_device *rdev, struct net_device *dev, diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 446c76d44e65..fff9a74891fc 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -2261,14 +2261,15 @@ static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator) static void handle_channel_custom(struct wiphy *wiphy, struct ieee80211_channel *chan, - const struct ieee80211_regdomain *regd) + const struct ieee80211_regdomain *regd, + u32 min_bw) { u32 bw_flags = 0; const struct ieee80211_reg_rule *reg_rule = NULL; const struct ieee80211_power_rule *power_rule = NULL; u32 bw; - for (bw = MHZ_TO_KHZ(20); bw >= MHZ_TO_KHZ(5); bw = bw / 2) { + for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) { reg_rule = freq_reg_info_regd(MHZ_TO_KHZ(chan->center_freq), regd, bw); if (!IS_ERR(reg_rule)) @@ -2324,8 +2325,14 @@ static void handle_band_custom(struct wiphy *wiphy, if (!sband) return; + /* + * We currently assume that you always want at least 20 MHz, + * otherwise channel 12 might get enabled if this rule is + * compatible to US, which permits 2402 - 2472 MHz. + */ for (i = 0; i < sband->n_channels; i++) - handle_channel_custom(wiphy, &sband->channels[i], regd); + handle_channel_custom(wiphy, &sband->channels[i], regd, + MHZ_TO_KHZ(20)); } /* Used by drivers prior to wiphy registration */ @@ -3885,6 +3892,25 @@ bool regulatory_pre_cac_allowed(struct wiphy *wiphy) } EXPORT_SYMBOL(regulatory_pre_cac_allowed); +static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev) +{ + struct wireless_dev *wdev; + /* If we finished CAC or received radar, we should end any + * CAC running on the same channels. + * the check !cfg80211_chandef_dfs_usable contain 2 options: + * either all channels are available - those the CAC_FINISHED + * event has effected another wdev state, or there is a channel + * in unavailable state in wdev chandef - those the RADAR_DETECTED + * event has effected another wdev state. + * In both cases we should end the CAC on the wdev. 
+ */ + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { + if (wdev->cac_started && + !cfg80211_chandef_dfs_usable(&rdev->wiphy, &wdev->chandef)) + rdev_end_cac(rdev, wdev->netdev); + } +} + void regulatory_propagate_dfs_state(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, enum nl80211_dfs_state dfs_state, @@ -3911,8 +3937,10 @@ void regulatory_propagate_dfs_state(struct wiphy *wiphy, cfg80211_set_dfs_state(&rdev->wiphy, chandef, dfs_state); if (event == NL80211_RADAR_DETECTED || - event == NL80211_RADAR_CAC_FINISHED) + event == NL80211_RADAR_CAC_FINISHED) { cfg80211_sched_dfs_chan_update(rdev); + cfg80211_check_and_end_cac(rdev); + } nl80211_radar_notify(rdev, chandef, event, NULL, GFP_KERNEL); } diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 7a6c38ddc65a..d32a2ec4d96a 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -1307,14 +1307,14 @@ void cfg80211_autodisconnect_wk(struct work_struct *work) if (wdev->conn_owner_nlportid) { switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: - cfg80211_leave_ibss(rdev, wdev->netdev, false); + __cfg80211_leave_ibss(rdev, wdev->netdev, false); break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: - cfg80211_stop_ap(rdev, wdev->netdev, false); + __cfg80211_stop_ap(rdev, wdev->netdev, false); break; case NL80211_IFTYPE_MESH_POINT: - cfg80211_leave_mesh(rdev, wdev->netdev); + __cfg80211_leave_mesh(rdev, wdev->netdev); break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: diff --git a/net/wireless/trace.h b/net/wireless/trace.h index d98ad2b3143b..8677d7ab7d69 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -646,6 +646,11 @@ DEFINE_EVENT(wiphy_netdev_evt, rdev_flush_pmksa, TP_ARGS(wiphy, netdev) ); +DEFINE_EVENT(wiphy_netdev_evt, rdev_end_cac, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), + TP_ARGS(wiphy, netdev) +); + DECLARE_EVENT_CLASS(station_add_change, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac, struct station_parameters *params), diff --git a/net/wireless/util.c b/net/wireless/util.c index 5b4ed5bbc542..8481e9ac33da 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -564,7 +564,7 @@ __frame_add_frag(struct sk_buff *skb, struct page *page, struct skb_shared_info *sh = skb_shinfo(skb); int page_offset; - page_ref_inc(page); + get_page(page); page_offset = ptr - page_address(page); skb_add_rx_frag(skb, sh->nr_frags, page, page_offset, len, size); } diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 5e677dac2a0c..69102fda9ebd 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -657,7 +657,8 @@ struct iw_statistics *get_wireless_stats(struct net_device *dev) return NULL; } -static int iw_handler_get_iwstats(struct net_device * dev, +/* noinline to avoid a bogus warning with -O3 */ +static noinline int iw_handler_get_iwstats(struct net_device * dev, struct iw_request_info * info, union iwreq_data * wrqu, char * extra) diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index c34f7d077604..d5b09bbff375 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -659,6 +659,12 @@ static int x25_release(struct socket *sock) sock_set_flag(sk, SOCK_DEAD); sock_set_flag(sk, SOCK_DESTROY); break; + + case X25_STATE_5: + x25_write_internal(sk, X25_CLEAR_REQUEST); + x25_disconnect(sk, 0, 0, 0); + __x25_destroy_socket(sk); + goto out; } sock_orphan(sk); @@ -760,6 +766,10 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr, if (sk->sk_state == TCP_ESTABLISHED) goto out; + rc = -EALREADY; /* Do 
nothing if call is already in progress */ + if (sk->sk_state == TCP_SYN_SENT) + goto out; + sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; @@ -806,7 +816,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr, /* Now the loop */ rc = -EINPROGRESS; if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) - goto out_put_neigh; + goto out; rc = x25_wait_for_connection_establishment(sk); if (rc) @@ -1054,6 +1064,8 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, if (test_bit(X25_ACCPT_APPRV_FLAG, &makex25->flags)) { x25_write_internal(make, X25_CALL_ACCEPTED); makex25->state = X25_STATE_3; + } else { + makex25->state = X25_STATE_5; } /* diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index f97c43344e95..4d3bb46aaae0 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c @@ -382,6 +382,35 @@ out_clear: return 0; } +/* + * State machine for state 5, Call Accepted / Call Connected pending (X25_ACCPT_APPRV_FLAG). + * The handling of the timer(s) is in file x25_timer.c + * Handling of state 0 and connection release is in af_x25.c. + */ +static int x25_state5_machine(struct sock *sk, struct sk_buff *skb, int frametype) +{ + struct x25_sock *x25 = x25_sk(sk); + + switch (frametype) { + case X25_CLEAR_REQUEST: + if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) { + x25_write_internal(sk, X25_CLEAR_REQUEST); + x25->state = X25_STATE_2; + x25_start_t23timer(sk); + return 0; + } + + x25_write_internal(sk, X25_CLEAR_CONFIRMATION); + x25_disconnect(sk, 0, skb->data[3], skb->data[4]); + break; + + default: + break; + } + + return 0; +} + /* Higher level upcall for a LAPB frame */ int x25_process_rx_frame(struct sock *sk, struct sk_buff *skb) { @@ -406,6 +435,9 @@ int x25_process_rx_frame(struct sock *sk, struct sk_buff *skb) case X25_STATE_4: queued = x25_state4_machine(sk, skb, frametype); break; + case X25_STATE_5: + queued = x25_state5_machine(sk, skb, frametype); + break; } x25_kick(sk); diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 956793893c9d..328f661b83b2 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -334,12 +334,21 @@ out: } EXPORT_SYMBOL(xsk_umem_consume_tx); -static int xsk_zc_xmit(struct xdp_sock *xs) +static int xsk_wakeup(struct xdp_sock *xs, u8 flags) { struct net_device *dev = xs->dev; + int err; + + rcu_read_lock(); + err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags); + rcu_read_unlock(); + + return err; +} - return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, - XDP_WAKEUP_TX); +static int xsk_zc_xmit(struct xdp_sock *xs) +{ + return xsk_wakeup(xs, XDP_WAKEUP_TX); } static void xsk_destruct_skb(struct sk_buff *skb) @@ -453,19 +462,16 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock, __poll_t mask = datagram_poll(file, sock, wait); struct sock *sk = sock->sk; struct xdp_sock *xs = xdp_sk(sk); - struct net_device *dev; struct xdp_umem *umem; if (unlikely(!xsk_is_bound(xs))) return mask; - dev = xs->dev; umem = xs->umem; if (umem->need_wakeup) { - if (dev->netdev_ops->ndo_xsk_wakeup) - dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, - umem->need_wakeup); + if (xs->zc) + xsk_wakeup(xs, umem->need_wakeup); else /* Poll needs to drive Tx also in copy mode */ __xsk_sendmsg(sk); diff --git a/samples/bpf/syscall_tp_kern.c b/samples/bpf/syscall_tp_kern.c index 1d78819ffef1..630ce8c4d5a2 100644 --- a/samples/bpf/syscall_tp_kern.c +++ b/samples/bpf/syscall_tp_kern.c @@ -47,13 +47,27 @@ static __always_inline void count(void *map) SEC("tracepoint/syscalls/sys_enter_open") int trace_enter_open(struct 
syscalls_enter_open_args *ctx) { - count((void *)&enter_open_map); + count(&enter_open_map); + return 0; +} + +SEC("tracepoint/syscalls/sys_enter_openat") +int trace_enter_open_at(struct syscalls_enter_open_args *ctx) +{ + count(&enter_open_map); return 0; } SEC("tracepoint/syscalls/sys_exit_open") int trace_enter_exit(struct syscalls_exit_open_args *ctx) { - count((void *)&exit_open_map); + count(&exit_open_map); + return 0; +} + +SEC("tracepoint/syscalls/sys_exit_openat") +int trace_enter_exit_at(struct syscalls_exit_open_args *ctx) +{ + count(&exit_open_map); return 0; } diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c index 16a16eadd509..749a50f2f9f3 100644 --- a/samples/bpf/trace_event_user.c +++ b/samples/bpf/trace_event_user.c @@ -37,9 +37,9 @@ static void print_ksym(__u64 addr) } printf("%s;", sym->name); - if (!strcmp(sym->name, "sys_read")) + if (!strstr(sym->name, "sys_read")) sys_read_seen = true; - else if (!strcmp(sym->name, "sys_write")) + else if (!strstr(sym->name, "sys_write")) sys_write_seen = true; } diff --git a/samples/seccomp/user-trap.c b/samples/seccomp/user-trap.c index 6d0125ca8af7..20291ec6489f 100644 --- a/samples/seccomp/user-trap.c +++ b/samples/seccomp/user-trap.c @@ -298,14 +298,14 @@ int main(void) req = malloc(sizes.seccomp_notif); if (!req) goto out_close; - memset(req, 0, sizeof(*req)); resp = malloc(sizes.seccomp_notif_resp); if (!resp) goto out_req; - memset(resp, 0, sizeof(*resp)); + memset(resp, 0, sizes.seccomp_notif_resp); while (1) { + memset(req, 0, sizes.seccomp_notif); if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, req)) { perror("ioctl recv"); goto out_resp; diff --git a/samples/trace_printk/trace-printk.c b/samples/trace_printk/trace-printk.c index 7affc3b50b61..cfc159580263 100644 --- a/samples/trace_printk/trace-printk.c +++ b/samples/trace_printk/trace-printk.c @@ -36,6 +36,7 @@ static int __init trace_printk_init(void) /* Kick off printing in irq context */ irq_work_queue(&irqwork); + irq_work_sync(&irqwork); trace_printk("This is a %s that will use trace_bprintk()\n", "static string"); diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig index d33de0b9f4f5..e3569543bdac 100644 --- a/scripts/gcc-plugins/Kconfig +++ b/scripts/gcc-plugins/Kconfig @@ -14,8 +14,8 @@ config HAVE_GCC_PLUGINS An arch should select this symbol if it supports building with GCC plugins. -config GCC_PLUGINS - bool +menuconfig GCC_PLUGINS + bool "GCC plugins" depends on HAVE_GCC_PLUGINS depends on PLUGIN_HOSTCC != "" default y @@ -25,8 +25,7 @@ config GCC_PLUGINS See Documentation/core-api/gcc-plugins.rst for details. -menu "GCC plugins" - depends on GCC_PLUGINS +if GCC_PLUGINS config GCC_PLUGIN_CYC_COMPLEXITY bool "Compute the cyclomatic complexity of a function" if EXPERT @@ -113,4 +112,4 @@ config GCC_PLUGIN_ARM_SSP_PER_TASK bool depends on GCC_PLUGINS && ARM -endmenu +endif diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index fb55f262f42d..94153732ec00 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c @@ -310,6 +310,15 @@ static void output_label(const char *label) printf("%s:\n", label); } +/* Provide proper symbols relocatability by their '_text' relativeness. */ +static void output_address(unsigned long long addr) +{ + if (_text <= addr) + printf("\tPTR\t_text + %#llx\n", addr - _text); + else + printf("\tPTR\t_text - %#llx\n", _text - addr); +} + /* uncompress a compressed symbol. 
When this function is called, the best table * might still be compressed itself, so the function needs to be recursive */ static int expand_symbol(const unsigned char *data, int len, char *result) @@ -360,19 +369,6 @@ static void write_src(void) printf("\t.section .rodata, \"a\"\n"); - /* Provide proper symbols relocatability by their relativeness - * to a fixed anchor point in the runtime image, either '_text' - * for absolute address tables, in which case the linker will - * emit the final addresses at build time. Otherwise, use the - * offset relative to the lowest value encountered of all relative - * symbols, and emit non-relocatable fixed offsets that will be fixed - * up at runtime. - * - * The symbol names cannot be used to construct normal symbol - * references as the list of symbols contains symbols that are - * declared static and are private to their .o files. This prevents - * .tmp_kallsyms.o or any other object from referencing them. - */ if (!base_relative) output_label("kallsyms_addresses"); else @@ -380,6 +376,13 @@ static void write_src(void) for (i = 0; i < table_cnt; i++) { if (base_relative) { + /* + * Use the offset relative to the lowest value + * encountered of all relative symbols, and emit + * non-relocatable fixed offsets that will be fixed + * up at runtime. + */ + long long offset; int overflow; @@ -402,12 +405,7 @@ static void write_src(void) } printf("\t.long\t%#x\n", (int)offset); } else if (!symbol_absolute(&table[i])) { - if (_text <= table[i].addr) - printf("\tPTR\t_text + %#llx\n", - table[i].addr - _text); - else - printf("\tPTR\t_text - %#llx\n", - _text - table[i].addr); + output_address(table[i].addr); } else { printf("\tPTR\t%#llx\n", table[i].addr); } @@ -416,7 +414,7 @@ static void write_src(void) if (base_relative) { output_label("kallsyms_relative_base"); - printf("\tPTR\t_text - %#llx\n", _text - relative_base); + output_address(relative_base); printf("\n"); } diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c index 77ffff3a053c..9f1de58e9f0c 100644 --- a/scripts/kconfig/expr.c +++ b/scripts/kconfig/expr.c @@ -254,6 +254,13 @@ static int expr_eq(struct expr *e1, struct expr *e2) { int res, old_count; + /* + * A NULL expr is taken to be yes, but there's also a different way to + * represent yes. expr_is_yes() checks for either representation. 
+ */ + if (!e1 || !e2) + return expr_is_yes(e1) && expr_is_yes(e2); + if (e1->type != e2->type) return 0; switch (e1->type) { diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h index d1d757c6edf4..3a5a4b210c86 100755 --- a/scripts/mkcompile_h +++ b/scripts/mkcompile_h @@ -55,12 +55,10 @@ CONFIG_FLAGS="" if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi if [ -n "$PREEMPT_RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT_RT"; fi -UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP" # Truncate to maximum length - UTS_LEN=64 -UTS_TRUNCATE="cut -b -$UTS_LEN" +UTS_VERSION="$(echo $UTS_VERSION $CONFIG_FLAGS $TIMESTAMP | cut -b -$UTS_LEN)" # Generate a temporary compile.h @@ -69,10 +67,10 @@ UTS_TRUNCATE="cut -b -$UTS_LEN" echo \#define UTS_MACHINE \"$ARCH\" - echo \#define UTS_VERSION \"`echo $UTS_VERSION | $UTS_TRUNCATE`\" + echo \#define UTS_VERSION \"$UTS_VERSION\" - echo \#define LINUX_COMPILE_BY \"`echo $LINUX_COMPILE_BY | $UTS_TRUNCATE`\" - echo \#define LINUX_COMPILE_HOST \"`echo $LINUX_COMPILE_HOST | $UTS_TRUNCATE`\" + printf '#define LINUX_COMPILE_BY "%s"\n' "$LINUX_COMPILE_BY" + echo \#define LINUX_COMPILE_HOST \"$LINUX_COMPILE_HOST\" echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | grep ' version ' | sed 's/[[:space:]]*$//'`\" } > .tmpcompile diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian index e0750b70453f..357dc56bcf30 100755 --- a/scripts/package/mkdebian +++ b/scripts/package/mkdebian @@ -136,7 +136,7 @@ mkdir -p debian/source/ echo "1.0" > debian/source/format echo $debarch > debian/arch -extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev)" +extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev:native)" extra_build_depends="$extra_build_depends, $(if_enabled_echo CONFIG_SYSTEM_TRUSTED_KEYRING libssl-dev:native)" # Generate a simple changelog template @@ -174,7 +174,7 @@ Source: $sourcename Section: kernel Priority: optional Maintainer: $maintainer -Build-Depends: bc, kmod, cpio, bison, flex | flex:native $extra_build_depends +Build-Depends: bc, rsync, kmod, cpio, bison, flex | flex:native $extra_build_depends Homepage: http://www.kernel.org/ Package: $packagename diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index 09996f2552ee..47aff8700547 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c @@ -623,7 +623,7 @@ static __poll_t ns_revision_poll(struct file *file, poll_table *pt) void __aa_bump_ns_revision(struct aa_ns *ns) { - ns->revision++; + WRITE_ONCE(ns->revision, ns->revision + 1); wake_up_interruptible(&ns->wait); } diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index 9be7ccb8379e..6ceb74e0f789 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c @@ -317,6 +317,7 @@ static int aa_xattrs_match(const struct linux_binprm *bprm, if (!bprm || !profile->xattr_count) return 0; + might_sleep(); /* transition from exec match to xattr set */ state = aa_dfa_null_transition(profile->xmatch, state); @@ -361,10 +362,11 @@ out: } /** - * __attach_match_ - find an attachment match + * find_attach - do attachment search for unconfined processes * @bprm - binprm structure of transitioning task - * @name - to match against (NOT NULL) + * @ns: the current namespace (NOT NULL) * @head - profile list to walk (NOT NULL) + * @name - to match against (NOT NULL) * @info - info message if there was an error (NOT NULL) * * Do a linear search on the profiles in the list. 
There is a matching @@ -374,12 +376,11 @@ out: * * Requires: @head not be shared or have appropriate locks held * - * Returns: profile or NULL if no match found + * Returns: label or NULL if no match found */ -static struct aa_profile *__attach_match(const struct linux_binprm *bprm, - const char *name, - struct list_head *head, - const char **info) +static struct aa_label *find_attach(const struct linux_binprm *bprm, + struct aa_ns *ns, struct list_head *head, + const char *name, const char **info) { int candidate_len = 0, candidate_xattrs = 0; bool conflict = false; @@ -388,6 +389,8 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm, AA_BUG(!name); AA_BUG(!head); + rcu_read_lock(); +restart: list_for_each_entry_rcu(profile, head, base.list) { if (profile->label.flags & FLAG_NULL && &profile->label == ns_unconfined(profile->ns)) @@ -413,16 +416,32 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm, perm = dfa_user_allow(profile->xmatch, state); /* any accepting state means a valid match. */ if (perm & MAY_EXEC) { - int ret; + int ret = 0; if (count < candidate_len) continue; - ret = aa_xattrs_match(bprm, profile, state); - /* Fail matching if the xattrs don't match */ - if (ret < 0) - continue; - + if (bprm && profile->xattr_count) { + long rev = READ_ONCE(ns->revision); + + if (!aa_get_profile_not0(profile)) + goto restart; + rcu_read_unlock(); + ret = aa_xattrs_match(bprm, profile, + state); + rcu_read_lock(); + aa_put_profile(profile); + if (rev != + READ_ONCE(ns->revision)) + /* policy changed */ + goto restart; + /* + * Fail matching if the xattrs don't + * match + */ + if (ret < 0) + continue; + } /* * TODO: allow for more flexible best match * @@ -445,43 +464,28 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm, candidate_xattrs = ret; conflict = false; } - } else if (!strcmp(profile->base.name, name)) + } else if (!strcmp(profile->base.name, name)) { /* * old exact non-re match, without conditionals such * as xattrs. no more searching required */ - return profile; + candidate = profile; + goto out; + } } - if (conflict) { - *info = "conflicting profile attachments"; + if (!candidate || conflict) { + if (conflict) + *info = "conflicting profile attachments"; + rcu_read_unlock(); return NULL; } - return candidate; -} - -/** - * find_attach - do attachment search for unconfined processes - * @bprm - binprm structure of transitioning task - * @ns: the current namespace (NOT NULL) - * @list: list to search (NOT NULL) - * @name: the executable name to match against (NOT NULL) - * @info: info message if there was an error - * - * Returns: label or NULL if no match found - */ -static struct aa_label *find_attach(const struct linux_binprm *bprm, - struct aa_ns *ns, struct list_head *list, - const char *name, const char **info) -{ - struct aa_profile *profile; - - rcu_read_lock(); - profile = aa_get_profile(__attach_match(bprm, name, list, info)); +out: + candidate = aa_get_newest_profile(candidate); rcu_read_unlock(); - return profile ? 
&profile->label : NULL; + return &candidate->label; } static const char *next_name(int xtype, const char *name) diff --git a/security/apparmor/file.c b/security/apparmor/file.c index fe2ebe5e865e..f1caf3674e1c 100644 --- a/security/apparmor/file.c +++ b/security/apparmor/file.c @@ -618,8 +618,7 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file, fctx = file_ctx(file); rcu_read_lock(); - flabel = aa_get_newest_label(rcu_dereference(fctx->label)); - rcu_read_unlock(); + flabel = rcu_dereference(fctx->label); AA_BUG(!flabel); /* revalidate access, if task is unconfined, or the cached cred @@ -631,9 +630,13 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file, */ denied = request & ~fctx->allow; if (unconfined(label) || unconfined(flabel) || - (!denied && aa_label_is_subset(flabel, label))) + (!denied && aa_label_is_subset(flabel, label))) { + rcu_read_unlock(); goto done; + } + flabel = aa_get_newest_label(flabel); + rcu_read_unlock(); /* TODO: label cross check */ if (file->f_path.mnt && path_mediated_fs(file->f_path.dentry)) @@ -643,8 +646,9 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file, else if (S_ISSOCK(file_inode(file)->i_mode)) error = __file_sock_perm(op, label, flabel, file, request, denied); -done: aa_put_label(flabel); + +done: return error; } diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c index 4ed6688f9d40..e0828ee7a345 100644 --- a/security/apparmor/mount.c +++ b/security/apparmor/mount.c @@ -442,7 +442,7 @@ int aa_bind_mount(struct aa_label *label, const struct path *path, buffer = aa_get_buffer(false); old_buffer = aa_get_buffer(false); error = -ENOMEM; - if (!buffer || old_buffer) + if (!buffer || !old_buffer) goto out; error = fn_for_each_confined(label, profile, diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index 03104830c913..269f2f53c0b1 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c @@ -1125,8 +1125,8 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj, if (!name) { /* remove namespace - can only happen if fqname[0] == ':' */ mutex_lock_nested(&ns->parent->lock, ns->level); - __aa_remove_ns(ns); __aa_bump_ns_revision(ns); + __aa_remove_ns(ns); mutex_unlock(&ns->parent->lock); } else { /* remove profile */ @@ -1138,9 +1138,9 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj, goto fail_ns_lock; } name = profile->base.hname; + __aa_bump_ns_revision(ns); __remove_profile(profile); __aa_labelset_update_subtree(ns); - __aa_bump_ns_revision(ns); mutex_unlock(&ns->lock); } diff --git a/security/keys/Kconfig b/security/keys/Kconfig index dd313438fecf..47c041563d41 100644 --- a/security/keys/Kconfig +++ b/security/keys/Kconfig @@ -21,10 +21,6 @@ config KEYS If you are unsure as to whether this is required, answer N. 
-config KEYS_COMPAT - def_bool y - depends on COMPAT && KEYS - config KEYS_REQUEST_CACHE bool "Enable temporary caching of the last request_key() result" depends on KEYS diff --git a/security/keys/Makefile b/security/keys/Makefile index 074f27538f55..5f40807f05b3 100644 --- a/security/keys/Makefile +++ b/security/keys/Makefile @@ -17,7 +17,7 @@ obj-y := \ request_key_auth.o \ user_defined.o compat-obj-$(CONFIG_KEY_DH_OPERATIONS) += compat_dh.o -obj-$(CONFIG_KEYS_COMPAT) += compat.o $(compat-obj-y) +obj-$(CONFIG_COMPAT) += compat.o $(compat-obj-y) obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_SYSCTL) += sysctl.o obj-$(CONFIG_PERSISTENT_KEYRINGS) += persistent.o diff --git a/security/keys/compat.c b/security/keys/compat.c index 9bcc404131aa..b975f8f11124 100644 --- a/security/keys/compat.c +++ b/security/keys/compat.c @@ -46,11 +46,6 @@ static long compat_keyctl_instantiate_key_iov( /* * The key control system call, 32-bit compatibility version for 64-bit archs - * - * This should only be called if the 64-bit arch uses weird pointers in 32-bit - * mode or doesn't guarantee that the top 32-bits of the argument registers on - * taking a 32-bit syscall are zero. If you can, you should call sys_keyctl() - * directly. */ COMPAT_SYSCALL_DEFINE5(keyctl, u32, option, u32, arg2, u32, arg3, u32, arg4, u32, arg5) diff --git a/security/keys/internal.h b/security/keys/internal.h index c039373488bd..ba3e2da14cef 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -264,7 +264,7 @@ extern long keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *, size_t, struct keyctl_kdf_params __user *); extern long __keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *, size_t, struct keyctl_kdf_params *); -#ifdef CONFIG_KEYS_COMPAT +#ifdef CONFIG_COMPAT extern long compat_keyctl_dh_compute(struct keyctl_dh_params __user *params, char __user *buffer, size_t buflen, struct compat_keyctl_kdf_params __user *kdf); @@ -279,7 +279,7 @@ static inline long keyctl_dh_compute(struct keyctl_dh_params __user *params, return -EOPNOTSUPP; } -#ifdef CONFIG_KEYS_COMPAT +#ifdef CONFIG_COMPAT static inline long compat_keyctl_dh_compute( struct keyctl_dh_params __user *params, char __user *buffer, size_t buflen, diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c index a9810ac2776f..08ec7f48f01d 100644 --- a/security/keys/trusted-keys/trusted_tpm2.c +++ b/security/keys/trusted-keys/trusted_tpm2.c @@ -309,6 +309,7 @@ int tpm2_unseal_trusted(struct tpm_chip *chip, return rc; rc = tpm2_unseal_cmd(chip, payload, options, blob_handle); + tpm2_flush_context(chip, blob_handle); return rc; } diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index dd3d5942e669..c36bafbcd77e 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -951,7 +951,8 @@ static bool tomoyo_manager(void) exe = tomoyo_get_exe(); if (!exe) return false; - list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list) { + list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list, + srcu_read_lock_held(&tomoyo_ss)) { if (!ptr->head.is_deleted && (!tomoyo_pathcmp(domainname, ptr->manager) || !strcmp(exe, ptr->manager->name))) { @@ -1095,7 +1096,8 @@ static int tomoyo_delete_domain(char *domainname) if (mutex_lock_interruptible(&tomoyo_policy_lock)) return -EINTR; /* Is there an active domain? 
*/ - list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { + list_for_each_entry_rcu(domain, &tomoyo_domain_list, list, + srcu_read_lock_held(&tomoyo_ss)) { /* Never delete tomoyo_kernel_domain */ if (domain == &tomoyo_kernel_domain) continue; @@ -2778,7 +2780,8 @@ void tomoyo_check_profile(void) tomoyo_policy_loaded = true; pr_info("TOMOYO: 2.6.0\n"); - list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { + list_for_each_entry_rcu(domain, &tomoyo_domain_list, list, + srcu_read_lock_held(&tomoyo_ss)) { const u8 profile = domain->profile; struct tomoyo_policy_namespace *ns = domain->ns; diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index 8526a0a74023..7869d6a9980b 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -41,7 +41,8 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size, if (mutex_lock_interruptible(&tomoyo_policy_lock)) return -ENOMEM; - list_for_each_entry_rcu(entry, list, list) { + list_for_each_entry_rcu(entry, list, list, + srcu_read_lock_held(&tomoyo_ss)) { if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS) continue; if (!check_duplicate(entry, new_entry)) @@ -119,7 +120,8 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size, } if (mutex_lock_interruptible(&tomoyo_policy_lock)) goto out; - list_for_each_entry_rcu(entry, list, list) { + list_for_each_entry_rcu(entry, list, list, + srcu_read_lock_held(&tomoyo_ss)) { if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS) continue; if (!tomoyo_same_acl_head(entry, new_entry) || @@ -166,7 +168,8 @@ void tomoyo_check_acl(struct tomoyo_request_info *r, u16 i = 0; retry: - list_for_each_entry_rcu(ptr, list, list) { + list_for_each_entry_rcu(ptr, list, list, + srcu_read_lock_held(&tomoyo_ss)) { if (ptr->is_deleted || ptr->type != r->param_type) continue; if (!check_entry(r, ptr)) @@ -298,7 +301,8 @@ static inline bool tomoyo_scan_transition { const struct tomoyo_transition_control *ptr; - list_for_each_entry_rcu(ptr, list, head.list) { + list_for_each_entry_rcu(ptr, list, head.list, + srcu_read_lock_held(&tomoyo_ss)) { if (ptr->head.is_deleted || ptr->type != type) continue; if (ptr->domainname) { @@ -735,7 +739,8 @@ retry: /* Check 'aggregator' directive. 
*/ candidate = &exename; - list_for_each_entry_rcu(ptr, list, head.list) { + list_for_each_entry_rcu(ptr, list, head.list, + srcu_read_lock_held(&tomoyo_ss)) { if (ptr->head.is_deleted || !tomoyo_path_matches_pattern(&exename, ptr->original_name)) diff --git a/security/tomoyo/group.c b/security/tomoyo/group.c index a37c7dc66e44..1cecdd797597 100644 --- a/security/tomoyo/group.c +++ b/security/tomoyo/group.c @@ -133,7 +133,8 @@ tomoyo_path_matches_group(const struct tomoyo_path_info *pathname, { struct tomoyo_path_group *member; - list_for_each_entry_rcu(member, &group->member_list, head.list) { + list_for_each_entry_rcu(member, &group->member_list, head.list, + srcu_read_lock_held(&tomoyo_ss)) { if (member->head.is_deleted) continue; if (!tomoyo_path_matches_pattern(pathname, member->member_name)) @@ -161,7 +162,8 @@ bool tomoyo_number_matches_group(const unsigned long min, struct tomoyo_number_group *member; bool matched = false; - list_for_each_entry_rcu(member, &group->member_list, head.list) { + list_for_each_entry_rcu(member, &group->member_list, head.list, + srcu_read_lock_held(&tomoyo_ss)) { if (member->head.is_deleted) continue; if (min > member->number.values[1] || @@ -191,7 +193,8 @@ bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address, bool matched = false; const u8 size = is_ipv6 ? 16 : 4; - list_for_each_entry_rcu(member, &group->member_list, head.list) { + list_for_each_entry_rcu(member, &group->member_list, head.list, + srcu_read_lock_held(&tomoyo_ss)) { if (member->head.is_deleted) continue; if (member->address.is_ipv6 != is_ipv6) diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c index e7832448d721..bf38fc1b59b2 100644 --- a/security/tomoyo/realpath.c +++ b/security/tomoyo/realpath.c @@ -218,31 +218,6 @@ out: } /** - * tomoyo_get_socket_name - Get the name of a socket. - * - * @path: Pointer to "struct path". - * @buffer: Pointer to buffer to return value in. - * @buflen: Sizeof @buffer. - * - * Returns the buffer. - */ -static char *tomoyo_get_socket_name(const struct path *path, char * const buffer, - const int buflen) -{ - struct inode *inode = d_backing_inode(path->dentry); - struct socket *sock = inode ? SOCKET_I(inode) : NULL; - struct sock *sk = sock ? sock->sk : NULL; - - if (sk) { - snprintf(buffer, buflen, "socket:[family=%u:type=%u:protocol=%u]", - sk->sk_family, sk->sk_type, sk->sk_protocol); - } else { - snprintf(buffer, buflen, "socket:[unknown]"); - } - return buffer; -} - -/** * tomoyo_realpath_from_path - Returns realpath(3) of the given pathname but ignores chroot'ed root. * * @path: Pointer to "struct path". @@ -279,12 +254,7 @@ char *tomoyo_realpath_from_path(const struct path *path) break; /* To make sure that pos is '\0' terminated. */ buf[buf_len - 1] = '\0'; - /* Get better name for socket. */ - if (sb->s_magic == SOCKFS_MAGIC) { - pos = tomoyo_get_socket_name(path, buf, buf_len - 1); - goto encode; - } - /* For "pipe:[\$]". */ + /* For "pipe:[\$]" and "socket:[\$]". 
*/ if (dentry->d_op && dentry->d_op->d_dname) { pos = dentry->d_op->d_dname(dentry, buf, buf_len - 1); goto encode; diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c index 52752e1a84ed..eba0b3395851 100644 --- a/security/tomoyo/util.c +++ b/security/tomoyo/util.c @@ -594,7 +594,8 @@ struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname) name.name = domainname; tomoyo_fill_path_info(&name); - list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { + list_for_each_entry_rcu(domain, &tomoyo_domain_list, list, + srcu_read_lock_held(&tomoyo_ss)) { if (!domain->is_deleted && !tomoyo_pathcmp(&name, domain->domainname)) return domain; @@ -1028,7 +1029,8 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r) return false; if (!domain) return true; - list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) { + list_for_each_entry_rcu(ptr, &domain->acl_info_list, list, + srcu_read_lock_held(&tomoyo_ss)) { u16 perm; u8 i; diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 1fe581167b7b..d083225344a0 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -739,6 +739,10 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream, while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size) runtime->boundary *= 2; + /* clear the buffer for avoiding possible kernel info leaks */ + if (runtime->dma_area && !substream->ops->copy_user) + memset(runtime->dma_area, 0, runtime->dma_bytes); + snd_pcm_timer_resolution_change(substream); snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP); diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c index 63dc7bdb622d..be59b59c9be4 100644 --- a/sound/core/seq/seq_timer.c +++ b/sound/core/seq/seq_timer.c @@ -471,15 +471,19 @@ void snd_seq_info_timer_read(struct snd_info_entry *entry, q = queueptr(idx); if (q == NULL) continue; - if ((tmr = q->timer) == NULL || - (ti = tmr->timeri) == NULL) { - queuefree(q); - continue; - } + mutex_lock(&q->timer_mutex); + tmr = q->timer; + if (!tmr) + goto unlock; + ti = tmr->timeri; + if (!ti) + goto unlock; snd_iprintf(buffer, "Timer for queue %i : %s\n", q->queue, ti->timer->name); resolution = snd_timer_resolution(ti) * tmr->ticks; snd_iprintf(buffer, " Period time : %lu.%09lu\n", resolution / 1000000000, resolution % 1000000000); snd_iprintf(buffer, " Skew : %u / %u\n", tmr->skew, tmr->skew_base); +unlock: + mutex_unlock(&q->timer_mutex); queuefree(q); } } diff --git a/sound/firewire/dice/dice-extension.c b/sound/firewire/dice/dice-extension.c index a63fcbc875ad..02f4a8318e38 100644 --- a/sound/firewire/dice/dice-extension.c +++ b/sound/firewire/dice/dice-extension.c @@ -159,8 +159,11 @@ int snd_dice_detect_extension_formats(struct snd_dice *dice) int j; for (j = i + 1; j < 9; ++j) { - if (pointers[i * 2] == pointers[j * 2]) + if (pointers[i * 2] == pointers[j * 2]) { + // Fallback to limited functionality. 
+ err = -ENXIO; goto end; + } } } diff --git a/sound/firewire/tascam/amdtp-tascam.c b/sound/firewire/tascam/amdtp-tascam.c index e80bb84c43f6..f823a2ab3544 100644 --- a/sound/firewire/tascam/amdtp-tascam.c +++ b/sound/firewire/tascam/amdtp-tascam.c @@ -157,14 +157,15 @@ static void read_status_messages(struct amdtp_stream *s, if ((before ^ after) & mask) { struct snd_firewire_tascam_change *entry = &tscm->queue[tscm->push_pos]; + unsigned long flag; - spin_lock_irq(&tscm->lock); + spin_lock_irqsave(&tscm->lock, flag); entry->index = index; entry->before = before; entry->after = after; if (++tscm->push_pos >= SND_TSCM_QUEUE_COUNT) tscm->push_pos = 0; - spin_unlock_irq(&tscm->lock); + spin_unlock_irqrestore(&tscm->lock, flag); wake_up(&tscm->hwdep_wait); } diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c index 906b1e20bae0..286361ecd640 100644 --- a/sound/hda/hdac_regmap.c +++ b/sound/hda/hdac_regmap.c @@ -363,7 +363,6 @@ static const struct regmap_config hda_regmap_cfg = { .reg_write = hda_reg_write, .use_single_read = true, .use_single_write = true, - .disable_locking = true, }; /** diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c index f9707fb05efe..682ed39f79b0 100644 --- a/sound/hda/hdac_stream.c +++ b/sound/hda/hdac_stream.c @@ -120,10 +120,8 @@ void snd_hdac_stream_clear(struct hdac_stream *azx_dev) snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_CTL_DMA_START | SD_INT_MASK, 0); snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */ - if (azx_dev->stripe) { + if (azx_dev->stripe) snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0); - azx_dev->stripe = 0; - } azx_dev->running = false; } EXPORT_SYMBOL_GPL(snd_hdac_stream_clear); diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index 2f3b7a35f2d9..ba56b59b3e17 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -883,7 +883,7 @@ static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr, return -EAGAIN; /* give a chance to retry */ } - dev_WARN(chip->card->dev, + dev_err(chip->card->dev, "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n", bus->last_cmd[addr]); chip->single_cmd = 1; diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index b856b89378ac..8ef223aa1e37 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -125,7 +125,7 @@ static char *patch[SNDRV_CARDS]; static bool beep_mode[SNDRV_CARDS] = {[0 ... 
(SNDRV_CARDS-1)] = CONFIG_SND_HDA_INPUT_BEEP_MODE}; #endif -static bool dsp_driver = 1; +static bool dmic_detect = 1; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for Intel HD audio interface."); @@ -160,9 +160,10 @@ module_param_array(beep_mode, bool, NULL, 0444); MODULE_PARM_DESC(beep_mode, "Select HDA Beep registration mode " "(0=off, 1=on) (default=1)."); #endif -module_param(dsp_driver, bool, 0444); -MODULE_PARM_DESC(dsp_driver, "Allow DSP driver selection (bypass this driver) " - "(0=off, 1=on) (default=1)"); +module_param(dmic_detect, bool, 0444); +MODULE_PARM_DESC(dmic_detect, "Allow DSP driver selection (bypass this driver) " + "(0=off, 1=on) (default=1); " + "deprecated, use snd-intel-dspcfg.dsp_driver option instead"); #ifdef CONFIG_PM static int param_set_xint(const char *val, const struct kernel_param *kp); @@ -282,12 +283,13 @@ enum { /* quirks for old Intel chipsets */ #define AZX_DCAPS_INTEL_ICH \ - (AZX_DCAPS_OLD_SSYNC | AZX_DCAPS_NO_ALIGN_BUFSIZE) + (AZX_DCAPS_OLD_SSYNC | AZX_DCAPS_NO_ALIGN_BUFSIZE |\ + AZX_DCAPS_SYNC_WRITE) /* quirks for Intel PCH */ #define AZX_DCAPS_INTEL_PCH_BASE \ (AZX_DCAPS_NO_ALIGN_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY |\ - AZX_DCAPS_SNOOP_TYPE(SCH)) + AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE) /* PCH up to IVB; no runtime PM; bind with i915 gfx */ #define AZX_DCAPS_INTEL_PCH_NOPM \ @@ -302,13 +304,13 @@ enum { #define AZX_DCAPS_INTEL_HASWELL \ (/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_COUNT_LPIB_DELAY |\ AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\ - AZX_DCAPS_SNOOP_TYPE(SCH)) + AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE) /* Broadwell HDMI can't use position buffer reliably, force to use LPIB */ #define AZX_DCAPS_INTEL_BROADWELL \ (/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_POSFIX_LPIB |\ AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\ - AZX_DCAPS_SNOOP_TYPE(SCH)) + AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE) #define AZX_DCAPS_INTEL_BAYTRAIL \ (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_I915_COMPONENT) @@ -1410,7 +1412,17 @@ static bool atpx_present(void) acpi_handle dhandle, atpx_handle; acpi_status status; - while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) { + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { + dhandle = ACPI_HANDLE(&pdev->dev); + if (dhandle) { + status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); + if (!ACPI_FAILURE(status)) { + pci_dev_put(pdev); + return true; + } + } + } + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { dhandle = ACPI_HANDLE(&pdev->dev); if (dhandle) { status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); @@ -2088,11 +2100,13 @@ static int azx_probe(struct pci_dev *pci, /* * stop probe if another Intel's DSP driver should be activated */ - if (dsp_driver) { + if (dmic_detect) { err = snd_intel_dsp_driver_probe(pci); if (err != SND_INTEL_DSP_DRIVER_ANY && err != SND_INTEL_DSP_DRIVER_LEGACY) return -ENODEV; + } else { + dev_warn(&pci->dev, "dmic_detect option is deprecated, pass snd-intel-dspcfg.dsp_driver=1 option instead\n"); } err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE, diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index b7a1abb3e231..32ed46464af7 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -1809,13 +1809,14 @@ struct scp_msg { static void dspio_clear_response_queue(struct hda_codec *codec) { + unsigned long timeout = jiffies + msecs_to_jiffies(1000); unsigned int dummy = 0; - int 
status = -1; + int status; /* clear all from the response queue */ do { status = dspio_read(codec, &dummy); - } while (status == 0); + } while (status == 0 && time_before(jiffies, timeout)); } static int dspio_get_response_data(struct hda_codec *codec) @@ -7588,12 +7589,14 @@ static void ca0132_process_dsp_response(struct hda_codec *codec, struct ca0132_spec *spec = codec->spec; codec_dbg(codec, "ca0132_process_dsp_response\n"); + snd_hda_power_up_pm(codec); if (spec->wait_scp) { if (dspio_get_response_data(codec) >= 0) spec->wait_scp = 0; } dspio_clear_response_queue(codec); + snd_hda_power_down_pm(codec); } static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb) @@ -7604,11 +7607,10 @@ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb) /* Delay enabling the HP amp, to let the mic-detection * state machine run. */ - cancel_delayed_work(&spec->unsol_hp_work); - schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500)); tbl = snd_hda_jack_tbl_get(codec, cb->nid); if (tbl) tbl->block_report = 1; + schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500)); } static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb) @@ -8454,12 +8456,25 @@ static void ca0132_reboot_notify(struct hda_codec *codec) codec->patch_ops.free(codec); } +#ifdef CONFIG_PM +static int ca0132_suspend(struct hda_codec *codec) +{ + struct ca0132_spec *spec = codec->spec; + + cancel_delayed_work_sync(&spec->unsol_hp_work); + return 0; +} +#endif + static const struct hda_codec_ops ca0132_patch_ops = { .build_controls = ca0132_build_controls, .build_pcms = ca0132_build_pcms, .init = ca0132_init, .free = ca0132_free, .unsol_event = snd_hda_jack_unsol_event, +#ifdef CONFIG_PM + .suspend = ca0132_suspend, +#endif .reboot_notify = ca0132_reboot_notify, }; diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 78647ee02339..630b1f5c276d 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -2021,6 +2021,8 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo, per_cvt->assigned = 0; hinfo->nid = 0; + azx_stream(get_azx_dev(substream))->stripe = 0; + mutex_lock(&spec->pcm_lock); snd_hda_spdif_ctls_unassign(codec, pcm_idx); clear_bit(pcm_idx, &spec->pcm_in_use); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index dbfafee97931..f2ea3528bfb1 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -412,6 +412,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0672: alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */ break; + case 0x10ec0222: case 0x10ec0623: alc_update_coef_idx(codec, 0x19, 1<<13, 0); break; @@ -430,6 +431,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) break; case 0x10ec0899: case 0x10ec0900: + case 0x10ec0b00: case 0x10ec1168: case 0x10ec1220: alc_update_coef_idx(codec, 0x7, 1<<1, 0); @@ -501,6 +503,7 @@ static void alc_shutup_pins(struct hda_codec *codec) struct alc_spec *spec = codec->spec; switch (codec->core.vendor_id) { + case 0x10ec0283: case 0x10ec0286: case 0x10ec0288: case 0x10ec0298: @@ -2525,6 +2528,7 @@ static int patch_alc882(struct hda_codec *codec) case 0x10ec0882: case 0x10ec0885: case 0x10ec0900: + case 0x10ec0b00: case 0x10ec1220: break; default: @@ -5904,9 +5908,12 @@ enum { ALC256_FIXUP_ASUS_HEADSET_MIC, ALC256_FIXUP_ASUS_MIC_NO_PRESENCE, ALC299_FIXUP_PREDATOR_SPK, - ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC, ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, - 
ALC294_FIXUP_ASUS_INTSPK_GPIO, + ALC289_FIXUP_DELL_SPK2, + ALC289_FIXUP_DUAL_SPK, + ALC294_FIXUP_SPK2_TO_DAC1, + ALC294_FIXUP_ASUS_DUAL_SPK, + }; static const struct hda_fixup alc269_fixups[] = { @@ -6981,33 +6988,45 @@ static const struct hda_fixup alc269_fixups[] = { { } } }, - [ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = { + [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { - { 0x14, 0x411111f0 }, /* disable confusing internal speaker */ - { 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */ + { 0x19, 0x04a11040 }, + { 0x21, 0x04211020 }, { } }, .chained = true, - .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC + .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE }, - [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = { + [ALC289_FIXUP_DELL_SPK2] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { - { 0x19, 0x04a11040 }, - { 0x21, 0x04211020 }, + { 0x17, 0x90170130 }, /* bass spk */ { } }, .chained = true, - .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE + .chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE }, - [ALC294_FIXUP_ASUS_INTSPK_GPIO] = { + [ALC289_FIXUP_DUAL_SPK] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_speaker2_to_dac1, + .chained = true, + .chain_id = ALC289_FIXUP_DELL_SPK2 + }, + [ALC294_FIXUP_SPK2_TO_DAC1] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_speaker2_to_dac1, + .chained = true, + .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC + }, + [ALC294_FIXUP_ASUS_DUAL_SPK] = { .type = HDA_FIXUP_FUNC, /* The GPIO must be pulled to initialize the AMP */ .v.func = alc_fixup_gpio4, .chained = true, - .chain_id = ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC + .chain_id = ALC294_FIXUP_SPK2_TO_DAC1 }, + }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -7080,6 +7099,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), + SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK), + SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), @@ -7167,7 +7188,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), - SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_GPIO), + SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK), SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC), @@ -7239,6 +7260,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1), 
SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1), SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), @@ -9237,6 +9259,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0892, "ALC892", patch_alc662), HDA_CODEC_ENTRY(0x10ec0899, "ALC898", patch_alc882), HDA_CODEC_ENTRY(0x10ec0900, "ALC1150", patch_alc882), + HDA_CODEC_ENTRY(0x10ec0b00, "ALCS1200A", patch_alc882), HDA_CODEC_ENTRY(0x10ec1168, "ALC1220", patch_alc882), HDA_CODEC_ENTRY(0x10ec1220, "ALC1220", patch_alc882), {} /* terminator */ diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c index c80a16ee6e76..242542e23d28 100644 --- a/sound/pci/ice1712/ice1724.c +++ b/sound/pci/ice1712/ice1724.c @@ -647,6 +647,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate, unsigned long flags; unsigned char mclk_change; unsigned int i, old_rate; + bool call_set_rate = false; if (rate > ice->hw_rates->list[ice->hw_rates->count - 1]) return -EINVAL; @@ -670,7 +671,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate, * setting clock rate for internal clock mode */ old_rate = ice->get_rate(ice); if (force || (old_rate != rate)) - ice->set_rate(ice, rate); + call_set_rate = true; else if (rate == ice->cur_rate) { spin_unlock_irqrestore(&ice->reg_lock, flags); return 0; @@ -678,12 +679,14 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate, } ice->cur_rate = rate; + spin_unlock_irqrestore(&ice->reg_lock, flags); + + if (call_set_rate) + ice->set_rate(ice, rate); /* setting master clock */ mclk_change = ice->set_mclk(ice, rate); - spin_unlock_irqrestore(&ice->reg_lock, flags); - if (mclk_change && ice->gpio.i2s_mclk_changed) ice->gpio.i2s_mclk_changed(ice); if (ice->gpio.set_pro_rate) diff --git a/sound/soc/amd/acp-da7219-max98357a.c b/sound/soc/amd/acp-da7219-max98357a.c index f4ee6798154a..7a5621e5e233 100644 --- a/sound/soc/amd/acp-da7219-max98357a.c +++ b/sound/soc/amd/acp-da7219-max98357a.c @@ -96,14 +96,19 @@ static int cz_da7219_init(struct snd_soc_pcm_runtime *rtd) return 0; } -static int da7219_clk_enable(struct snd_pcm_substream *substream, - int wclk_rate, int bclk_rate) +static int da7219_clk_enable(struct snd_pcm_substream *substream) { int ret = 0; struct snd_soc_pcm_runtime *rtd = substream->private_data; - clk_set_rate(da7219_dai_wclk, wclk_rate); - clk_set_rate(da7219_dai_bclk, bclk_rate); + /* + * Set wclk to 48000 because the rate constraint of this driver is + * 48000. ADAU7002 spec: "The ADAU7002 requires a BCLK rate that is + * minimum of 64x the LRCLK sample rate." DA7219 is the only clk + * source so for all codecs we have to limit bclk to 64X lrclk. 
+ */ + clk_set_rate(da7219_dai_wclk, 48000); + clk_set_rate(da7219_dai_bclk, 48000 * 64); ret = clk_prepare_enable(da7219_dai_bclk); if (ret < 0) { dev_err(rtd->dev, "can't enable master clock %d\n", ret); @@ -156,7 +161,7 @@ static int cz_da7219_play_startup(struct snd_pcm_substream *substream) &constraints_rates); machine->play_i2s_instance = I2S_SP_INSTANCE; - return 0; + return da7219_clk_enable(substream); } static int cz_da7219_cap_startup(struct snd_pcm_substream *substream) @@ -178,7 +183,7 @@ static int cz_da7219_cap_startup(struct snd_pcm_substream *substream) machine->cap_i2s_instance = I2S_SP_INSTANCE; machine->capture_channel = CAP_CHANNEL1; - return 0; + return da7219_clk_enable(substream); } static int cz_max_startup(struct snd_pcm_substream *substream) @@ -199,7 +204,7 @@ static int cz_max_startup(struct snd_pcm_substream *substream) &constraints_rates); machine->play_i2s_instance = I2S_BT_INSTANCE; - return 0; + return da7219_clk_enable(substream); } static int cz_dmic0_startup(struct snd_pcm_substream *substream) @@ -220,7 +225,7 @@ static int cz_dmic0_startup(struct snd_pcm_substream *substream) &constraints_rates); machine->cap_i2s_instance = I2S_BT_INSTANCE; - return 0; + return da7219_clk_enable(substream); } static int cz_dmic1_startup(struct snd_pcm_substream *substream) @@ -242,25 +247,7 @@ static int cz_dmic1_startup(struct snd_pcm_substream *substream) machine->cap_i2s_instance = I2S_SP_INSTANCE; machine->capture_channel = CAP_CHANNEL0; - return 0; -} - -static int cz_da7219_params(struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params) -{ - int wclk, bclk; - - wclk = params_rate(params); - bclk = wclk * params_channels(params) * - snd_pcm_format_width(params_format(params)); - /* ADAU7002 spec: "The ADAU7002 requires a BCLK rate - * that is minimum of 64x the LRCLK sample rate." - * DA7219 is the only clk source so for all codecs - * we have to limit bclk to 64X lrclk. 
- */ - if (bclk < (wclk * 64)) - bclk = wclk * 64; - return da7219_clk_enable(substream, wclk, bclk); + return da7219_clk_enable(substream); } static void cz_da7219_shutdown(struct snd_pcm_substream *substream) @@ -271,31 +258,26 @@ static void cz_da7219_shutdown(struct snd_pcm_substream *substream) static const struct snd_soc_ops cz_da7219_play_ops = { .startup = cz_da7219_play_startup, .shutdown = cz_da7219_shutdown, - .hw_params = cz_da7219_params, }; static const struct snd_soc_ops cz_da7219_cap_ops = { .startup = cz_da7219_cap_startup, .shutdown = cz_da7219_shutdown, - .hw_params = cz_da7219_params, }; static const struct snd_soc_ops cz_max_play_ops = { .startup = cz_max_startup, .shutdown = cz_da7219_shutdown, - .hw_params = cz_da7219_params, }; static const struct snd_soc_ops cz_dmic0_cap_ops = { .startup = cz_dmic0_startup, .shutdown = cz_da7219_shutdown, - .hw_params = cz_da7219_params, }; static const struct snd_soc_ops cz_dmic1_cap_ops = { .startup = cz_dmic1_startup, .shutdown = cz_da7219_shutdown, - .hw_params = cz_da7219_params, }; SND_SOC_DAILINK_DEF(designware1, diff --git a/sound/soc/codecs/cros_ec_codec.c b/sound/soc/codecs/cros_ec_codec.c index 7b17f39a6a10..ce3ed056ea8b 100644 --- a/sound/soc/codecs/cros_ec_codec.c +++ b/sound/soc/codecs/cros_ec_codec.c @@ -10,6 +10,7 @@ #include <crypto/hash.h> #include <crypto/sha.h> +#include <linux/acpi.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/io.h> @@ -1047,10 +1048,17 @@ static const struct of_device_id cros_ec_codec_of_match[] = { MODULE_DEVICE_TABLE(of, cros_ec_codec_of_match); #endif +static const struct acpi_device_id cros_ec_codec_acpi_id[] = { + { "GOOG0013", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, cros_ec_codec_acpi_id); + static struct platform_driver cros_ec_codec_platform_driver = { .driver = { .name = "cros-ec-codec", .of_match_table = of_match_ptr(cros_ec_codec_of_match), + .acpi_match_table = ACPI_PTR(cros_ec_codec_acpi_id), }, .probe = cros_ec_codec_platform_probe, }; diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c index 6803d39e09a5..43110151e928 100644 --- a/sound/soc/codecs/hdac_hda.c +++ b/sound/soc/codecs/hdac_hda.c @@ -588,7 +588,9 @@ static int hdac_hda_dev_remove(struct hdac_device *hdev) struct hdac_hda_priv *hda_pvt; hda_pvt = dev_get_drvdata(&hdev->dev); - cancel_delayed_work_sync(&hda_pvt->codec.jackpoll_work); + if (hda_pvt && hda_pvt->codec.registered) + cancel_delayed_work_sync(&hda_pvt->codec.jackpoll_work); + return 0; } diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c index f6bf4cfbea23..e46b6ada13b1 100644 --- a/sound/soc/codecs/max98090.c +++ b/sound/soc/codecs/max98090.c @@ -2103,26 +2103,40 @@ static void max98090_pll_det_disable_work(struct work_struct *work) M98090_IULK_MASK, 0); } -static void max98090_pll_work(struct work_struct *work) +static void max98090_pll_work(struct max98090_priv *max98090) { - struct max98090_priv *max98090 = - container_of(work, struct max98090_priv, pll_work); struct snd_soc_component *component = max98090->component; + unsigned int pll; + int i; if (!snd_soc_component_is_active(component)) return; dev_info_ratelimited(component->dev, "PLL unlocked\n"); + /* + * As the datasheet suggested, the maximum PLL lock time should be + * 7 msec. The workaround resets the codec softly by toggling SHDN + * off and on if PLL failed to lock for 10 msec. Notably, there is + * no suggested hold time for SHDN off. 
+ */ + /* Toggle shutdown OFF then ON */ snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN, M98090_SHDNN_MASK, 0); - msleep(10); snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN, M98090_SHDNN_MASK, M98090_SHDNN_MASK); - /* Give PLL time to lock */ - msleep(10); + for (i = 0; i < 10; ++i) { + /* Give PLL time to lock */ + usleep_range(1000, 1200); + + /* Check lock status */ + pll = snd_soc_component_read32( + component, M98090_REG_DEVICE_STATUS); + if (!(pll & M98090_ULK_MASK)) + break; + } } static void max98090_jack_work(struct work_struct *work) @@ -2259,7 +2273,7 @@ static irqreturn_t max98090_interrupt(int irq, void *data) if (active & M98090_ULK_MASK) { dev_dbg(component->dev, "M98090_ULK_MASK\n"); - schedule_work(&max98090->pll_work); + max98090_pll_work(max98090); } if (active & M98090_JDET_MASK) { @@ -2422,7 +2436,6 @@ static int max98090_probe(struct snd_soc_component *component) max98090_pll_det_enable_work); INIT_WORK(&max98090->pll_det_disable_work, max98090_pll_det_disable_work); - INIT_WORK(&max98090->pll_work, max98090_pll_work); /* Enable jack detection */ snd_soc_component_write(component, M98090_REG_JACK_DETECT, @@ -2475,7 +2488,6 @@ static void max98090_remove(struct snd_soc_component *component) cancel_delayed_work_sync(&max98090->jack_work); cancel_delayed_work_sync(&max98090->pll_det_enable_work); cancel_work_sync(&max98090->pll_det_disable_work); - cancel_work_sync(&max98090->pll_work); max98090->component = NULL; } diff --git a/sound/soc/codecs/max98090.h b/sound/soc/codecs/max98090.h index 57965cd678b4..a197114b0dad 100644 --- a/sound/soc/codecs/max98090.h +++ b/sound/soc/codecs/max98090.h @@ -1530,7 +1530,6 @@ struct max98090_priv { struct delayed_work jack_work; struct delayed_work pll_det_enable_work; struct work_struct pll_det_disable_work; - struct work_struct pll_work; struct snd_soc_jack *jack; unsigned int dai_fmt; int tdm_slots; diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c index f53235be77d9..1f7964beb20c 100644 --- a/sound/soc/codecs/msm8916-wcd-analog.c +++ b/sound/soc/codecs/msm8916-wcd-analog.c @@ -396,9 +396,6 @@ static int pm8916_wcd_analog_enable_micbias_int(struct snd_soc_component switch (event) { case SND_SOC_DAPM_PRE_PMU: - snd_soc_component_update_bits(component, CDC_A_MICB_1_INT_RBIAS, - MICB_1_INT_TX2_INT_RBIAS_EN_MASK, - MICB_1_INT_TX2_INT_RBIAS_EN_ENABLE); snd_soc_component_update_bits(component, reg, MICB_1_EN_PULL_DOWN_EN_MASK, 0); snd_soc_component_update_bits(component, CDC_A_MICB_1_EN, MICB_1_EN_OPA_STG2_TAIL_CURR_MASK, @@ -448,6 +445,14 @@ static int pm8916_wcd_analog_enable_micbias_int1(struct struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct pm8916_wcd_analog_priv *wcd = snd_soc_component_get_drvdata(component); + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + snd_soc_component_update_bits(component, CDC_A_MICB_1_INT_RBIAS, + MICB_1_INT_TX1_INT_RBIAS_EN_MASK, + MICB_1_INT_TX1_INT_RBIAS_EN_ENABLE); + break; + } + return pm8916_wcd_analog_enable_micbias_int(component, event, w->reg, wcd->micbias1_cap_mode); } @@ -558,6 +563,11 @@ static int pm8916_wcd_analog_enable_micbias_int2(struct struct pm8916_wcd_analog_priv *wcd = snd_soc_component_get_drvdata(component); switch (event) { + case SND_SOC_DAPM_PRE_PMU: + snd_soc_component_update_bits(component, CDC_A_MICB_1_INT_RBIAS, + MICB_1_INT_TX2_INT_RBIAS_EN_MASK, + MICB_1_INT_TX2_INT_RBIAS_EN_ENABLE); + break; case SND_SOC_DAPM_POST_PMU: pm8916_mbhc_configure_bias(wcd, true); 
break; @@ -938,10 +948,10 @@ static const struct snd_soc_dapm_widget pm8916_wcd_analog_dapm_widgets[] = { SND_SOC_DAPM_SUPPLY("MIC BIAS External1", CDC_A_MICB_1_EN, 7, 0, pm8916_wcd_analog_enable_micbias_ext1, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_SUPPLY("MIC BIAS External2", CDC_A_MICB_2_EN, 7, 0, pm8916_wcd_analog_enable_micbias_ext2, - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_ADC_E("ADC1", NULL, CDC_A_TX_1_EN, 7, 0, pm8916_wcd_analog_enable_adc, diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c index 58b2468fb2a7..09fccacadd6b 100644 --- a/sound/soc/codecs/msm8916-wcd-digital.c +++ b/sound/soc/codecs/msm8916-wcd-digital.c @@ -586,6 +586,12 @@ static int msm8916_wcd_digital_enable_interpolator( snd_soc_component_write(component, rx_gain_reg[w->shift], snd_soc_component_read32(component, rx_gain_reg[w->shift])); break; + case SND_SOC_DAPM_POST_PMD: + snd_soc_component_update_bits(component, LPASS_CDC_CLK_RX_RESET_CTL, + 1 << w->shift, 1 << w->shift); + snd_soc_component_update_bits(component, LPASS_CDC_CLK_RX_RESET_CTL, + 1 << w->shift, 0x0); + break; } return 0; } diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c index adbae1f36a8a..747ca248bf10 100644 --- a/sound/soc/codecs/rt5640.c +++ b/sound/soc/codecs/rt5640.c @@ -2432,6 +2432,13 @@ static void rt5640_disable_jack_detect(struct snd_soc_component *component) { struct rt5640_priv *rt5640 = snd_soc_component_get_drvdata(component); + /* + * soc_remove_component() force-disables jack and thus rt5640->jack + * could be NULL at the time of driver's module unloading. + */ + if (!rt5640->jack) + return; + disable_irq(rt5640->irq); rt5640_cancel_work(rt5640); diff --git a/sound/soc/codecs/rt5677-spi.h b/sound/soc/codecs/rt5677-spi.h index 3af36ec928e9..088b77931727 100644 --- a/sound/soc/codecs/rt5677-spi.h +++ b/sound/soc/codecs/rt5677-spi.h @@ -9,9 +9,25 @@ #ifndef __RT5677_SPI_H__ #define __RT5677_SPI_H__ +#if IS_ENABLED(CONFIG_SND_SOC_RT5677_SPI) int rt5677_spi_read(u32 addr, void *rxbuf, size_t len); int rt5677_spi_write(u32 addr, const void *txbuf, size_t len); int rt5677_spi_write_firmware(u32 addr, const struct firmware *fw); void rt5677_spi_hotword_detected(void); +#else +static inline int rt5677_spi_read(u32 addr, void *rxbuf, size_t len) +{ + return -EINVAL; +} +static inline int rt5677_spi_write(u32 addr, const void *txbuf, size_t len) +{ + return -EINVAL; +} +static inline int rt5677_spi_write_firmware(u32 addr, const struct firmware *fw) +{ + return -EINVAL; +} +static inline void rt5677_spi_hotword_detected(void){} +#endif #endif /* __RT5677_SPI_H__ */ diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index b1713fffa3eb..ae6f6121bc1b 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c @@ -73,6 +73,7 @@ struct rt5682_priv { static const struct reg_sequence patch_list[] = { {RT5682_HP_IMP_SENS_CTRL_19, 0x1000}, {RT5682_DAC_ADC_DIG_VOL1, 0xa020}, + {RT5682_I2C_CTRL, 0x000f}, }; static const struct reg_default rt5682_reg[] = { @@ -2474,6 +2475,7 @@ static void rt5682_calibrate(struct rt5682_priv *rt5682) mutex_lock(&rt5682->calibrate_mutex); rt5682_reset(rt5682->regmap); + regmap_write(rt5682->regmap, RT5682_I2C_CTRL, 0x000f); regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0xa2af); usleep_range(15000, 20000); regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0xf2af); diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c index 
7d7ea15d73e0..5ffbaddd6e49 100644 --- a/sound/soc/codecs/wm8904.c +++ b/sound/soc/codecs/wm8904.c @@ -1806,6 +1806,12 @@ static int wm8904_set_sysclk(struct snd_soc_dai *dai, int clk_id, switch (clk_id) { case WM8904_CLK_AUTO: + /* We don't have any rate constraints, so just ignore the + * request to disable constraining. + */ + if (!freq) + return 0; + mclk_freq = clk_get_rate(priv->mclk); /* enable FLL if a different sysclk is desired */ if (mclk_freq != freq) { diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index 3e5c69fbc33a..d9d59f45833f 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c @@ -2788,7 +2788,7 @@ static int fll_factors(struct _fll_div *fll_div, unsigned int Fref, if (target % Fref == 0) { fll_div->theta = 0; - fll_div->lambda = 0; + fll_div->lambda = 1; } else { gcd_fll = gcd(target, fratio * Fref); @@ -2858,7 +2858,7 @@ static int wm8962_set_fll(struct snd_soc_component *component, int fll_id, int s return -EINVAL; } - if (fll_div.theta || fll_div.lambda) + if (fll_div.theta) fll1 |= WM8962_FLL_FRAC; /* Stop the FLL while we reconfigure */ diff --git a/sound/soc/fsl/fsl_audmix.c b/sound/soc/fsl/fsl_audmix.c index a1db1bce330f..5faecbeb5497 100644 --- a/sound/soc/fsl/fsl_audmix.c +++ b/sound/soc/fsl/fsl_audmix.c @@ -505,15 +505,20 @@ static int fsl_audmix_probe(struct platform_device *pdev) ARRAY_SIZE(fsl_audmix_dai)); if (ret) { dev_err(dev, "failed to register ASoC DAI\n"); - return ret; + goto err_disable_pm; } priv->pdev = platform_device_register_data(dev, mdrv, 0, NULL, 0); if (IS_ERR(priv->pdev)) { ret = PTR_ERR(priv->pdev); dev_err(dev, "failed to register platform %s: %d\n", mdrv, ret); + goto err_disable_pm; } + return 0; + +err_disable_pm: + pm_runtime_disable(dev); return ret; } @@ -521,6 +526,8 @@ static int fsl_audmix_remove(struct platform_device *pdev) { struct fsl_audmix *priv = dev_get_drvdata(&pdev->dev); + pm_runtime_disable(&pdev->dev); + if (priv->pdev) platform_device_unregister(priv->pdev); diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index 10b82bf043d1..55e9f8800b3e 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c @@ -371,6 +371,7 @@ static int simple_for_each_link(struct asoc_simple_priv *priv, do { struct asoc_simple_data adata; struct device_node *codec; + struct device_node *plat; struct device_node *np; int num = of_get_child_count(node); @@ -381,6 +382,9 @@ static int simple_for_each_link(struct asoc_simple_priv *priv, ret = -ENODEV; goto error; } + /* get platform */ + plat = of_get_child_by_name(node, is_top ? 
+ PREFIX "plat" : "plat"); /* get convert-xxx property */ memset(&adata, 0, sizeof(adata)); @@ -389,6 +393,8 @@ static int simple_for_each_link(struct asoc_simple_priv *priv, /* loop for all CPU/Codec node */ for_each_child_of_node(node, np) { + if (plat == np) + continue; /* * It is DPCM * if it has many CPUs, diff --git a/sound/soc/intel/atom/sst/sst.c b/sound/soc/intel/atom/sst/sst.c index fbecbb74350b..68bcec5241f7 100644 --- a/sound/soc/intel/atom/sst/sst.c +++ b/sound/soc/intel/atom/sst/sst.c @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/fs.h> #include <linux/interrupt.h> +#include <linux/io.h> #include <linux/firmware.h> #include <linux/pm_runtime.h> #include <linux/pm_qos.h> diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c index 46612331f5ea..54e97455d7f6 100644 --- a/sound/soc/intel/boards/bytcht_es8316.c +++ b/sound/soc/intel/boards/bytcht_es8316.c @@ -442,7 +442,8 @@ static const struct dmi_system_id byt_cht_es8316_quirk_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "IRBIS"), DMI_MATCH(DMI_PRODUCT_NAME, "NB41"), }, - .driver_data = (void *)(BYT_CHT_ES8316_INTMIC_IN2_MAP + .driver_data = (void *)(BYT_CHT_ES8316_SSP0 + | BYT_CHT_ES8316_INTMIC_IN2_MAP | BYT_CHT_ES8316_JD_INVERTED), }, { /* Teclast X98 Plus II */ diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index dd2b5ad08659..243f683bc02a 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ b/sound/soc/intel/boards/bytcr_rt5640.c @@ -707,13 +707,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_MCLK_EN), }, { + /* Teclast X89 */ .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"), DMI_MATCH(DMI_BOARD_NAME, "tPAD"), }, .driver_data = (void *)(BYT_RT5640_IN3_MAP | - BYT_RT5640_MCLK_EN | - BYT_RT5640_SSP0_AIF1), + BYT_RT5640_JD_SRC_JD1_IN4P | + BYT_RT5640_OVCD_TH_2000UA | + BYT_RT5640_OVCD_SF_1P0 | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), }, { /* Toshiba Satellite Click Mini L9W-B */ .matches = { diff --git a/sound/soc/intel/boards/cml_rt1011_rt5682.c b/sound/soc/intel/boards/cml_rt1011_rt5682.c index a22f97234201..5f1bf6d3800c 100644 --- a/sound/soc/intel/boards/cml_rt1011_rt5682.c +++ b/sound/soc/intel/boards/cml_rt1011_rt5682.c @@ -11,7 +11,6 @@ #include <linux/clk.h> #include <linux/dmi.h> #include <linux/slab.h> -#include <asm/cpu_device_id.h> #include <linux/acpi.h> #include <sound/core.h> #include <sound/jack.h> diff --git a/sound/soc/intel/common/soc-acpi-intel-cml-match.c b/sound/soc/intel/common/soc-acpi-intel-cml-match.c index 5d08ae066738..fb9ba8819706 100644 --- a/sound/soc/intel/common/soc-acpi-intel-cml-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-cml-match.c @@ -9,45 +9,52 @@ #include <sound/soc-acpi.h> #include <sound/soc-acpi-intel-match.h> -static struct snd_soc_acpi_codecs cml_codecs = { +static struct snd_soc_acpi_codecs rt1011_spk_codecs = { .num_codecs = 1, - .codecs = {"10EC5682"} + .codecs = {"10EC1011"} }; -static struct snd_soc_acpi_codecs cml_spk_codecs = { +static struct snd_soc_acpi_codecs max98357a_spk_codecs = { .num_codecs = 1, .codecs = {"MX98357A"} }; +/* + * The order of the three entries with .id = "10EC5682" matters + * here, because DSDT tables expose an ACPI HID for the MAX98357A + * speaker amplifier which is not populated on the board. 
+ */ struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[] = { { - .id = "DLGS7219", - .drv_name = "cml_da7219_max98357a", - .quirk_data = &cml_spk_codecs, + .id = "10EC5682", + .drv_name = "cml_rt1011_rt5682", + .machine_quirk = snd_soc_acpi_codec_list, + .quirk_data = &rt1011_spk_codecs, .sof_fw_filename = "sof-cml.ri", - .sof_tplg_filename = "sof-cml-da7219-max98357a.tplg", + .sof_tplg_filename = "sof-cml-rt1011-rt5682.tplg", }, { - .id = "MX98357A", + .id = "10EC5682", .drv_name = "sof_rt5682", - .quirk_data = &cml_codecs, + .machine_quirk = snd_soc_acpi_codec_list, + .quirk_data = &max98357a_spk_codecs, .sof_fw_filename = "sof-cml.ri", .sof_tplg_filename = "sof-cml-rt5682-max98357a.tplg", }, { - .id = "10EC1011", - .drv_name = "cml_rt1011_rt5682", - .quirk_data = &cml_codecs, - .sof_fw_filename = "sof-cml.ri", - .sof_tplg_filename = "sof-cml-rt1011-rt5682.tplg", - }, - { .id = "10EC5682", .drv_name = "sof_rt5682", .sof_fw_filename = "sof-cml.ri", .sof_tplg_filename = "sof-cml-rt5682.tplg", }, - + { + .id = "DLGS7219", + .drv_name = "cml_da7219_max98357a", + .machine_quirk = snd_soc_acpi_codec_list, + .quirk_data = &max98357a_spk_codecs, + .sof_fw_filename = "sof-cml.ri", + .sof_tplg_filename = "sof-cml-da7219-max98357a.tplg", + }, {}, }; EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_cml_machines); diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index 9054558ce386..b94680fb26fa 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -539,6 +539,9 @@ void snd_soc_pcm_component_free(struct snd_soc_pcm_runtime *rtd) struct snd_soc_rtdcom_list *rtdcom; struct snd_soc_component *component; + if (!rtd->pcm) + return; + for_each_rtd_components(rtd, rtdcom, component) if (component->driver->pcm_destruct) component->driver->pcm_destruct(component, rtd->pcm); diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c index 61f230324164..6615ef64c7f5 100644 --- a/sound/soc/soc-compress.c +++ b/sound/soc/soc-compress.c @@ -214,10 +214,8 @@ be_err: * This is to ensure there are no pops or clicks in between any music tracks * due to DAPM power cycling. 
*/ -static void close_delayed_work(struct work_struct *work) +static void close_delayed_work(struct snd_soc_pcm_runtime *rtd) { - struct snd_soc_pcm_runtime *rtd = - container_of(work, struct snd_soc_pcm_runtime, delayed_work.work); struct snd_soc_dai *codec_dai = rtd->codec_dai; mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass); @@ -929,7 +927,7 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num) } /* DAPM dai link stream work */ - INIT_DELAYED_WORK(&rtd->delayed_work, close_delayed_work); + rtd->close_delayed_work_func = close_delayed_work; rtd->compr = compr; compr->private_data = rtd; diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 062653ab03a3..8ef0efeed0a7 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -419,7 +419,8 @@ static void soc_free_pcm_runtime(struct snd_soc_pcm_runtime *rtd) list_del(&rtd->list); - flush_delayed_work(&rtd->delayed_work); + if (delayed_work_pending(&rtd->delayed_work)) + flush_delayed_work(&rtd->delayed_work); snd_soc_pcm_component_free(rtd); /* @@ -435,6 +436,15 @@ static void soc_free_pcm_runtime(struct snd_soc_pcm_runtime *rtd) device_unregister(rtd->dev); } +static void close_delayed_work(struct work_struct *work) { + struct snd_soc_pcm_runtime *rtd = + container_of(work, struct snd_soc_pcm_runtime, + delayed_work.work); + + if (rtd->close_delayed_work_func) + rtd->close_delayed_work_func(rtd); +} + static struct snd_soc_pcm_runtime *soc_new_pcm_runtime( struct snd_soc_card *card, struct snd_soc_dai_link *dai_link) { @@ -469,7 +479,14 @@ static struct snd_soc_pcm_runtime *soc_new_pcm_runtime( goto free_rtd; rtd->dev = dev; + INIT_LIST_HEAD(&rtd->list); + INIT_LIST_HEAD(&rtd->component_list); + INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients); + INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].be_clients); + INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].fe_clients); + INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients); dev_set_drvdata(dev, rtd); + INIT_DELAYED_WORK(&rtd->delayed_work, close_delayed_work); /* * for rtd->codec_dais @@ -483,12 +500,6 @@ static struct snd_soc_pcm_runtime *soc_new_pcm_runtime( /* * rtd remaining settings */ - INIT_LIST_HEAD(&rtd->component_list); - INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients); - INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].be_clients); - INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].fe_clients); - INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients); - rtd->card = card; rtd->dai_link = dai_link; if (!rtd->dai_link->ops) @@ -1860,6 +1871,8 @@ match: /* convert non BE into BE */ dai_link->no_pcm = 1; + dai_link->dpcm_playback = 1; + dai_link->dpcm_capture = 1; /* override any BE fixups */ dai_link->be_hw_params_fixup = diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 76b7ee637e86..01e7bc03d92f 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -637,10 +637,8 @@ out: * This is to ensure there are no pops or clicks in between any music tracks * due to DAPM power cycling. 
*/ -static void close_delayed_work(struct work_struct *work) +static void close_delayed_work(struct snd_soc_pcm_runtime *rtd) { - struct snd_soc_pcm_runtime *rtd = - container_of(work, struct snd_soc_pcm_runtime, delayed_work.work); struct snd_soc_dai *codec_dai = rtd->codec_dais[0]; mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass); @@ -660,7 +658,7 @@ static void close_delayed_work(struct work_struct *work) mutex_unlock(&rtd->card->pcm_mutex); } -static void codec2codec_close_delayed_work(struct work_struct *work) +static void codec2codec_close_delayed_work(struct snd_soc_pcm_runtime *rtd) { /* * Currently nothing to do for c2c links @@ -2974,10 +2972,9 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num) /* DAPM dai link stream work */ if (rtd->dai_link->params) - INIT_DELAYED_WORK(&rtd->delayed_work, - codec2codec_close_delayed_work); + rtd->close_delayed_work_func = codec2codec_close_delayed_work; else - INIT_DELAYED_WORK(&rtd->delayed_work, close_delayed_work); + rtd->close_delayed_work_func = close_delayed_work; pcm->nonatomic = rtd->dai_link->nonatomic; rtd->pcm = pcm; diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 81d2af000a5c..92e4f4d08bfa 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -548,12 +548,12 @@ static void remove_link(struct snd_soc_component *comp, if (dobj->ops && dobj->ops->link_unload) dobj->ops->link_unload(comp, dobj); + list_del(&dobj->list); + snd_soc_remove_dai_link(comp->card, link); + kfree(link->name); kfree(link->stream_name); kfree(link->cpus->dai_name); - - list_del(&dobj->list); - snd_soc_remove_dai_link(comp->card, link); kfree(link); } @@ -1933,11 +1933,13 @@ static int soc_tplg_fe_link_create(struct soc_tplg *tplg, ret = soc_tplg_dai_link_load(tplg, link, NULL); if (ret < 0) { dev_err(tplg->comp->dev, "ASoC: FE link loading failed\n"); - kfree(link->name); - kfree(link->stream_name); - kfree(link->cpus->dai_name); - kfree(link); - return ret; + goto err; + } + + ret = snd_soc_add_dai_link(tplg->comp->card, link); + if (ret < 0) { + dev_err(tplg->comp->dev, "ASoC: adding FE link failed\n"); + goto err; } link->dobj.index = tplg->index; @@ -1945,8 +1947,13 @@ static int soc_tplg_fe_link_create(struct soc_tplg *tplg, link->dobj.type = SND_SOC_DOBJ_DAI_LINK; list_add(&link->dobj.list, &tplg->comp->dobj_list); - snd_soc_add_dai_link(tplg->comp->card, link); return 0; +err: + kfree(link->name); + kfree(link->stream_name); + kfree(link->cpus->dai_name); + kfree(link); + return ret; } /* create a FE DAI and DAI link from the PCM object */ @@ -2039,6 +2046,7 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg, int size; int i; bool abi_match; + int ret; count = le32_to_cpu(hdr->count); @@ -2080,7 +2088,12 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg, } /* create the FE DAIs and DAI links */ - soc_tplg_pcm_create(tplg, _pcm); + ret = soc_tplg_pcm_create(tplg, _pcm); + if (ret < 0) { + if (!abi_match) + kfree(_pcm); + return ret; + } /* offset by version-specific struct size and * real priv data size diff --git a/sound/soc/sof/imx/imx8.c b/sound/soc/sof/imx/imx8.c index cfefcfd92798..aef6ca167b9c 100644 --- a/sound/soc/sof/imx/imx8.c +++ b/sound/soc/sof/imx/imx8.c @@ -209,7 +209,7 @@ static int imx8_probe(struct snd_sof_dev *sdev) priv->pd_dev = devm_kmalloc_array(&pdev->dev, priv->num_domains, sizeof(*priv->pd_dev), GFP_KERNEL); - if (!priv) + if (!priv->pd_dev) return -ENOMEM; priv->link = devm_kmalloc_array(&pdev->dev, priv->num_domains, @@ -304,6 +304,9 @@ static 
int imx8_probe(struct snd_sof_dev *sdev) } sdev->mailbox_bar = SOF_FW_BLK_TYPE_SRAM; + /* set default mailbox offset for FW ready message */ + sdev->dsp_box.offset = MBOX_OFFSET; + return 0; exit_pdev_unregister: diff --git a/sound/soc/sof/intel/byt.c b/sound/soc/sof/intel/byt.c index 2abf80b3eb52..92ef6a796fd5 100644 --- a/sound/soc/sof/intel/byt.c +++ b/sound/soc/sof/intel/byt.c @@ -24,7 +24,8 @@ #define DRAM_OFFSET 0x100000 #define DRAM_SIZE (160 * 1024) #define SHIM_OFFSET 0x140000 -#define SHIM_SIZE 0x100 +#define SHIM_SIZE_BYT 0x100 +#define SHIM_SIZE_CHT 0x118 #define MBOX_OFFSET 0x144000 #define MBOX_SIZE 0x1000 #define EXCEPT_OFFSET 0x800 @@ -75,7 +76,7 @@ static const struct snd_sof_debugfs_map byt_debugfs[] = { SOF_DEBUGFS_ACCESS_D0_ONLY}, {"dram", BYT_DSP_BAR, DRAM_OFFSET, DRAM_SIZE, SOF_DEBUGFS_ACCESS_D0_ONLY}, - {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE, + {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE_BYT, SOF_DEBUGFS_ACCESS_ALWAYS}, }; @@ -102,7 +103,7 @@ static const struct snd_sof_debugfs_map cht_debugfs[] = { SOF_DEBUGFS_ACCESS_D0_ONLY}, {"dram", BYT_DSP_BAR, DRAM_OFFSET, DRAM_SIZE, SOF_DEBUGFS_ACCESS_D0_ONLY}, - {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE, + {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE_CHT, SOF_DEBUGFS_ACCESS_ALWAYS}, }; @@ -145,33 +146,33 @@ static void byt_dump(struct snd_sof_dev *sdev, u32 flags) struct sof_ipc_dsp_oops_xtensa xoops; struct sof_ipc_panic_info panic_info; u32 stack[BYT_STACK_DUMP_SIZE]; - u32 status, panic, imrd, imrx; + u64 status, panic, imrd, imrx; /* now try generic SOF status messages */ - status = snd_sof_dsp_read(sdev, BYT_DSP_BAR, SHIM_IPCD); - panic = snd_sof_dsp_read(sdev, BYT_DSP_BAR, SHIM_IPCX); + status = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCD); + panic = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCX); byt_get_registers(sdev, &xoops, &panic_info, stack, BYT_STACK_DUMP_SIZE); snd_sof_get_status(sdev, status, panic, &xoops, &panic_info, stack, BYT_STACK_DUMP_SIZE); /* provide some context for firmware debug */ - imrx = snd_sof_dsp_read(sdev, BYT_DSP_BAR, SHIM_IMRX); - imrd = snd_sof_dsp_read(sdev, BYT_DSP_BAR, SHIM_IMRD); + imrx = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IMRX); + imrd = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IMRD); dev_err(sdev->dev, - "error: ipc host -> DSP: pending %s complete %s raw 0x%8.8x\n", + "error: ipc host -> DSP: pending %s complete %s raw 0x%llx\n", (panic & SHIM_IPCX_BUSY) ? "yes" : "no", (panic & SHIM_IPCX_DONE) ? "yes" : "no", panic); dev_err(sdev->dev, - "error: mask host: pending %s complete %s raw 0x%8.8x\n", + "error: mask host: pending %s complete %s raw 0x%llx\n", (imrx & SHIM_IMRX_BUSY) ? "yes" : "no", (imrx & SHIM_IMRX_DONE) ? "yes" : "no", imrx); dev_err(sdev->dev, - "error: ipc DSP -> host: pending %s complete %s raw 0x%8.8x\n", + "error: ipc DSP -> host: pending %s complete %s raw 0x%llx\n", (status & SHIM_IPCD_BUSY) ? "yes" : "no", (status & SHIM_IPCD_DONE) ? "yes" : "no", status); dev_err(sdev->dev, - "error: mask DSP: pending %s complete %s raw 0x%8.8x\n", + "error: mask DSP: pending %s complete %s raw 0x%llx\n", (imrd & SHIM_IMRD_BUSY) ? "yes" : "no", (imrd & SHIM_IMRD_DONE) ? 
"yes" : "no", imrd); diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c index 827f84a0722e..fbfa225d1c5a 100644 --- a/sound/soc/sof/intel/hda-codec.c +++ b/sound/soc/sof/intel/hda-codec.c @@ -24,19 +24,18 @@ #define IDISP_VID_INTEL 0x80860000 /* load the legacy HDA codec driver */ -#ifdef MODULE -static void hda_codec_load_module(struct hda_codec *codec) +static int hda_codec_load_module(struct hda_codec *codec) { +#ifdef MODULE char alias[MODULE_NAME_LEN]; const char *module = alias; snd_hdac_codec_modalias(&codec->core, alias, sizeof(alias)); dev_dbg(&codec->core.dev, "loading codec module: %s\n", module); request_module(module); -} -#else -static void hda_codec_load_module(struct hda_codec *codec) {} #endif + return device_attach(hda_codec_dev(codec)); +} /* enable controller wake up event for all codecs with jack connectors */ void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev) @@ -129,10 +128,16 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address) if ((mach_params && mach_params->common_hdmi_codec_drv) || (resp & 0xFFFF0000) != IDISP_VID_INTEL) { hdev->type = HDA_DEV_LEGACY; - hda_codec_load_module(&hda_priv->codec); + ret = hda_codec_load_module(&hda_priv->codec); + /* + * handle ret==0 (no driver bound) as an error, but pass + * other return codes without modification + */ + if (ret == 0) + ret = -ENOENT; } - return 0; + return ret; #else hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL); if (!hdev) diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c index 8796f385be76..896d21984b73 100644 --- a/sound/soc/sof/intel/hda-dai.c +++ b/sound/soc/sof/intel/hda-dai.c @@ -216,6 +216,8 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream, link_dev = hda_link_stream_assign(bus, substream); if (!link_dev) return -EBUSY; + + snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev); } stream_tag = hdac_stream(link_dev)->stream_tag; @@ -228,8 +230,6 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream, if (ret < 0) return ret; - snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev); - link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name); if (!link) return -EINVAL; @@ -361,6 +361,13 @@ static int hda_link_hw_free(struct snd_pcm_substream *substream, bus = hstream->bus; rtd = snd_pcm_substream_chip(substream); link_dev = snd_soc_dai_get_dma_data(dai, substream); + + if (!link_dev) { + dev_dbg(dai->dev, + "%s: link_dev is not assigned\n", __func__); + return -EINVAL; + } + hda_stream = hstream_to_sof_hda_stream(link_dev); /* free the link DMA channel in the FW */ diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c index b1783360fe10..bae7ac3581e5 100644 --- a/sound/soc/sof/intel/hda-loader.c +++ b/sound/soc/sof/intel/hda-loader.c @@ -329,13 +329,13 @@ int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev) if (!ret) break; - dev_err(sdev->dev, "error: Error code=0x%x: FW status=0x%x\n", + dev_dbg(sdev->dev, "iteration %d of Core En/ROM load failed: %d\n", + i, ret); + dev_dbg(sdev->dev, "Error code=0x%x: FW status=0x%x\n", snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_ROM_ERROR), snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_ROM_STATUS)); - dev_err(sdev->dev, "error: iteration %d of Core En/ROM load failed: %d\n", - i, ret); } if (i == HDA_FW_BOOT_ATTEMPTS) { diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c index 5994e1073364..5fdfbaa8c4ed 100644 --- a/sound/soc/sof/ipc.c +++ b/sound/soc/sof/ipc.c @@ -826,6 +826,9 @@ void 
snd_sof_ipc_free(struct snd_sof_dev *sdev) { struct snd_sof_ipc *ipc = sdev->ipc; + if (!ipc) + return; + /* disable sending of ipc's */ mutex_lock(&ipc->tx_mutex); ipc->disable_ipc_tx = true; diff --git a/sound/soc/sof/loader.c b/sound/soc/sof/loader.c index 9a9a381a908d..432d12bd4937 100644 --- a/sound/soc/sof/loader.c +++ b/sound/soc/sof/loader.c @@ -50,8 +50,7 @@ int snd_sof_fw_parse_ext_data(struct snd_sof_dev *sdev, u32 bar, u32 offset) while (ext_hdr->hdr.cmd == SOF_IPC_FW_READY) { /* read in ext structure */ - offset += sizeof(*ext_hdr); - snd_sof_dsp_block_read(sdev, bar, offset, + snd_sof_dsp_block_read(sdev, bar, offset + sizeof(*ext_hdr), (void *)((u8 *)ext_data + sizeof(*ext_hdr)), ext_hdr->hdr.size - sizeof(*ext_hdr)); @@ -61,11 +60,15 @@ int snd_sof_fw_parse_ext_data(struct snd_sof_dev *sdev, u32 bar, u32 offset) /* process structure data */ switch (ext_hdr->type) { case SOF_IPC_EXT_DMA_BUFFER: + ret = 0; break; case SOF_IPC_EXT_WINDOW: ret = get_ext_windows(sdev, ext_hdr); break; default: + dev_warn(sdev->dev, "warning: unknown ext header type %d size 0x%x\n", + ext_hdr->type, ext_hdr->hdr.size); + ret = 0; break; } diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c index d82ab981e840..e20b806ec80f 100644 --- a/sound/soc/sof/topology.c +++ b/sound/soc/sof/topology.c @@ -3132,7 +3132,9 @@ found: case SOF_DAI_INTEL_SSP: case SOF_DAI_INTEL_DMIC: case SOF_DAI_INTEL_ALH: - /* no resource needs to be released for SSP, DMIC and ALH */ + case SOF_DAI_IMX_SAI: + case SOF_DAI_IMX_ESAI: + /* no resource needs to be released for all cases above */ break; case SOF_DAI_INTEL_HDA: ret = sof_link_hda_unload(sdev, link); diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c index 48ea915b24ba..2ed92c990b97 100644 --- a/sound/soc/sti/uniperif_player.c +++ b/sound/soc/sti/uniperif_player.c @@ -226,7 +226,6 @@ static void uni_player_set_channel_status(struct uniperif *player, * sampling frequency. If no sample rate is already specified, then * set one. 
*/ - mutex_lock(&player->ctrl_lock); if (runtime) { switch (runtime->rate) { case 22050: @@ -303,7 +302,6 @@ static void uni_player_set_channel_status(struct uniperif *player, player->stream_settings.iec958.status[3 + (n * 4)] << 24; SET_UNIPERIF_CHANNEL_STA_REGN(player, n, status); } - mutex_unlock(&player->ctrl_lock); /* Update the channel status */ if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) @@ -365,8 +363,10 @@ static int uni_player_prepare_iec958(struct uniperif *player, SET_UNIPERIF_CTRL_ZERO_STUFF_HW(player); + mutex_lock(&player->ctrl_lock); /* Update the channel status */ uni_player_set_channel_status(player, runtime); + mutex_unlock(&player->ctrl_lock); /* Clear the user validity user bits */ SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 0); @@ -598,7 +598,6 @@ static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol, iec958->status[1] = ucontrol->value.iec958.status[1]; iec958->status[2] = ucontrol->value.iec958.status[2]; iec958->status[3] = ucontrol->value.iec958.status[3]; - mutex_unlock(&player->ctrl_lock); spin_lock_irqsave(&player->irq_lock, flags); if (player->substream && player->substream->runtime) @@ -608,6 +607,8 @@ static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol, uni_player_set_channel_status(player, NULL); spin_unlock_irqrestore(&player->irq_lock, flags); + mutex_unlock(&player->ctrl_lock); + return 0; } diff --git a/sound/soc/stm/stm32_adfsdm.c b/sound/soc/stm/stm32_adfsdm.c index 81c407da15c5..08696a4adb69 100644 --- a/sound/soc/stm/stm32_adfsdm.c +++ b/sound/soc/stm/stm32_adfsdm.c @@ -153,13 +153,13 @@ static const struct snd_soc_component_driver stm32_adfsdm_dai_component = { .name = "stm32_dfsdm_audio", }; -static void memcpy_32to16(void *dest, const void *src, size_t n) +static void stm32_memcpy_32to16(void *dest, const void *src, size_t n) { unsigned int i = 0; u16 *d = (u16 *)dest, *s = (u16 *)src; s++; - for (i = n; i > 0; i--) { + for (i = n >> 1; i > 0; i--) { *d++ = *s++; s++; } @@ -186,8 +186,8 @@ static int stm32_afsdm_pcm_cb(const void *data, size_t size, void *private) if ((priv->pos + src_size) > buff_size) { if (format == SNDRV_PCM_FORMAT_S16_LE) - memcpy_32to16(&pcm_buff[priv->pos], src_buff, - buff_size - priv->pos); + stm32_memcpy_32to16(&pcm_buff[priv->pos], src_buff, + buff_size - priv->pos); else memcpy(&pcm_buff[priv->pos], src_buff, buff_size - priv->pos); @@ -196,8 +196,8 @@ static int stm32_afsdm_pcm_cb(const void *data, size_t size, void *private) } if (format == SNDRV_PCM_FORMAT_S16_LE) - memcpy_32to16(&pcm_buff[priv->pos], - &src_buff[src_size - cur_size], cur_size); + stm32_memcpy_32to16(&pcm_buff[priv->pos], + &src_buff[src_size - cur_size], cur_size); else memcpy(&pcm_buff[priv->pos], &src_buff[src_size - cur_size], cur_size); diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c index 48e629ac2d88..30bcd5d3a32a 100644 --- a/sound/soc/stm/stm32_sai_sub.c +++ b/sound/soc/stm/stm32_sai_sub.c @@ -184,6 +184,56 @@ static bool stm32_sai_sub_writeable_reg(struct device *dev, unsigned int reg) } } +static int stm32_sai_sub_reg_up(struct stm32_sai_sub_data *sai, + unsigned int reg, unsigned int mask, + unsigned int val) +{ + int ret; + + ret = clk_enable(sai->pdata->pclk); + if (ret < 0) + return ret; + + ret = regmap_update_bits(sai->regmap, reg, mask, val); + + clk_disable(sai->pdata->pclk); + + return ret; +} + +static int stm32_sai_sub_reg_wr(struct stm32_sai_sub_data *sai, + unsigned int reg, unsigned int mask, + unsigned int val) +{ + int ret; + + ret = 
clk_enable(sai->pdata->pclk); + if (ret < 0) + return ret; + + ret = regmap_write_bits(sai->regmap, reg, mask, val); + + clk_disable(sai->pdata->pclk); + + return ret; +} + +static int stm32_sai_sub_reg_rd(struct stm32_sai_sub_data *sai, + unsigned int reg, unsigned int *val) +{ + int ret; + + ret = clk_enable(sai->pdata->pclk); + if (ret < 0) + return ret; + + ret = regmap_read(sai->regmap, reg, val); + + clk_disable(sai->pdata->pclk); + + return ret; +} + static const struct regmap_config stm32_sai_sub_regmap_config_f4 = { .reg_bits = 32, .reg_stride = 4, @@ -295,7 +345,7 @@ static int stm32_sai_set_clk_div(struct stm32_sai_sub_data *sai, mask = SAI_XCR1_MCKDIV_MASK(SAI_XCR1_MCKDIV_WIDTH(version)); cr1 = SAI_XCR1_MCKDIV_SET(div); - ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, mask, cr1); + ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, mask, cr1); if (ret < 0) dev_err(&sai->pdev->dev, "Failed to update CR1 register\n"); @@ -372,8 +422,8 @@ static int stm32_sai_mclk_enable(struct clk_hw *hw) dev_dbg(&sai->pdev->dev, "Enable master clock\n"); - return regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, - SAI_XCR1_MCKEN, SAI_XCR1_MCKEN); + return stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, + SAI_XCR1_MCKEN, SAI_XCR1_MCKEN); } static void stm32_sai_mclk_disable(struct clk_hw *hw) @@ -383,7 +433,7 @@ static void stm32_sai_mclk_disable(struct clk_hw *hw) dev_dbg(&sai->pdev->dev, "Disable master clock\n"); - regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, SAI_XCR1_MCKEN, 0); + stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, SAI_XCR1_MCKEN, 0); } static const struct clk_ops mclk_ops = { @@ -446,15 +496,15 @@ static irqreturn_t stm32_sai_isr(int irq, void *devid) unsigned int sr, imr, flags; snd_pcm_state_t status = SNDRV_PCM_STATE_RUNNING; - regmap_read(sai->regmap, STM_SAI_IMR_REGX, &imr); - regmap_read(sai->regmap, STM_SAI_SR_REGX, &sr); + stm32_sai_sub_reg_rd(sai, STM_SAI_IMR_REGX, &imr); + stm32_sai_sub_reg_rd(sai, STM_SAI_SR_REGX, &sr); flags = sr & imr; if (!flags) return IRQ_NONE; - regmap_write_bits(sai->regmap, STM_SAI_CLRFR_REGX, SAI_XCLRFR_MASK, - SAI_XCLRFR_MASK); + stm32_sai_sub_reg_wr(sai, STM_SAI_CLRFR_REGX, SAI_XCLRFR_MASK, + SAI_XCLRFR_MASK); if (!sai->substream) { dev_err(&pdev->dev, "Device stopped. Spurious IRQ 0x%x\n", sr); @@ -503,8 +553,8 @@ static int stm32_sai_set_sysclk(struct snd_soc_dai *cpu_dai, int ret; if (dir == SND_SOC_CLOCK_OUT && sai->sai_mclk) { - ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, - SAI_XCR1_NODIV, + ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, + SAI_XCR1_NODIV, freq ? 
0 : SAI_XCR1_NODIV); if (ret < 0) return ret; @@ -583,7 +633,7 @@ static int stm32_sai_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, u32 tx_mask, slotr_mask |= SAI_XSLOTR_SLOTEN_MASK; - regmap_update_bits(sai->regmap, STM_SAI_SLOTR_REGX, slotr_mask, slotr); + stm32_sai_sub_reg_up(sai, STM_SAI_SLOTR_REGX, slotr_mask, slotr); sai->slot_width = slot_width; sai->slots = slots; @@ -665,7 +715,7 @@ static int stm32_sai_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) cr1_mask |= SAI_XCR1_CKSTR; frcr_mask |= SAI_XFRCR_FSPOL; - regmap_update_bits(sai->regmap, STM_SAI_FRCR_REGX, frcr_mask, frcr); + stm32_sai_sub_reg_up(sai, STM_SAI_FRCR_REGX, frcr_mask, frcr); /* DAI clock master masks */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { @@ -693,7 +743,7 @@ static int stm32_sai_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) cr1_mask |= SAI_XCR1_SLAVE; conf_update: - ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, cr1_mask, cr1); + ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, cr1_mask, cr1); if (ret < 0) { dev_err(cpu_dai->dev, "Failed to update CR1 register\n"); return ret; @@ -730,12 +780,12 @@ static int stm32_sai_startup(struct snd_pcm_substream *substream, } /* Enable ITs */ - regmap_write_bits(sai->regmap, STM_SAI_CLRFR_REGX, - SAI_XCLRFR_MASK, SAI_XCLRFR_MASK); + stm32_sai_sub_reg_wr(sai, STM_SAI_CLRFR_REGX, + SAI_XCLRFR_MASK, SAI_XCLRFR_MASK); imr = SAI_XIMR_OVRUDRIE; if (STM_SAI_IS_CAPTURE(sai)) { - regmap_read(sai->regmap, STM_SAI_CR2_REGX, &cr2); + stm32_sai_sub_reg_rd(sai, STM_SAI_CR2_REGX, &cr2); if (cr2 & SAI_XCR2_MUTECNT_MASK) imr |= SAI_XIMR_MUTEDETIE; } @@ -745,8 +795,8 @@ static int stm32_sai_startup(struct snd_pcm_substream *substream, else imr |= SAI_XIMR_AFSDETIE | SAI_XIMR_LFSDETIE; - regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, - SAI_XIMR_MASK, imr); + stm32_sai_sub_reg_up(sai, STM_SAI_IMR_REGX, + SAI_XIMR_MASK, imr); return 0; } @@ -763,10 +813,10 @@ static int stm32_sai_set_config(struct snd_soc_dai *cpu_dai, * SAI fifo threshold is set to half fifo, to keep enough space * for DMA incoming bursts. 
*/ - regmap_write_bits(sai->regmap, STM_SAI_CR2_REGX, - SAI_XCR2_FFLUSH | SAI_XCR2_FTH_MASK, - SAI_XCR2_FFLUSH | - SAI_XCR2_FTH_SET(STM_SAI_FIFO_TH_HALF)); + stm32_sai_sub_reg_wr(sai, STM_SAI_CR2_REGX, + SAI_XCR2_FFLUSH | SAI_XCR2_FTH_MASK, + SAI_XCR2_FFLUSH | + SAI_XCR2_FTH_SET(STM_SAI_FIFO_TH_HALF)); /* DS bits in CR1 not set for SPDIF (size forced to 24 bits).*/ if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) { @@ -795,7 +845,7 @@ static int stm32_sai_set_config(struct snd_soc_dai *cpu_dai, if ((sai->slots == 2) && (params_channels(params) == 1)) cr1 |= SAI_XCR1_MONO; - ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, cr1_mask, cr1); + ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, cr1_mask, cr1); if (ret < 0) { dev_err(cpu_dai->dev, "Failed to update CR1 register\n"); return ret; @@ -809,7 +859,7 @@ static int stm32_sai_set_slots(struct snd_soc_dai *cpu_dai) struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai); int slotr, slot_sz; - regmap_read(sai->regmap, STM_SAI_SLOTR_REGX, &slotr); + stm32_sai_sub_reg_rd(sai, STM_SAI_SLOTR_REGX, &slotr); /* * If SLOTSZ is set to auto in SLOTR, align slot width on data size @@ -831,16 +881,16 @@ static int stm32_sai_set_slots(struct snd_soc_dai *cpu_dai) sai->slots = 2; /* The number of slots in the audio frame is equal to NBSLOT[3:0] + 1*/ - regmap_update_bits(sai->regmap, STM_SAI_SLOTR_REGX, - SAI_XSLOTR_NBSLOT_MASK, - SAI_XSLOTR_NBSLOT_SET((sai->slots - 1))); + stm32_sai_sub_reg_up(sai, STM_SAI_SLOTR_REGX, + SAI_XSLOTR_NBSLOT_MASK, + SAI_XSLOTR_NBSLOT_SET((sai->slots - 1))); /* Set default slots mask if not already set from DT */ if (!(slotr & SAI_XSLOTR_SLOTEN_MASK)) { sai->slot_mask = (1 << sai->slots) - 1; - regmap_update_bits(sai->regmap, - STM_SAI_SLOTR_REGX, SAI_XSLOTR_SLOTEN_MASK, - SAI_XSLOTR_SLOTEN_SET(sai->slot_mask)); + stm32_sai_sub_reg_up(sai, + STM_SAI_SLOTR_REGX, SAI_XSLOTR_SLOTEN_MASK, + SAI_XSLOTR_SLOTEN_SET(sai->slot_mask)); } dev_dbg(cpu_dai->dev, "Slots %d, slot width %d\n", @@ -870,14 +920,14 @@ static void stm32_sai_set_frame(struct snd_soc_dai *cpu_dai) dev_dbg(cpu_dai->dev, "Frame length %d, frame active %d\n", sai->fs_length, fs_active); - regmap_update_bits(sai->regmap, STM_SAI_FRCR_REGX, frcr_mask, frcr); + stm32_sai_sub_reg_up(sai, STM_SAI_FRCR_REGX, frcr_mask, frcr); if ((sai->fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_LSB) { offset = sai->slot_width - sai->data_size; - regmap_update_bits(sai->regmap, STM_SAI_SLOTR_REGX, - SAI_XSLOTR_FBOFF_MASK, - SAI_XSLOTR_FBOFF_SET(offset)); + stm32_sai_sub_reg_up(sai, STM_SAI_SLOTR_REGX, + SAI_XSLOTR_FBOFF_MASK, + SAI_XSLOTR_FBOFF_SET(offset)); } } @@ -994,9 +1044,9 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai, return -EINVAL; } - regmap_update_bits(sai->regmap, - STM_SAI_CR1_REGX, - SAI_XCR1_OSR, cr1); + stm32_sai_sub_reg_up(sai, + STM_SAI_CR1_REGX, + SAI_XCR1_OSR, cr1); div = stm32_sai_get_clk_div(sai, sai_clk_rate, sai->mclk_rate); @@ -1058,12 +1108,12 @@ static int stm32_sai_trigger(struct snd_pcm_substream *substream, int cmd, case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: dev_dbg(cpu_dai->dev, "Enable DMA and SAI\n"); - regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, - SAI_XCR1_DMAEN, SAI_XCR1_DMAEN); + stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, + SAI_XCR1_DMAEN, SAI_XCR1_DMAEN); /* Enable SAI */ - ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, - SAI_XCR1_SAIEN, SAI_XCR1_SAIEN); + ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, + SAI_XCR1_SAIEN, SAI_XCR1_SAIEN); if (ret < 0) dev_err(cpu_dai->dev, "Failed to update CR1 
register\n"); break; @@ -1072,16 +1122,16 @@ static int stm32_sai_trigger(struct snd_pcm_substream *substream, int cmd, case SNDRV_PCM_TRIGGER_STOP: dev_dbg(cpu_dai->dev, "Disable DMA and SAI\n"); - regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, - SAI_XIMR_MASK, 0); + stm32_sai_sub_reg_up(sai, STM_SAI_IMR_REGX, + SAI_XIMR_MASK, 0); - regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, - SAI_XCR1_SAIEN, - (unsigned int)~SAI_XCR1_SAIEN); + stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, + SAI_XCR1_SAIEN, + (unsigned int)~SAI_XCR1_SAIEN); - ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, - SAI_XCR1_DMAEN, - (unsigned int)~SAI_XCR1_DMAEN); + ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, + SAI_XCR1_DMAEN, + (unsigned int)~SAI_XCR1_DMAEN); if (ret < 0) dev_err(cpu_dai->dev, "Failed to update CR1 register\n"); @@ -1101,7 +1151,7 @@ static void stm32_sai_shutdown(struct snd_pcm_substream *substream, struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai); unsigned long flags; - regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0); + stm32_sai_sub_reg_up(sai, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0); clk_disable_unprepare(sai->sai_ck); @@ -1169,7 +1219,7 @@ static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai) cr1_mask |= SAI_XCR1_SYNCEN_MASK; cr1 |= SAI_XCR1_SYNCEN_SET(sai->sync); - return regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, cr1_mask, cr1); + return stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, cr1_mask, cr1); } static const struct snd_soc_dai_ops stm32_sai_pcm_dai_ops = { @@ -1322,8 +1372,13 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev, if (STM_SAI_HAS_PDM(sai) && STM_SAI_IS_SUB_A(sai)) sai->regmap_config = &stm32_sai_sub_regmap_config_h7; - sai->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "sai_ck", - base, sai->regmap_config); + /* + * Do not manage peripheral clock through regmap framework as this + * can lead to circular locking issue with sai master clock provider. + * Manage peripheral clock directly in driver instead. 
+ */ + sai->regmap = devm_regmap_init_mmio(&pdev->dev, base, + sai->regmap_config); if (IS_ERR(sai->regmap)) { dev_err(&pdev->dev, "Failed to initialize MMIO\n"); return PTR_ERR(sai->regmap); @@ -1420,6 +1475,10 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev, return PTR_ERR(sai->sai_ck); } + ret = clk_prepare(sai->pdata->pclk); + if (ret < 0) + return ret; + if (STM_SAI_IS_F4(sai->pdata)) return 0; @@ -1501,22 +1560,48 @@ static int stm32_sai_sub_probe(struct platform_device *pdev) return 0; } +static int stm32_sai_sub_remove(struct platform_device *pdev) +{ + struct stm32_sai_sub_data *sai = dev_get_drvdata(&pdev->dev); + + clk_unprepare(sai->pdata->pclk); + + return 0; +} + #ifdef CONFIG_PM_SLEEP static int stm32_sai_sub_suspend(struct device *dev) { struct stm32_sai_sub_data *sai = dev_get_drvdata(dev); + int ret; + + ret = clk_enable(sai->pdata->pclk); + if (ret < 0) + return ret; regcache_cache_only(sai->regmap, true); regcache_mark_dirty(sai->regmap); + + clk_disable(sai->pdata->pclk); + return 0; } static int stm32_sai_sub_resume(struct device *dev) { struct stm32_sai_sub_data *sai = dev_get_drvdata(dev); + int ret; + + ret = clk_enable(sai->pdata->pclk); + if (ret < 0) + return ret; regcache_cache_only(sai->regmap, false); - return regcache_sync(sai->regmap); + ret = regcache_sync(sai->regmap); + + clk_disable(sai->pdata->pclk); + + return ret; } #endif /* CONFIG_PM_SLEEP */ @@ -1531,6 +1616,7 @@ static struct platform_driver stm32_sai_sub_driver = { .pm = &stm32_sai_sub_pm_ops, }, .probe = stm32_sai_sub_probe, + .remove = stm32_sai_sub_remove, }; module_platform_driver(stm32_sai_sub_driver); diff --git a/sound/soc/stm/stm32_spdifrx.c b/sound/soc/stm/stm32_spdifrx.c index 3fd28ee01675..3769d9ce5dbe 100644 --- a/sound/soc/stm/stm32_spdifrx.c +++ b/sound/soc/stm/stm32_spdifrx.c @@ -12,7 +12,6 @@ #include <linux/delay.h> #include <linux/module.h> #include <linux/of_platform.h> -#include <linux/pinctrl/consumer.h> #include <linux/regmap.h> #include <linux/reset.h> @@ -220,6 +219,7 @@ * @slave_config: dma slave channel runtime config pointer * @phys_addr: SPDIFRX registers physical base address * @lock: synchronization enabling lock + * @irq_lock: prevent race condition with IRQ on stream state * @cs: channel status buffer * @ub: user data buffer * @irq: SPDIFRX interrupt line @@ -240,6 +240,7 @@ struct stm32_spdifrx_data { struct dma_slave_config slave_config; dma_addr_t phys_addr; spinlock_t lock; /* Sync enabling lock */ + spinlock_t irq_lock; /* Prevent race condition on stream state */ unsigned char cs[SPDIFRX_CS_BYTES_NB]; unsigned char ub[SPDIFRX_UB_BYTES_NB]; int irq; @@ -320,6 +321,7 @@ static void stm32_spdifrx_dma_ctrl_stop(struct stm32_spdifrx_data *spdifrx) static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx) { int cr, cr_mask, imr, ret; + unsigned long flags; /* Enable IRQs */ imr = SPDIFRX_IMR_IFEIE | SPDIFRX_IMR_SYNCDIE | SPDIFRX_IMR_PERRIE; @@ -327,7 +329,7 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx) if (ret) return ret; - spin_lock(&spdifrx->lock); + spin_lock_irqsave(&spdifrx->lock, flags); spdifrx->refcount++; @@ -362,7 +364,7 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx) "Failed to start synchronization\n"); } - spin_unlock(&spdifrx->lock); + spin_unlock_irqrestore(&spdifrx->lock, flags); return ret; } @@ -370,11 +372,12 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx) static void stm32_spdifrx_stop(struct stm32_spdifrx_data *spdifrx) { int cr, 
cr_mask, reg; + unsigned long flags; - spin_lock(&spdifrx->lock); + spin_lock_irqsave(&spdifrx->lock, flags); if (--spdifrx->refcount) { - spin_unlock(&spdifrx->lock); + spin_unlock_irqrestore(&spdifrx->lock, flags); return; } @@ -393,7 +396,7 @@ static void stm32_spdifrx_stop(struct stm32_spdifrx_data *spdifrx) regmap_read(spdifrx->regmap, STM32_SPDIFRX_DR, ®); regmap_read(spdifrx->regmap, STM32_SPDIFRX_CSR, ®); - spin_unlock(&spdifrx->lock); + spin_unlock_irqrestore(&spdifrx->lock, flags); } static int stm32_spdifrx_dma_ctrl_register(struct device *dev, @@ -480,8 +483,6 @@ static int stm32_spdifrx_get_ctrl_data(struct stm32_spdifrx_data *spdifrx) memset(spdifrx->cs, 0, SPDIFRX_CS_BYTES_NB); memset(spdifrx->ub, 0, SPDIFRX_UB_BYTES_NB); - pinctrl_pm_select_default_state(&spdifrx->pdev->dev); - ret = stm32_spdifrx_dma_ctrl_start(spdifrx); if (ret < 0) return ret; @@ -513,7 +514,6 @@ static int stm32_spdifrx_get_ctrl_data(struct stm32_spdifrx_data *spdifrx) end: clk_disable_unprepare(spdifrx->kclk); - pinctrl_pm_select_sleep_state(&spdifrx->pdev->dev); return ret; } @@ -665,7 +665,6 @@ static const struct regmap_config stm32_h7_spdifrx_regmap_conf = { static irqreturn_t stm32_spdifrx_isr(int irq, void *devid) { struct stm32_spdifrx_data *spdifrx = (struct stm32_spdifrx_data *)devid; - struct snd_pcm_substream *substream = spdifrx->substream; struct platform_device *pdev = spdifrx->pdev; unsigned int cr, mask, sr, imr; unsigned int flags, sync_state; @@ -745,14 +744,19 @@ static irqreturn_t stm32_spdifrx_isr(int irq, void *devid) return IRQ_HANDLED; } - if (substream) - snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED); + spin_lock(&spdifrx->irq_lock); + if (spdifrx->substream) + snd_pcm_stop(spdifrx->substream, + SNDRV_PCM_STATE_DISCONNECTED); + spin_unlock(&spdifrx->irq_lock); return IRQ_HANDLED; } - if (err_xrun && substream) - snd_pcm_stop_xrun(substream); + spin_lock(&spdifrx->irq_lock); + if (err_xrun && spdifrx->substream) + snd_pcm_stop_xrun(spdifrx->substream); + spin_unlock(&spdifrx->irq_lock); return IRQ_HANDLED; } @@ -761,9 +765,12 @@ static int stm32_spdifrx_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai); + unsigned long flags; int ret; + spin_lock_irqsave(&spdifrx->irq_lock, flags); spdifrx->substream = substream; + spin_unlock_irqrestore(&spdifrx->irq_lock, flags); ret = clk_prepare_enable(spdifrx->kclk); if (ret) @@ -839,8 +846,12 @@ static void stm32_spdifrx_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai); + unsigned long flags; + spin_lock_irqsave(&spdifrx->irq_lock, flags); spdifrx->substream = NULL; + spin_unlock_irqrestore(&spdifrx->irq_lock, flags); + clk_disable_unprepare(spdifrx->kclk); } @@ -944,6 +955,7 @@ static int stm32_spdifrx_probe(struct platform_device *pdev) spdifrx->pdev = pdev; init_completion(&spdifrx->cs_completion); spin_lock_init(&spdifrx->lock); + spin_lock_init(&spdifrx->irq_lock); platform_set_drvdata(pdev, spdifrx); diff --git a/sound/usb/card.h b/sound/usb/card.h index 2991b9986f66..395403a2d33f 100644 --- a/sound/usb/card.h +++ b/sound/usb/card.h @@ -145,6 +145,7 @@ struct snd_usb_substream { struct snd_usb_endpoint *sync_endpoint; unsigned long flags; bool need_setup_ep; /* (re)configure EP at prepare? */ + bool need_setup_fmt; /* (re)configure fmt after resume? 
*/ unsigned int speed; /* USB_SPEED_XXX */ u64 formats; /* format bitmasks (all or'ed) */ diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 9c8930bb00c8..0e4eab96e23e 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -370,7 +370,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, add_sync_ep_from_ifnum: iface = usb_ifnum_to_if(dev, ifnum); - if (!iface || iface->num_altsetting == 0) + if (!iface || iface->num_altsetting < 2) return -EINVAL; alts = &iface->altsetting[1]; @@ -506,15 +506,15 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt) if (WARN_ON(!iface)) return -EINVAL; alts = usb_altnum_to_altsetting(iface, fmt->altsetting); - altsd = get_iface_desc(alts); - if (WARN_ON(altsd->bAlternateSetting != fmt->altsetting)) + if (WARN_ON(!alts)) return -EINVAL; + altsd = get_iface_desc(alts); - if (fmt == subs->cur_audiofmt) + if (fmt == subs->cur_audiofmt && !subs->need_setup_fmt) return 0; /* close the old interface */ - if (subs->interface >= 0 && subs->interface != fmt->iface) { + if (subs->interface >= 0 && (subs->interface != fmt->iface || subs->need_setup_fmt)) { if (!subs->stream->chip->keep_iface) { err = usb_set_interface(subs->dev, subs->interface, 0); if (err < 0) { @@ -528,6 +528,9 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt) subs->altset_idx = 0; } + if (subs->need_setup_fmt) + subs->need_setup_fmt = false; + /* set interface */ if (iface->cur_altsetting != alts) { err = snd_usb_select_mode_quirk(subs, fmt); @@ -1728,6 +1731,13 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea subs->data_endpoint->retire_data_urb = retire_playback_urb; subs->running = 0; return 0; + case SNDRV_PCM_TRIGGER_SUSPEND: + if (subs->stream->chip->setup_fmt_after_resume_quirk) { + stop_endpoints(subs, true); + subs->need_setup_fmt = true; + return 0; + } + break; } return -EINVAL; @@ -1760,6 +1770,13 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream subs->data_endpoint->retire_data_urb = retire_capture_urb; subs->running = 1; return 0; + case SNDRV_PCM_TRIGGER_SUSPEND: + if (subs->stream->chip->setup_fmt_after_resume_quirk) { + stop_endpoints(subs, true); + subs->need_setup_fmt = true; + return 0; + } + break; } return -EINVAL; diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 70c338f3ae24..d187aa6d50db 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -3466,7 +3466,8 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), .vendor_name = "Dell", .product_name = "WD19 Dock", .profile_name = "Dell-WD15-Dock", - .ifnum = QUIRK_NO_INTERFACE + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_SETUP_FMT_AFTER_RESUME } }, /* MOTU Microbook II */ diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 349e1e52996d..82184036437b 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -508,6 +508,16 @@ static int create_standard_mixer_quirk(struct snd_usb_audio *chip, return snd_usb_create_mixer(chip, quirk->ifnum, 0); } + +static int setup_fmt_after_resume_quirk(struct snd_usb_audio *chip, + struct usb_interface *iface, + struct usb_driver *driver, + const struct snd_usb_audio_quirk *quirk) +{ + chip->setup_fmt_after_resume_quirk = 1; + return 1; /* Continue with creating streams and mixer */ +} + /* * audio-interface quirks * @@ -546,6 +556,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip, [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk, [QUIRK_AUDIO_ALIGN_TRANSFER] = 
create_align_transfer_quirk, [QUIRK_AUDIO_STANDARD_MIXER] = create_standard_mixer_quirk, + [QUIRK_SETUP_FMT_AFTER_RESUME] = setup_fmt_after_resume_quirk, }; if (quirk->type < QUIRK_TYPE_COUNT) { @@ -1386,6 +1397,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */ case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */ + case USB_ID(0x05a7, 0x1020): /* Bose Companion 5 */ case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */ case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */ case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */ diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index ff3cbf653de8..6fe3ab582ec6 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h @@ -33,7 +33,7 @@ struct snd_usb_audio { wait_queue_head_t shutdown_wait; unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */ unsigned int tx_length_quirk:1; /* Put length specifier in transfers */ - + unsigned int setup_fmt_after_resume_quirk:1; /* setup the format to interface after resume */ int num_interfaces; int num_suspended_intf; int sample_rate_read_error; @@ -98,6 +98,7 @@ enum quirk_type { QUIRK_AUDIO_EDIROL_UAXX, QUIRK_AUDIO_ALIGN_TRANSFER, QUIRK_AUDIO_STANDARD_MIXER, + QUIRK_SETUP_FMT_AFTER_RESUME, QUIRK_TYPE_COUNT }; diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h index 2769360f195c..03cd7c19a683 100644 --- a/tools/arch/arm/include/uapi/asm/kvm.h +++ b/tools/arch/arm/include/uapi/asm/kvm.h @@ -131,8 +131,9 @@ struct kvm_vcpu_events { struct { __u8 serror_pending; __u8 serror_has_esr; + __u8 ext_dabt_pending; /* Align it to 8 bytes */ - __u8 pad[6]; + __u8 pad[5]; __u64 serror_esr; } exception; __u32 reserved[12]; diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h index 67c21f9bdbad..820e5751ada7 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h @@ -164,8 +164,9 @@ struct kvm_vcpu_events { struct { __u8 serror_pending; __u8 serror_has_esr; + __u8 ext_dabt_pending; /* Align it to 8 bytes */ - __u8 pad[6]; + __u8 pad[5]; __u64 serror_esr; } exception; __u32 reserved[12]; @@ -323,6 +324,8 @@ struct kvm_vcpu_events { #define KVM_ARM_VCPU_TIMER_CTRL 1 #define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0 #define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 +#define KVM_ARM_VCPU_PVTIME_CTRL 2 +#define KVM_ARM_VCPU_PVTIME_IPA 0 /* KVM_IRQ_LINE irq field index values */ #define KVM_ARM_IRQ_VCPU2_SHIFT 28 diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h index b0f72dea8b11..264e266a85bf 100644 --- a/tools/arch/powerpc/include/uapi/asm/kvm.h +++ b/tools/arch/powerpc/include/uapi/asm/kvm.h @@ -667,6 +667,8 @@ struct kvm_ppc_cpu_char { /* PPC64 eXternal Interrupt Controller Specification */ #define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */ +#define KVM_DEV_XICS_GRP_CTRL 2 +#define KVM_DEV_XICS_NR_SERVERS 1 /* Layout of 64-bit source attribute values */ #define KVM_XICS_DESTINATION_SHIFT 0 @@ -683,6 +685,7 @@ struct kvm_ppc_cpu_char { #define KVM_DEV_XIVE_GRP_CTRL 1 #define KVM_DEV_XIVE_RESET 1 #define KVM_DEV_XIVE_EQ_SYNC 2 +#define KVM_DEV_XIVE_NR_SERVERS 3 #define KVM_DEV_XIVE_GRP_SOURCE 2 /* 64-bit source identifier */ #define KVM_DEV_XIVE_GRP_SOURCE_CONFIG 3 /* 64-bit source identifier */ #define KVM_DEV_XIVE_GRP_EQ_CONFIG 4 /* 64-bit EQ identifier */ diff --git 
a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 0652d3eed9bd..e9b62498fe75 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -292,6 +292,7 @@ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ +#define X86_FEATURE_RDPRU (13*32+ 4) /* Read processor register at user level */ #define X86_FEATURE_WBNOINVD (13*32+ 9) /* WBNOINVD instruction */ #define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ @@ -399,5 +400,7 @@ #define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */ #define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */ #define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */ +#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ +#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index 20ce682a2540..084e98da04a7 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -93,6 +93,18 @@ * Microarchitectural Data * Sampling (MDS) vulnerabilities. */ +#define ARCH_CAP_PSCHANGE_MC_NO BIT(6) /* + * The processor is not susceptible to a + * machine check error due to modifying the + * code page size along with either the + * physical address or cache type + * without TLB invalidation. + */ +#define ARCH_CAP_TSX_CTRL_MSR BIT(7) /* MSR for TSX control is available. */ +#define ARCH_CAP_TAA_NO BIT(8) /* + * Not susceptible to + * TSX Async Abort (TAA) vulnerabilities. + */ #define MSR_IA32_FLUSH_CMD 0x0000010b #define L1D_FLUSH BIT(0) /* @@ -103,6 +115,10 @@ #define MSR_IA32_BBL_CR_CTL 0x00000119 #define MSR_IA32_BBL_CR_CTL3 0x0000011e +#define MSR_IA32_TSX_CTRL 0x00000122 +#define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */ +#define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */ + #define MSR_IA32_SYSENTER_CS 0x00000174 #define MSR_IA32_SYSENTER_ESP 0x00000175 #define MSR_IA32_SYSENTER_EIP 0x00000176 @@ -393,6 +409,8 @@ #define MSR_AMD_PSTATE_DEF_BASE 0xc0010064 #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140 #define MSR_AMD64_OSVW_STATUS 0xc0010141 +#define MSR_AMD_PPIN_CTL 0xc00102f0 +#define MSR_AMD_PPIN 0xc00102f1 #define MSR_AMD64_LS_CFG 0xc0011020 #define MSR_AMD64_DC_CFG 0xc0011022 #define MSR_AMD64_BU_CFG2 0xc001102a diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S index 92748660ba51..df767afc690f 100644 --- a/tools/arch/x86/lib/memcpy_64.S +++ b/tools/arch/x86/lib/memcpy_64.S @@ -28,8 +28,8 @@ * Output: * rax original destination */ -ENTRY(__memcpy) -ENTRY(memcpy) +SYM_FUNC_START_ALIAS(__memcpy) +SYM_FUNC_START_LOCAL(memcpy) ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \ "jmp memcpy_erms", X86_FEATURE_ERMS @@ -41,8 +41,8 @@ ENTRY(memcpy) movl %edx, %ecx rep movsb ret -ENDPROC(memcpy) -ENDPROC(__memcpy) +SYM_FUNC_END(memcpy) +SYM_FUNC_END_ALIAS(__memcpy) EXPORT_SYMBOL(memcpy) EXPORT_SYMBOL(__memcpy) @@ -50,14 +50,14 @@ EXPORT_SYMBOL(__memcpy) * memcpy_erms() - enhanced fast string memcpy. 
This is faster and * simpler than memcpy. Use memcpy_erms when possible. */ -ENTRY(memcpy_erms) +SYM_FUNC_START(memcpy_erms) movq %rdi, %rax movq %rdx, %rcx rep movsb ret -ENDPROC(memcpy_erms) +SYM_FUNC_END(memcpy_erms) -ENTRY(memcpy_orig) +SYM_FUNC_START(memcpy_orig) movq %rdi, %rax cmpq $0x20, %rdx @@ -182,7 +182,7 @@ ENTRY(memcpy_orig) .Lend: retq -ENDPROC(memcpy_orig) +SYM_FUNC_END(memcpy_orig) #ifndef CONFIG_UML @@ -193,7 +193,7 @@ MCSAFE_TEST_CTL * Note that we only catch machine checks when reading the source addresses. * Writes to target are posted and don't generate machine checks. */ -ENTRY(__memcpy_mcsafe) +SYM_FUNC_START(__memcpy_mcsafe) cmpl $8, %edx /* Less than 8 bytes? Go to byte copy loop */ jb .L_no_whole_words @@ -260,7 +260,7 @@ ENTRY(__memcpy_mcsafe) xorl %eax, %eax .L_done: ret -ENDPROC(__memcpy_mcsafe) +SYM_FUNC_END(__memcpy_mcsafe) EXPORT_SYMBOL_GPL(__memcpy_mcsafe) .section .fixup, "ax" diff --git a/tools/arch/x86/lib/memset_64.S b/tools/arch/x86/lib/memset_64.S index f8f3dc0a6690..fd5d25a474b7 100644 --- a/tools/arch/x86/lib/memset_64.S +++ b/tools/arch/x86/lib/memset_64.S @@ -18,8 +18,8 @@ * * rax original destination */ -ENTRY(memset) -ENTRY(__memset) +SYM_FUNC_START_ALIAS(memset) +SYM_FUNC_START(__memset) /* * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended * to use it when possible. If not available, use fast string instructions. @@ -42,8 +42,8 @@ ENTRY(__memset) rep stosb movq %r9,%rax ret -ENDPROC(memset) -ENDPROC(__memset) +SYM_FUNC_END(__memset) +SYM_FUNC_END_ALIAS(memset) /* * ISO C memset - set a memory block to a byte value. This function uses @@ -56,16 +56,16 @@ ENDPROC(__memset) * * rax original destination */ -ENTRY(memset_erms) +SYM_FUNC_START(memset_erms) movq %rdi,%r9 movb %sil,%al movq %rdx,%rcx rep stosb movq %r9,%rax ret -ENDPROC(memset_erms) +SYM_FUNC_END(memset_erms) -ENTRY(memset_orig) +SYM_FUNC_START(memset_orig) movq %rdi,%r10 /* expand byte value */ @@ -136,4 +136,4 @@ ENTRY(memset_orig) subq %r8,%rdx jmp .Lafter_bad_alignment .Lfinal: -ENDPROC(memset_orig) +SYM_FUNC_END(memset_orig) diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index d66131f69689..397e5716ab6d 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c @@ -26,7 +26,7 @@ static void btf_dumper_ptr(const void *data, json_writer_t *jw, bool is_plain_text) { if (is_plain_text) - jsonw_printf(jw, "%p", data); + jsonw_printf(jw, "%p", *(void **)data); else jsonw_printf(jw, "%lu", *(unsigned long *)data); } diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 4535c863d2cd..2ce9c5ba1934 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -493,7 +493,7 @@ static int do_dump(int argc, char **argv) info = &info_linear->info; if (mode == DUMP_JITED) { - if (info->jited_prog_len == 0) { + if (info->jited_prog_len == 0 || !info->jited_prog_insns) { p_info("no instructions returned"); goto err_free; } diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c index 494d7ae3614d..5b91ee65a080 100644 --- a/tools/bpf/bpftool/xlated_dumper.c +++ b/tools/bpf/bpftool/xlated_dumper.c @@ -174,7 +174,7 @@ static const char *print_call(void *private_data, struct kernel_sym *sym; if (insn->src_reg == BPF_PSEUDO_CALL && - (__u32) insn->imm < dd->nr_jited_ksyms) + (__u32) insn->imm < dd->nr_jited_ksyms && dd->jited_ksyms) address = dd->jited_ksyms[insn->imm]; sym = kernel_syms_search(dd, address); diff --git a/tools/include/uapi/drm/drm.h 
b/tools/include/uapi/drm/drm.h index 8a5b2f8f8eb9..868bf7996c0f 100644 --- a/tools/include/uapi/drm/drm.h +++ b/tools/include/uapi/drm/drm.h @@ -778,11 +778,12 @@ struct drm_syncobj_array { __u32 pad; }; +#define DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED (1 << 0) /* last available point on timeline syncobj */ struct drm_syncobj_timeline_array { __u64 handles; __u64 points; __u32 count_handles; - __u32 pad; + __u32 flags; }; diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h index 469dc512cca3..5400d7e057f1 100644 --- a/tools/include/uapi/drm/i915_drm.h +++ b/tools/include/uapi/drm/i915_drm.h @@ -611,6 +611,13 @@ typedef struct drm_i915_irq_wait { * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT. */ #define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53 + +/* + * Revision of the i915-perf uAPI. The value returned helps determine what + * i915-perf features are available. See drm_i915_perf_property_id. + */ +#define I915_PARAM_PERF_REVISION 54 + /* Must be kept compact -- no holes and well documented */ typedef struct drm_i915_getparam { @@ -1565,6 +1572,21 @@ struct drm_i915_gem_context_param { * i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND) */ #define I915_CONTEXT_PARAM_ENGINES 0xa + +/* + * I915_CONTEXT_PARAM_PERSISTENCE: + * + * Allow the context and active rendering to survive the process until + * completion. Persistence allows fire-and-forget clients to queue up a + * bunch of work, hand the output over to a display server and then quit. + * If the context is marked as not persistent, upon closing (either via + * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure + * or process termination), the context and any outstanding requests will be + * cancelled (and exported fences for cancelled requests marked as -EIO). + * + * By default, new contexts allow persistence. + */ +#define I915_CONTEXT_PARAM_PERSISTENCE 0xb /* Must be kept compact -- no holes and well documented */ __u64 value; @@ -1844,23 +1866,31 @@ enum drm_i915_perf_property_id { * Open the stream for a specific context handle (as used with * execbuffer2). A stream opened for a specific context this way * won't typically require root privileges. + * + * This property is available in perf revision 1. */ DRM_I915_PERF_PROP_CTX_HANDLE = 1, /** * A value of 1 requests the inclusion of raw OA unit reports as * part of stream samples. + * + * This property is available in perf revision 1. */ DRM_I915_PERF_PROP_SAMPLE_OA, /** * The value specifies which set of OA unit metrics should be * be configured, defining the contents of any OA unit reports. + * + * This property is available in perf revision 1. */ DRM_I915_PERF_PROP_OA_METRICS_SET, /** * The value specifies the size and layout of OA unit reports. + * + * This property is available in perf revision 1. */ DRM_I915_PERF_PROP_OA_FORMAT, @@ -1870,9 +1900,22 @@ enum drm_i915_perf_property_id { * from this exponent as follows: * * 80ns * 2^(period_exponent + 1) + * + * This property is available in perf revision 1. */ DRM_I915_PERF_PROP_OA_EXPONENT, + /** + * Specifying this property is only valid when specify a context to + * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property + * will hold preemption of the particular context we want to gather + * performance data about. The execbuf2 submissions must include a + * drm_i915_gem_execbuffer_ext_perf parameter for this to apply. + * + * This property is available in perf revision 3. 
+ */ + DRM_I915_PERF_PROP_HOLD_PREEMPTION, + DRM_I915_PERF_PROP_MAX /* non-ABI */ }; @@ -1901,6 +1944,8 @@ struct drm_i915_perf_open_param { * to close and re-open a stream with the same configuration. * * It's undefined whether any pending data for the stream will be lost. + * + * This ioctl is available in perf revision 1. */ #define I915_PERF_IOCTL_ENABLE _IO('i', 0x0) @@ -1908,10 +1953,25 @@ struct drm_i915_perf_open_param { * Disable data capture for a stream. * * It is an error to try and read a stream that is disabled. + * + * This ioctl is available in perf revision 1. */ #define I915_PERF_IOCTL_DISABLE _IO('i', 0x1) /** + * Change metrics_set captured by a stream. + * + * If the stream is bound to a specific context, the configuration change + * will performed inline with that context such that it takes effect before + * the next execbuf submission. + * + * Returns the previously bound metrics set id, or a negative error code. + * + * This ioctl is available in perf revision 2. + */ +#define I915_PERF_IOCTL_CONFIG _IO('i', 0x2) + +/** * Common to all i915 perf records */ struct drm_i915_perf_record_header { @@ -1984,6 +2044,7 @@ struct drm_i915_query_item { __u64 query_id; #define DRM_I915_QUERY_TOPOLOGY_INFO 1 #define DRM_I915_QUERY_ENGINE_INFO 2 +#define DRM_I915_QUERY_PERF_CONFIG 3 /* Must be kept compact -- no holes and well documented */ /* @@ -1995,9 +2056,18 @@ struct drm_i915_query_item { __s32 length; /* - * Unused for now. Must be cleared to zero. + * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0. + * + * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the + * following : + * - DRM_I915_QUERY_PERF_CONFIG_LIST + * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID + * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID */ __u32 flags; +#define DRM_I915_QUERY_PERF_CONFIG_LIST 1 +#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2 +#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3 /* * Data will be written at the location pointed by data_ptr when the @@ -2033,8 +2103,10 @@ struct drm_i915_query { * (data[X / 8] >> (X % 8)) & 1 * * - the subslice mask for each slice with one bit per subslice telling - * whether a subslice is available. The availability of subslice Y in slice - * X can be queried with the following formula : + * whether a subslice is available. Gen12 has dual-subslices, which are + * similar to two gen11 subslices. For gen12, this array represents dual- + * subslices. The availability of subslice Y in slice X can be queried + * with the following formula : * * (data[subslice_offset + * X * subslice_stride + @@ -2123,6 +2195,56 @@ struct drm_i915_query_engine_info { struct drm_i915_engine_info engines[]; }; +/* + * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG. + */ +struct drm_i915_query_perf_config { + union { + /* + * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets + * this fields to the number of configurations available. + */ + __u64 n_configs; + + /* + * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, + * i915 will use the value in this field as configuration + * identifier to decide what data to write into config_ptr. + */ + __u64 config; + + /* + * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, + * i915 will use the value in this field as configuration + * identifier to decide what data to write into config_ptr. + * + * String formatted like "%08x-%04x-%04x-%04x-%012x" + */ + char uuid[36]; + }; + + /* + * Unused for now. Must be cleared to zero. 
+ */ + __u32 flags; + + /* + * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will + * write an array of __u64 of configuration identifiers. + * + * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA, i915 will + * write a struct drm_i915_perf_oa_config. If the following fields of + * drm_i915_perf_oa_config are set not set to 0, i915 will write into + * the associated pointers the values of submitted when the + * configuration was created : + * + * - n_mux_regs + * - n_boolean_regs + * - n_flex_regs + */ + __u8 data[]; +}; + #if defined(__cplusplus) } #endif diff --git a/tools/include/uapi/linux/fscrypt.h b/tools/include/uapi/linux/fscrypt.h index 39ccfe9311c3..1beb174ad950 100644 --- a/tools/include/uapi/linux/fscrypt.h +++ b/tools/include/uapi/linux/fscrypt.h @@ -17,7 +17,8 @@ #define FSCRYPT_POLICY_FLAGS_PAD_32 0x03 #define FSCRYPT_POLICY_FLAGS_PAD_MASK 0x03 #define FSCRYPT_POLICY_FLAG_DIRECT_KEY 0x04 -#define FSCRYPT_POLICY_FLAGS_VALID 0x07 +#define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 0x08 +#define FSCRYPT_POLICY_FLAGS_VALID 0x0F /* Encryption algorithms */ #define FSCRYPT_MODE_AES_256_XTS 1 diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 52641d8ca9e8..f0a16b4adbbd 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -235,6 +235,7 @@ struct kvm_hyperv_exit { #define KVM_EXIT_S390_STSI 25 #define KVM_EXIT_IOAPIC_EOI 26 #define KVM_EXIT_HYPERV 27 +#define KVM_EXIT_ARM_NISV 28 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -394,6 +395,11 @@ struct kvm_run { } eoi; /* KVM_EXIT_HYPERV */ struct kvm_hyperv_exit hyperv; + /* KVM_EXIT_ARM_NISV */ + struct { + __u64 esr_iss; + __u64 fault_ipa; + } arm_nisv; /* Fix the size of the union. */ char padding[256]; }; @@ -1000,6 +1006,9 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_PMU_EVENT_FILTER 173 #define KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 174 #define KVM_CAP_HYPERV_DIRECT_TLBFLUSH 175 +#define KVM_CAP_PPC_GUEST_DEBUG_SSTEP 176 +#define KVM_CAP_ARM_NISV_TO_USER 177 +#define KVM_CAP_ARM_INJECT_EXT_DABT 178 #ifdef KVM_CAP_IRQ_ROUTING @@ -1227,6 +1236,8 @@ enum kvm_device_type { #define KVM_DEV_TYPE_ARM_VGIC_ITS KVM_DEV_TYPE_ARM_VGIC_ITS KVM_DEV_TYPE_XIVE, #define KVM_DEV_TYPE_XIVE KVM_DEV_TYPE_XIVE + KVM_DEV_TYPE_ARM_PV_TIME, +#define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_MAX, }; @@ -1337,6 +1348,7 @@ struct kvm_s390_ucas_mapping { #define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char) /* Available with KVM_CAP_PMU_EVENT_FILTER */ #define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter) +#define KVM_PPC_SVM_OFF _IO(KVMIO, 0xb3) /* ioctl for vm fd */ #define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) diff --git a/tools/include/uapi/linux/sched.h b/tools/include/uapi/linux/sched.h index 99335e1f4a27..4a0217832464 100644 --- a/tools/include/uapi/linux/sched.h +++ b/tools/include/uapi/linux/sched.h @@ -33,27 +33,48 @@ #define CLONE_NEWNET 0x40000000 /* New network namespace */ #define CLONE_IO 0x80000000 /* Clone io context */ +/* Flags for the clone3() syscall. */ +#define CLONE_CLEAR_SIGHAND 0x100000000ULL /* Clear any signal handler and reset to SIG_DFL. */ + #ifndef __ASSEMBLY__ /** * struct clone_args - arguments for the clone3 syscall - * @flags: Flags for the new process as listed above. - * All flags are valid except for CSIGNAL and - * CLONE_DETACHED. - * @pidfd: If CLONE_PIDFD is set, a pidfd will be - * returned in this argument. 
- * @child_tid: If CLONE_CHILD_SETTID is set, the TID of the - * child process will be returned in the child's - * memory. - * @parent_tid: If CLONE_PARENT_SETTID is set, the TID of - * the child process will be returned in the - * parent's memory. - * @exit_signal: The exit_signal the parent process will be - * sent when the child exits. - * @stack: Specify the location of the stack for the - * child process. - * @stack_size: The size of the stack for the child process. - * @tls: If CLONE_SETTLS is set, the tls descriptor - * is set to tls. + * @flags: Flags for the new process as listed above. + * All flags are valid except for CSIGNAL and + * CLONE_DETACHED. + * @pidfd: If CLONE_PIDFD is set, a pidfd will be + * returned in this argument. + * @child_tid: If CLONE_CHILD_SETTID is set, the TID of the + * child process will be returned in the child's + * memory. + * @parent_tid: If CLONE_PARENT_SETTID is set, the TID of + * the child process will be returned in the + * parent's memory. + * @exit_signal: The exit_signal the parent process will be + * sent when the child exits. + * @stack: Specify the location of the stack for the + * child process. + * Note, @stack is expected to point to the + * lowest address. The stack direction will be + * determined by the kernel and set up + * appropriately based on @stack_size. + * @stack_size: The size of the stack for the child process. + * @tls: If CLONE_SETTLS is set, the tls descriptor + * is set to tls. + * @set_tid: Pointer to an array of type *pid_t. The size + * of the array is defined using @set_tid_size. + * This array is used to select PIDs/TIDs for + * newly created processes. The first element in + * this defines the PID in the most nested PID + * namespace. Each additional element in the array + * defines the PID in the parent PID namespace of + * the original PID namespace. If the array has + * less entries than the number of currently + * nested PID namespaces only the PIDs in the + * corresponding namespaces are set. + * @set_tid_size: This defines the size of the array referenced + * in @set_tid. This cannot be larger than the + * kernel's limit of nested PID namespaces. * * The structure is versioned by size and thus extensible. 
* New struct members must go at the end of the struct and @@ -68,10 +89,13 @@ struct clone_args { __aligned_u64 stack; __aligned_u64 stack_size; __aligned_u64 tls; + __aligned_u64 set_tid; + __aligned_u64 set_tid_size; }; #endif #define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */ +#define CLONE_ARGS_SIZE_VER1 80 /* sizeof second published struct */ /* * Scheduling policies diff --git a/tools/include/uapi/linux/stat.h b/tools/include/uapi/linux/stat.h index 7b35e98d3c58..ad80a5c885d5 100644 --- a/tools/include/uapi/linux/stat.h +++ b/tools/include/uapi/linux/stat.h @@ -167,8 +167,8 @@ struct statx { #define STATX_ATTR_APPEND 0x00000020 /* [I] File is append-only */ #define STATX_ATTR_NODUMP 0x00000040 /* [I] File is not to be dumped */ #define STATX_ATTR_ENCRYPTED 0x00000800 /* [I] File requires key to decrypt in fs */ - #define STATX_ATTR_AUTOMOUNT 0x00001000 /* Dir: Automount trigger */ +#define STATX_ATTR_VERITY 0x00100000 /* [I] Verity protected file */ #endif /* _UAPI_LINUX_STAT_H */ diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index defae23a0169..97830e46d1a0 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -138,6 +138,7 @@ STATIC_OBJDIR := $(OUTPUT)staticobjs/ BPF_IN_SHARED := $(SHARED_OBJDIR)libbpf-in.o BPF_IN_STATIC := $(STATIC_OBJDIR)libbpf-in.o VERSION_SCRIPT := libbpf.map +BPF_HELPER_DEFS := $(OUTPUT)bpf_helper_defs.h LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET)) LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE)) @@ -159,7 +160,7 @@ all: fixdep all_cmd: $(CMD_TARGETS) check -$(BPF_IN_SHARED): force elfdep bpfdep bpf_helper_defs.h +$(BPF_IN_SHARED): force elfdep bpfdep $(BPF_HELPER_DEFS) @(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \ (diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \ echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true @@ -177,12 +178,12 @@ $(BPF_IN_SHARED): force elfdep bpfdep bpf_helper_defs.h echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)" -$(BPF_IN_STATIC): force elfdep bpfdep bpf_helper_defs.h +$(BPF_IN_STATIC): force elfdep bpfdep $(BPF_HELPER_DEFS) $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR) -bpf_helper_defs.h: $(srctree)/tools/include/uapi/linux/bpf.h +$(BPF_HELPER_DEFS): $(srctree)/tools/include/uapi/linux/bpf.h $(Q)$(srctree)/scripts/bpf_helpers_doc.py --header \ - --file $(srctree)/tools/include/uapi/linux/bpf.h > bpf_helper_defs.h + --file $(srctree)/tools/include/uapi/linux/bpf.h > $(BPF_HELPER_DEFS) $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION) @@ -243,7 +244,7 @@ install_lib: all_cmd $(call do_install_mkdir,$(libdir_SQ)); \ cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ) -install_headers: bpf_helper_defs.h +install_headers: $(BPF_HELPER_DEFS) $(call QUIET_INSTALL, headers) \ $(call do_install,bpf.h,$(prefix)/include/bpf,644); \ $(call do_install,libbpf.h,$(prefix)/include/bpf,644); \ @@ -251,7 +252,7 @@ install_headers: bpf_helper_defs.h $(call do_install,libbpf_util.h,$(prefix)/include/bpf,644); \ $(call do_install,xsk.h,$(prefix)/include/bpf,644); \ $(call do_install,bpf_helpers.h,$(prefix)/include/bpf,644); \ - $(call do_install,bpf_helper_defs.h,$(prefix)/include/bpf,644); \ + $(call 
do_install,$(BPF_HELPER_DEFS),$(prefix)/include/bpf,644); \ $(call do_install,bpf_tracing.h,$(prefix)/include/bpf,644); \ $(call do_install,bpf_endian.h,$(prefix)/include/bpf,644); \ $(call do_install,bpf_core_read.h,$(prefix)/include/bpf,644); @@ -271,7 +272,7 @@ config-clean: clean: $(call QUIET_CLEAN, libbpf) $(RM) -rf $(CMD_TARGETS) \ *.o *~ *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) .*.d .*.cmd \ - *.pc LIBBPF-CFLAGS bpf_helper_defs.h \ + *.pc LIBBPF-CFLAGS $(BPF_HELPER_DEFS) \ $(SHARED_OBJDIR) $(STATIC_OBJDIR) $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile index cbb429f55062..c874c017c636 100644 --- a/tools/lib/traceevent/Makefile +++ b/tools/lib/traceevent/Makefile @@ -39,11 +39,12 @@ DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))' LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1) ifeq ($(LP64), 1) - libdir_relative = lib64 + libdir_relative_temp = lib64 else - libdir_relative = lib + libdir_relative_temp = lib endif +libdir_relative ?= $(libdir_relative_temp) prefix ?= /usr/local libdir = $(prefix)/$(libdir_relative) man_dir = $(prefix)/share/man @@ -97,6 +98,7 @@ EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION) LIB_TARGET = libtraceevent.a libtraceevent.so.$(EVENT_PARSE_VERSION) LIB_INSTALL = libtraceevent.a libtraceevent.so* +LIB_INSTALL := $(addprefix $(OUTPUT),$(LIB_INSTALL)) INCLUDES = -I. -I $(srctree)/tools/include $(CONFIG_INCLUDES) @@ -207,10 +209,11 @@ define do_install $(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2' endef -PKG_CONFIG_FILE = libtraceevent.pc +PKG_CONFIG_SOURCE_FILE = libtraceevent.pc +PKG_CONFIG_FILE := $(addprefix $(OUTPUT),$(PKG_CONFIG_SOURCE_FILE)) define do_install_pkgconfig_file if [ -n "${pkgconfig_dir}" ]; then \ - cp -f ${PKG_CONFIG_FILE}.template ${PKG_CONFIG_FILE}; \ + cp -f ${PKG_CONFIG_SOURCE_FILE}.template ${PKG_CONFIG_FILE}; \ sed -i "s|INSTALL_PREFIX|${1}|g" ${PKG_CONFIG_FILE}; \ sed -i "s|LIB_VERSION|${EVENT_PARSE_VERSION}|g" ${PKG_CONFIG_FILE}; \ sed -i "s|LIB_DIR|${libdir}|g" ${PKG_CONFIG_FILE}; \ diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c index f3cbf86e51ac..20eed719542e 100644 --- a/tools/lib/traceevent/parse-filter.c +++ b/tools/lib/traceevent/parse-filter.c @@ -1228,8 +1228,10 @@ filter_event(struct tep_event_filter *filter, struct tep_event *event, } filter_type = add_filter_type(filter, event->id); - if (filter_type == NULL) + if (filter_type == NULL) { + free_arg(arg); return TEP_ERRNO__MEM_ALLOC_FAILED; + } if (filter_type->filter) free_arg(filter_type->filter); diff --git a/tools/lib/traceevent/plugins/Makefile b/tools/lib/traceevent/plugins/Makefile index f440989fa55e..349bb81482ab 100644 --- a/tools/lib/traceevent/plugins/Makefile +++ b/tools/lib/traceevent/plugins/Makefile @@ -32,11 +32,12 @@ DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))' LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1) ifeq ($(LP64), 1) - libdir_relative = lib64 + libdir_relative_tmp = lib64 else - libdir_relative = lib + libdir_relative_tmp = lib endif +libdir_relative ?= $(libdir_relative_tmp) prefix ?= /usr/local libdir = $(prefix)/$(libdir_relative) diff --git a/tools/perf/Documentation/perf-kvm.txt b/tools/perf/Documentation/perf-kvm.txt index 6a5bb2b17039..cf95baef7b61 100644 --- a/tools/perf/Documentation/perf-kvm.txt +++ b/tools/perf/Documentation/perf-kvm.txt @@ -68,10 +68,11 @@ OPTIONS ------- -i:: --input=<path>:: - Input file name. 
+ Input file name, for the 'report', 'diff' and 'buildid-list' subcommands. -o:: --output=<path>:: - Output file name. + Output file name, for the 'record' subcommand. Doesn't work with 'report', + just redirect the output to a file when using 'report'. --host:: Collect host side performance profile. --guest:: diff --git a/tools/perf/arch/arm/tests/regs_load.S b/tools/perf/arch/arm/tests/regs_load.S index 6e2495cc4517..4284307d7822 100644 --- a/tools/perf/arch/arm/tests/regs_load.S +++ b/tools/perf/arch/arm/tests/regs_load.S @@ -37,7 +37,7 @@ .text .type perf_regs_load,%function -ENTRY(perf_regs_load) +SYM_FUNC_START(perf_regs_load) str r0, [r0, #R0] str r1, [r0, #R1] str r2, [r0, #R2] @@ -56,4 +56,4 @@ ENTRY(perf_regs_load) str lr, [r0, #PC] // store pc as lr in order to skip the call // to this function mov pc, lr -ENDPROC(perf_regs_load) +SYM_FUNC_END(perf_regs_load) diff --git a/tools/perf/arch/arm64/tests/regs_load.S b/tools/perf/arch/arm64/tests/regs_load.S index 07042511dca9..d49de40b6818 100644 --- a/tools/perf/arch/arm64/tests/regs_load.S +++ b/tools/perf/arch/arm64/tests/regs_load.S @@ -7,7 +7,7 @@ #define LDR_REG(r) ldr x##r, [x0, 8 * r] #define SP (8 * 31) #define PC (8 * 32) -ENTRY(perf_regs_load) +SYM_FUNC_START(perf_regs_load) STR_REG(0) STR_REG(1) STR_REG(2) @@ -44,4 +44,4 @@ ENTRY(perf_regs_load) str x30, [x0, #PC] LDR_REG(1) ret -ENDPROC(perf_regs_load) +SYM_FUNC_END(perf_regs_load) diff --git a/tools/perf/arch/x86/tests/regs_load.S b/tools/perf/arch/x86/tests/regs_load.S index bbe5a0d16e51..80f14f52e3f6 100644 --- a/tools/perf/arch/x86/tests/regs_load.S +++ b/tools/perf/arch/x86/tests/regs_load.S @@ -28,7 +28,7 @@ .text #ifdef HAVE_ARCH_X86_64_SUPPORT -ENTRY(perf_regs_load) +SYM_FUNC_START(perf_regs_load) movq %rax, AX(%rdi) movq %rbx, BX(%rdi) movq %rcx, CX(%rdi) @@ -60,9 +60,9 @@ ENTRY(perf_regs_load) movq %r14, R14(%rdi) movq %r15, R15(%rdi) ret -ENDPROC(perf_regs_load) +SYM_FUNC_END(perf_regs_load) #else -ENTRY(perf_regs_load) +SYM_FUNC_START(perf_regs_load) push %edi movl 8(%esp), %edi movl %eax, AX(%edi) @@ -88,7 +88,7 @@ ENTRY(perf_regs_load) movl $0, FS(%edi) movl $0, GS(%edi) ret -ENDPROC(perf_regs_load) +SYM_FUNC_END(perf_regs_load) #endif /* diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index 9664a72a089d..7e124a7b8bfd 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -403,17 +403,6 @@ static int perf_event__repipe_tracing_data(struct perf_session *session, return err; } -static int perf_event__repipe_id_index(struct perf_session *session, - union perf_event *event) -{ - int err; - - perf_event__repipe_synth(session->tool, event); - err = perf_event__process_id_index(session, event); - - return err; -} - static int dso__read_build_id(struct dso *dso) { if (dso->has_build_id) @@ -651,7 +640,7 @@ static int __cmd_inject(struct perf_inject *inject) inject->tool.comm = perf_event__repipe_comm; inject->tool.namespaces = perf_event__repipe_namespaces; inject->tool.exit = perf_event__repipe_exit; - inject->tool.id_index = perf_event__repipe_id_index; + inject->tool.id_index = perf_event__process_id_index; inject->tool.auxtrace_info = perf_event__process_auxtrace_info; inject->tool.auxtrace = perf_event__process_auxtrace; inject->tool.aux = perf_event__drop_aux; diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index b5063d3b6fd0..fb19ef63cc35 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -832,7 +832,7 @@ try_again: if ((errno == EINVAL || errno == EBADF) && 
pos->leader != pos && pos->weak_group) { - pos = perf_evlist__reset_weak_group(evlist, pos); + pos = perf_evlist__reset_weak_group(evlist, pos, true); goto try_again; } rc = -errno; diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 830d563de889..de988589d99b 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -388,6 +388,14 @@ static int report__setup_sample_type(struct report *rep) } } + if (sort__mode == SORT_MODE__MEMORY) { + if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) { + ui__error("Selected --mem-mode but no mem data. " + "Did you call perf record without -d?\n"); + return -1; + } + } + if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) { if ((sample_type & PERF_SAMPLE_REGS_USER) && (sample_type & PERF_SAMPLE_STACK_USER)) { @@ -1068,6 +1076,7 @@ int cmd_report(int argc, const char **argv) struct stat st; bool has_br_stack = false; int branch_mode = -1; + int last_key = 0; bool branch_call_mode = false; #define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent" static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n" @@ -1442,7 +1451,8 @@ repeat: sort_order = sort_tmp; } - if (setup_sorting(session->evlist) < 0) { + if ((last_key != K_SWITCH_INPUT_DATA) && + (setup_sorting(session->evlist) < 0)) { if (sort_order) parse_options_usage(report_usage, options, "s", 1); if (field_order) @@ -1522,6 +1532,7 @@ repeat: ret = __cmd_report(&report); if (ret == K_SWITCH_INPUT_DATA) { perf_session__delete(session); + last_key = K_SWITCH_INPUT_DATA; goto repeat; } else ret = 0; diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 0a15253b438c..a098c2ebf4ea 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -65,6 +65,7 @@ #include "util/target.h" #include "util/time-utils.h" #include "util/top.h" +#include "util/affinity.h" #include "asm/bug.h" #include <linux/time64.h> @@ -265,15 +266,10 @@ static int read_single_counter(struct evsel *counter, int cpu, * Read out the results of a single counter: * do not aggregate counts across CPUs in system-wide mode */ -static int read_counter(struct evsel *counter, struct timespec *rs) +static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu) { int nthreads = perf_thread_map__nr(evsel_list->core.threads); - int ncpus, cpu, thread; - - if (target__has_cpu(&target) && !target__has_per_thread(&target)) - ncpus = perf_evsel__nr_cpus(counter); - else - ncpus = 1; + int thread; if (!counter->supported) return -ENOENT; @@ -282,40 +278,38 @@ static int read_counter(struct evsel *counter, struct timespec *rs) nthreads = 1; for (thread = 0; thread < nthreads; thread++) { - for (cpu = 0; cpu < ncpus; cpu++) { - struct perf_counts_values *count; - - count = perf_counts(counter->counts, cpu, thread); - - /* - * The leader's group read loads data into its group members - * (via perf_evsel__read_counter) and sets threir count->loaded. 
- */ - if (!perf_counts__is_loaded(counter->counts, cpu, thread) && - read_single_counter(counter, cpu, thread, rs)) { - counter->counts->scaled = -1; - perf_counts(counter->counts, cpu, thread)->ena = 0; - perf_counts(counter->counts, cpu, thread)->run = 0; - return -1; - } + struct perf_counts_values *count; - perf_counts__set_loaded(counter->counts, cpu, thread, false); + count = perf_counts(counter->counts, cpu, thread); - if (STAT_RECORD) { - if (perf_evsel__write_stat_event(counter, cpu, thread, count)) { - pr_err("failed to write stat event\n"); - return -1; - } - } + /* + * The leader's group read loads data into its group members + * (via perf_evsel__read_counter()) and sets their count->loaded. + */ + if (!perf_counts__is_loaded(counter->counts, cpu, thread) && + read_single_counter(counter, cpu, thread, rs)) { + counter->counts->scaled = -1; + perf_counts(counter->counts, cpu, thread)->ena = 0; + perf_counts(counter->counts, cpu, thread)->run = 0; + return -1; + } + + perf_counts__set_loaded(counter->counts, cpu, thread, false); - if (verbose > 1) { - fprintf(stat_config.output, - "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", - perf_evsel__name(counter), - cpu, - count->val, count->ena, count->run); + if (STAT_RECORD) { + if (perf_evsel__write_stat_event(counter, cpu, thread, count)) { + pr_err("failed to write stat event\n"); + return -1; } } + + if (verbose > 1) { + fprintf(stat_config.output, + "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", + perf_evsel__name(counter), + cpu, + count->val, count->ena, count->run); + } } return 0; @@ -324,15 +318,37 @@ static int read_counter(struct evsel *counter, struct timespec *rs) static void read_counters(struct timespec *rs) { struct evsel *counter; - int ret; + struct affinity affinity; + int i, ncpus, cpu; + + if (affinity__setup(&affinity) < 0) + return; + + ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus); + if (!target__has_cpu(&target) || target__has_per_thread(&target)) + ncpus = 1; + evlist__for_each_cpu(evsel_list, i, cpu) { + if (i >= ncpus) + break; + affinity__set(&affinity, cpu); + + evlist__for_each_entry(evsel_list, counter) { + if (evsel__cpu_iter_skip(counter, cpu)) + continue; + if (!counter->err) { + counter->err = read_counter_cpu(counter, rs, + counter->cpu_iter - 1); + } + } + } + affinity__cleanup(&affinity); evlist__for_each_entry(evsel_list, counter) { - ret = read_counter(counter, rs); - if (ret) + if (counter->err) pr_debug("failed to read counter %s\n", counter->name); - - if (ret == 0 && perf_stat_process_counter(&stat_config, counter)) + if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter)) pr_warning("failed to process counter %s\n", counter->name); + counter->err = 0; } } @@ -420,6 +436,62 @@ static bool is_target_alive(struct target *_target, return false; } +enum counter_recovery { + COUNTER_SKIP, + COUNTER_RETRY, + COUNTER_FATAL, +}; + +static enum counter_recovery stat_handle_error(struct evsel *counter) +{ + char msg[BUFSIZ]; + /* + * PPC returns ENXIO for HW counters until 2.6.37 + * (behavior changed with commit b0a873e). + */ + if (errno == EINVAL || errno == ENOSYS || + errno == ENOENT || errno == EOPNOTSUPP || + errno == ENXIO) { + if (verbose > 0) + ui__warning("%s event is not supported by the kernel.\n", + perf_evsel__name(counter)); + counter->supported = false; + /* + * errored is a sticky flag that means one of the counter's + * cpu event had a problem and needs to be reexamined. 
+ */ + counter->errored = true; + + if ((counter->leader != counter) || + !(counter->leader->core.nr_members > 1)) + return COUNTER_SKIP; + } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) { + if (verbose > 0) + ui__warning("%s\n", msg); + return COUNTER_RETRY; + } else if (target__has_per_thread(&target) && + evsel_list->core.threads && + evsel_list->core.threads->err_thread != -1) { + /* + * For global --per-thread case, skip current + * error thread. + */ + if (!thread_map__remove(evsel_list->core.threads, + evsel_list->core.threads->err_thread)) { + evsel_list->core.threads->err_thread = -1; + return COUNTER_RETRY; + } + } + + perf_evsel__open_strerror(counter, &target, + errno, msg, sizeof(msg)); + ui__error("%s\n", msg); + + if (child_pid != -1) + kill(child_pid, SIGTERM); + return COUNTER_FATAL; +} + static int __run_perf_stat(int argc, const char **argv, int run_idx) { int interval = stat_config.interval; @@ -433,6 +505,9 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx) int status = 0; const bool forks = (argc > 0); bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false; + struct affinity affinity; + int i, cpu; + bool second_pass = false; if (interval) { ts.tv_sec = interval / USEC_PER_MSEC; @@ -457,61 +532,104 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx) if (group) perf_evlist__set_leader(evsel_list); - evlist__for_each_entry(evsel_list, counter) { + if (affinity__setup(&affinity) < 0) + return -1; + + evlist__for_each_cpu (evsel_list, i, cpu) { + affinity__set(&affinity, cpu); + + evlist__for_each_entry(evsel_list, counter) { + if (evsel__cpu_iter_skip(counter, cpu)) + continue; + if (counter->reset_group || counter->errored) + continue; try_again: - if (create_perf_stat_counter(counter, &stat_config, &target) < 0) { - - /* Weak group failed. Reset the group. */ - if ((errno == EINVAL || errno == EBADF) && - counter->leader != counter && - counter->weak_group) { - counter = perf_evlist__reset_weak_group(evsel_list, counter); - goto try_again; - } + if (create_perf_stat_counter(counter, &stat_config, &target, + counter->cpu_iter - 1) < 0) { - /* - * PPC returns ENXIO for HW counters until 2.6.37 - * (behavior changed with commit b0a873e). - */ - if (errno == EINVAL || errno == ENOSYS || - errno == ENOENT || errno == EOPNOTSUPP || - errno == ENXIO) { - if (verbose > 0) - ui__warning("%s event is not supported by the kernel.\n", - perf_evsel__name(counter)); - counter->supported = false; - - if ((counter->leader != counter) || - !(counter->leader->core.nr_members > 1)) - continue; - } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) { - if (verbose > 0) - ui__warning("%s\n", msg); - goto try_again; - } else if (target__has_per_thread(&target) && - evsel_list->core.threads && - evsel_list->core.threads->err_thread != -1) { /* - * For global --per-thread case, skip current - * error thread. + * Weak group failed. We cannot just undo this here + * because earlier CPUs might be in group mode, and the kernel + * doesn't support mixing group and non group reads. Defer + * it to later. + * Don't close here because we're in the wrong affinity. 
*/ - if (!thread_map__remove(evsel_list->core.threads, - evsel_list->core.threads->err_thread)) { - evsel_list->core.threads->err_thread = -1; + if ((errno == EINVAL || errno == EBADF) && + counter->leader != counter && + counter->weak_group) { + perf_evlist__reset_weak_group(evsel_list, counter, false); + assert(counter->reset_group); + second_pass = true; + continue; + } + + switch (stat_handle_error(counter)) { + case COUNTER_FATAL: + return -1; + case COUNTER_RETRY: goto try_again; + case COUNTER_SKIP: + continue; + default: + break; } + } + counter->supported = true; + } + } - perf_evsel__open_strerror(counter, &target, - errno, msg, sizeof(msg)); - ui__error("%s\n", msg); + if (second_pass) { + /* + * Now redo all the weak group after closing them, + * and also close errored counters. + */ - if (child_pid != -1) - kill(child_pid, SIGTERM); + evlist__for_each_cpu(evsel_list, i, cpu) { + affinity__set(&affinity, cpu); + /* First close errored or weak retry */ + evlist__for_each_entry(evsel_list, counter) { + if (!counter->reset_group && !counter->errored) + continue; + if (evsel__cpu_iter_skip_no_inc(counter, cpu)) + continue; + perf_evsel__close_cpu(&counter->core, counter->cpu_iter); + } + /* Now reopen weak */ + evlist__for_each_entry(evsel_list, counter) { + if (!counter->reset_group && !counter->errored) + continue; + if (evsel__cpu_iter_skip(counter, cpu)) + continue; + if (!counter->reset_group) + continue; +try_again_reset: + pr_debug2("reopening weak %s\n", perf_evsel__name(counter)); + if (create_perf_stat_counter(counter, &stat_config, &target, + counter->cpu_iter - 1) < 0) { + + switch (stat_handle_error(counter)) { + case COUNTER_FATAL: + return -1; + case COUNTER_RETRY: + goto try_again_reset; + case COUNTER_SKIP: + continue; + default: + break; + } + } + counter->supported = true; + } + } + } + affinity__cleanup(&affinity); - return -1; + evlist__for_each_entry(evsel_list, counter) { + if (!counter->supported) { + perf_evsel__free_fd(&counter->core); + continue; } - counter->supported = true; l = strlen(counter->unit); if (l > stat_config.unit_width) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index dc80044bc46f..795e353de095 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -1568,9 +1568,13 @@ int cmd_top(int argc, const char **argv) */ status = perf_env__read_cpuid(&perf_env); if (status) { - pr_err("Couldn't read the cpuid for this machine: %s\n", - str_error_r(errno, errbuf, sizeof(errbuf))); - goto out_delete_evlist; + /* + * Some arches do not provide a get_cpuid(), so just use pr_debug, otherwise + * warn the user explicitely. + */ + eprintf(status == ENOSYS ? 
1 : 0, verbose, + "Couldn't read the cpuid for this machine: %s\n", + str_error_r(errno, errbuf, sizeof(errbuf))); } top.evlist->env = &perf_env; diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh index a1dc16724352..68039a96c1dc 100755 --- a/tools/perf/check-headers.sh +++ b/tools/perf/check-headers.sh @@ -110,8 +110,8 @@ for i in $FILES; do done # diff with extra ignore lines -check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"' -check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"' +check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memcpy_\(erms\|orig\))"' +check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memset_\(erms\|orig\))"' check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common\(-tools\)*.h>"' check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"' check include/linux/ctype.h '-I "isdigit("' diff --git a/tools/perf/lib/cpumap.c b/tools/perf/lib/cpumap.c index 2ca1fafa620d..f93f4e703e4c 100644 --- a/tools/perf/lib/cpumap.c +++ b/tools/perf/lib/cpumap.c @@ -68,14 +68,28 @@ static struct perf_cpu_map *cpu_map__default_new(void) return cpus; } +static int cmp_int(const void *a, const void *b) +{ + return *(const int *)a - *(const int*)b; +} + static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus) { size_t payload_size = nr_cpus * sizeof(int); struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + payload_size); + int i, j; if (cpus != NULL) { - cpus->nr = nr_cpus; memcpy(cpus->map, tmp_cpus, payload_size); + qsort(cpus->map, nr_cpus, sizeof(int), cmp_int); + /* Remove dups */ + j = 0; + for (i = 0; i < nr_cpus; i++) { + if (i == 0 || cpus->map[i] != cpus->map[i - 1]) + cpus->map[j++] = cpus->map[i]; + } + cpus->nr = j; + assert(j <= nr_cpus); refcount_set(&cpus->refcnt, 1); } @@ -272,3 +286,60 @@ int perf_cpu_map__max(struct perf_cpu_map *map) return max; } + +/* + * Merge two cpumaps + * + * orig either gets freed and replaced with a new map, or reused + * with no reference count change (similar to "realloc") + * other has its reference count increased. 
+ */ + +struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig, + struct perf_cpu_map *other) +{ + int *tmp_cpus; + int tmp_len; + int i, j, k; + struct perf_cpu_map *merged; + + if (!orig && !other) + return NULL; + if (!orig) { + perf_cpu_map__get(other); + return other; + } + if (!other) + return orig; + if (orig->nr == other->nr && + !memcmp(orig->map, other->map, orig->nr * sizeof(int))) + return orig; + + tmp_len = orig->nr + other->nr; + tmp_cpus = malloc(tmp_len * sizeof(int)); + if (!tmp_cpus) + return NULL; + + /* Standard merge algorithm from wikipedia */ + i = j = k = 0; + while (i < orig->nr && j < other->nr) { + if (orig->map[i] <= other->map[j]) { + if (orig->map[i] == other->map[j]) + j++; + tmp_cpus[k++] = orig->map[i++]; + } else + tmp_cpus[k++] = other->map[j++]; + } + + while (i < orig->nr) + tmp_cpus[k++] = orig->map[i++]; + + while (j < other->nr) + tmp_cpus[k++] = other->map[j++]; + assert(k <= tmp_len); + + merged = cpu_map__trim_new(k, tmp_cpus); + free(tmp_cpus); + perf_cpu_map__put(orig); + return merged; +} diff --git a/tools/perf/lib/evlist.c b/tools/perf/lib/evlist.c index 205ddbb80bc1..ae9e65aa2491 100644 --- a/tools/perf/lib/evlist.c +++ b/tools/perf/lib/evlist.c @@ -54,6 +54,7 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist, perf_thread_map__put(evsel->threads); evsel->threads = perf_thread_map__get(evlist->threads); + evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus); } static void perf_evlist__propagate_maps(struct perf_evlist *evlist) diff --git a/tools/perf/lib/evsel.c b/tools/perf/lib/evsel.c index 5a89857b0381..4dc06289f4c7 100644 --- a/tools/perf/lib/evsel.c +++ b/tools/perf/lib/evsel.c @@ -114,16 +114,23 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, return err; } +static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu) +{ + int thread; + + for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) { + if (FD(evsel, cpu, thread) >= 0) + close(FD(evsel, cpu, thread)); + FD(evsel, cpu, thread) = -1; + } +} + void perf_evsel__close_fd(struct perf_evsel *evsel) { - int cpu, thread; + int cpu; for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) - for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) { - if (FD(evsel, cpu, thread) >= 0) - close(FD(evsel, cpu, thread)); - FD(evsel, cpu, thread) = -1; - } + perf_evsel__close_fd_cpu(evsel, cpu); } void perf_evsel__free_fd(struct perf_evsel *evsel) @@ -141,6 +148,14 @@ void perf_evsel__close(struct perf_evsel *evsel) perf_evsel__free_fd(evsel); } +void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu) +{ + if (evsel->fd == NULL) + return; + + perf_evsel__close_fd_cpu(evsel, cpu); +} + int perf_evsel__read_size(struct perf_evsel *evsel) { u64 read_format = evsel->attr.read_format; @@ -183,38 +198,61 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, } static int perf_evsel__run_ioctl(struct perf_evsel *evsel, - int ioc, void *arg) + int ioc, void *arg, + int cpu) { - int cpu, thread; + int thread; - for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) { - for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { - int fd = FD(evsel, cpu, thread), - err = ioctl(fd, ioc, arg); + for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { + int fd = FD(evsel, cpu, thread), + err = ioctl(fd, ioc, arg); - if (err) - return err; - } + if (err) + return err; } return 0; } +int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu) +{ + return 
perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu); +} + int perf_evsel__enable(struct perf_evsel *evsel) { - return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, 0); + int i; + int err = 0; + + for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++) + err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i); + return err; +} + +int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu) +{ + return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu); } int perf_evsel__disable(struct perf_evsel *evsel) { - return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, 0); + int i; + int err = 0; + + for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++) + err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i); + return err; } int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter) { - return perf_evsel__run_ioctl(evsel, + int err = 0, i; + + for (i = 0; i < evsel->cpus->nr && !err; i++) + err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_SET_FILTER, - (void *)filter); + (void *)filter, i); + return err; } struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel) diff --git a/tools/perf/lib/include/internal/evlist.h b/tools/perf/lib/include/internal/evlist.h index a2fbccf1922f..74dc8c3f0b66 100644 --- a/tools/perf/lib/include/internal/evlist.h +++ b/tools/perf/lib/include/internal/evlist.h @@ -18,6 +18,7 @@ struct perf_evlist { int nr_entries; bool has_user_cpus; struct perf_cpu_map *cpus; + struct perf_cpu_map *all_cpus; struct perf_thread_map *threads; int nr_mmaps; size_t mmap_len; diff --git a/tools/perf/lib/include/perf/cpumap.h b/tools/perf/lib/include/perf/cpumap.h index ac9aa497f84a..6a17ad730cbc 100644 --- a/tools/perf/lib/include/perf/cpumap.h +++ b/tools/perf/lib/include/perf/cpumap.h @@ -12,6 +12,8 @@ LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void); LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list); LIBPERF_API struct perf_cpu_map *perf_cpu_map__read(FILE *file); LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map); +LIBPERF_API struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig, + struct perf_cpu_map *other); LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map); LIBPERF_API int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx); LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus); diff --git a/tools/perf/lib/include/perf/evsel.h b/tools/perf/lib/include/perf/evsel.h index 557f5815a9c9..c82ec39a4ad0 100644 --- a/tools/perf/lib/include/perf/evsel.h +++ b/tools/perf/lib/include/perf/evsel.h @@ -26,10 +26,13 @@ LIBPERF_API void perf_evsel__delete(struct perf_evsel *evsel); LIBPERF_API int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads); LIBPERF_API void perf_evsel__close(struct perf_evsel *evsel); +LIBPERF_API void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu); LIBPERF_API int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, struct perf_counts_values *count); LIBPERF_API int perf_evsel__enable(struct perf_evsel *evsel); +LIBPERF_API int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu); LIBPERF_API int perf_evsel__disable(struct perf_evsel *evsel); +LIBPERF_API int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu); LIBPERF_API struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel); LIBPERF_API struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel); LIBPERF_API struct 
perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel); diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/extended.json b/tools/perf/pmu-events/arch/s390/cf_z13/extended.json index 436ce33f1182..5da8296b667e 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z13/extended.json +++ b/tools/perf/pmu-events/arch/s390/cf_z13/extended.json @@ -32,7 +32,7 @@ "EventCode": "132", "EventName": "DTLB1_GPAGE_WRITES", "BriefDescription": "DTLB1 Two-Gigabyte Page Writes", - "PublicDescription": "Counter:132 Name:DTLB1_GPAGE_WRITES A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a two-gigabyte page." + "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a two-gigabyte page." }, { "Unit": "CPU-M-CF", diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json index 68618152ea2c..89e070727e1b 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json +++ b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json @@ -4,7 +4,7 @@ "EventCode": "128", "EventName": "L1D_RO_EXCL_WRITES", "BriefDescription": "L1D Read-only Exclusive Writes", - "PublicDescription": "L1D_RO_EXCL_WRITES A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line" + "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line" }, { "Unit": "CPU-M-CF", diff --git a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json index bc7151d639d7..45a34ce4fe89 100644 --- a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json +++ b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json @@ -297,7 +297,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json index 49c5f123d811..961fe4395758 100644 --- a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json +++ b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json @@ -115,7 +115,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json index 113d19e92678..746734ce09be 100644 --- a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json @@ -297,7 +297,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git 
a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json index 2ba32af9bc36..f94653229dd4 100644 --- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json @@ -315,7 +315,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json index c80f16fde6d0..5402cd3120f9 100644 --- a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json +++ b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json @@ -267,7 +267,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json index e501729c3dd1..832f3cb40b34 100644 --- a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json @@ -267,7 +267,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json index e2446966b651..d69b2a8fc0bc 100644 --- a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json +++ b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json @@ -285,7 +285,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json index 9294769dec64..5f465fd81315 100644 --- a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json +++ b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json @@ -285,7 +285,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json index 603ff9c2e9a1..3e909b306003 100644 --- a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json +++ b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json @@ -171,7 +171,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": 
"Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json index c6b485b3a2cb..50c053235752 100644 --- a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json +++ b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json @@ -171,7 +171,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json index 0ca539bb60f6..e7feb60f9fa9 100644 --- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json +++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json @@ -303,7 +303,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json index 047d7e11aa6f..21d7a0c2c2e8 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json @@ -315,7 +315,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build index a3c595fba943..1692529639b0 100644 --- a/tools/perf/tests/Build +++ b/tools/perf/tests/Build @@ -54,6 +54,7 @@ perf-y += unit_number__scnprintf.o perf-y += mem2node.o perf-y += maps.o perf-y += time-utils-test.o +perf-y += genelf.o $(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build $(call rule_mkdir) diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 7115aa32a51e..5f05db75cdd8 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -260,6 +260,11 @@ static struct test generic_tests[] = { .func = test__cpu_map_print, }, { + .desc = "Merge cpu map", + .func = test__cpu_map_merge, + }, + + { .desc = "Probe SDT events", .func = test__sdt_event, }, @@ -297,6 +302,10 @@ static struct test generic_tests[] = { .func = test__time_utils, }, { + .desc = "Test jit_write_elf", + .func = test__jit_write_elf, + }, + { .desc = "maps__merge_in", .func = test__maps__merge_in, }, diff --git a/tools/perf/tests/cpumap.c b/tools/perf/tests/cpumap.c index 8a0d236202b0..4ac56741ac5f 100644 --- a/tools/perf/tests/cpumap.c +++ b/tools/perf/tests/cpumap.c @@ -120,3 +120,19 @@ int test__cpu_map_print(struct test *test __maybe_unused, int subtest __maybe_un TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1-10,12-20,22-30,32-40")); return 0; } + +int test__cpu_map_merge(struct test *test __maybe_unused, int subtest __maybe_unused) +{ + struct perf_cpu_map *a = perf_cpu_map__new("4,2,1"); + struct perf_cpu_map *b = perf_cpu_map__new("4,5,7"); + struct perf_cpu_map *c = perf_cpu_map__merge(a, b); + char buf[100]; + + TEST_ASSERT_VAL("failed to merge map: bad nr", c->nr == 5); + 
cpu_map__snprint(c, buf, sizeof(buf)); + TEST_ASSERT_VAL("failed to merge map: bad result", !strcmp(buf, "1-2,4-5,7")); + perf_cpu_map__put(a); + perf_cpu_map__put(b); + perf_cpu_map__put(c); + return 0; +} diff --git a/tools/perf/tests/event-times.c b/tools/perf/tests/event-times.c index 1ee8704e2284..1e8a9f5c356d 100644 --- a/tools/perf/tests/event-times.c +++ b/tools/perf/tests/event-times.c @@ -125,7 +125,7 @@ static int attach__cpu_disabled(struct evlist *evlist) evsel->core.attr.disabled = 1; - err = perf_evsel__open_per_cpu(evsel, cpus); + err = perf_evsel__open_per_cpu(evsel, cpus, -1); if (err) { if (err == -EACCES) return TEST_SKIP; @@ -152,7 +152,7 @@ static int attach__cpu_enabled(struct evlist *evlist) return -1; } - err = perf_evsel__open_per_cpu(evsel, cpus); + err = perf_evsel__open_per_cpu(evsel, cpus, -1); if (err == -EACCES) return TEST_SKIP; diff --git a/tools/perf/tests/genelf.c b/tools/perf/tests/genelf.c new file mode 100644 index 000000000000..f797f9823e89 --- /dev/null +++ b/tools/perf/tests/genelf.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <limits.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <linux/compiler.h> + +#include "debug.h" +#include "tests.h" + +#ifdef HAVE_JITDUMP +#include <libelf.h> +#include "../util/genelf.h" +#endif + +#define TEMPL "/tmp/perf-test-XXXXXX" + +int test__jit_write_elf(struct test *test __maybe_unused, + int subtest __maybe_unused) +{ +#ifdef HAVE_JITDUMP + static unsigned char x86_code[] = { + 0xBB, 0x2A, 0x00, 0x00, 0x00, /* movl $42, %ebx */ + 0xB8, 0x01, 0x00, 0x00, 0x00, /* movl $1, %eax */ + 0xCD, 0x80 /* int $0x80 */ + }; + char path[PATH_MAX]; + int fd, ret; + + strcpy(path, TEMPL); + + fd = mkstemp(path); + if (fd < 0) { + perror("mkstemp failed"); + return TEST_FAIL; + } + + pr_info("Writing jit code to: %s\n", path); + + ret = jit_write_elf(fd, 0, "main", x86_code, sizeof(x86_code), + NULL, 0, NULL, 0, 0); + close(fd); + + unlink(path); + + return ret ? 
TEST_FAIL : 0; +#else + return TEST_SKIP; +#endif +} diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h index 25aea387e2bf..9a160fef47c9 100644 --- a/tools/perf/tests/tests.h +++ b/tools/perf/tests/tests.h @@ -98,6 +98,7 @@ int test__event_update(struct test *test, int subtest); int test__event_times(struct test *test, int subtest); int test__backward_ring_buffer(struct test *test, int subtest); int test__cpu_map_print(struct test *test, int subtest); +int test__cpu_map_merge(struct test *test, int subtest); int test__sdt_event(struct test *test, int subtest); int test__is_printable_array(struct test *test, int subtest); int test__bitmap_print(struct test *test, int subtest); @@ -109,6 +110,7 @@ int test__unit_number__scnprint(struct test *test, int subtest); int test__mem2node(struct test *t, int subtest); int test__maps__merge_in(struct test *t, int subtest); int test__time_utils(struct test *t, int subtest); +int test__jit_write_elf(struct test *test, int subtest); bool test__bp_signal_is_supported(void); bool test__bp_account_is_supported(void); diff --git a/tools/perf/trace/beauty/clone.c b/tools/perf/trace/beauty/clone.c index 1a8d3be2030e..062ca849c8fd 100644 --- a/tools/perf/trace/beauty/clone.c +++ b/tools/perf/trace/beauty/clone.c @@ -45,6 +45,7 @@ static size_t clone__scnprintf_flags(unsigned long flags, char *bf, size_t size, P_FLAG(NEWPID); P_FLAG(NEWNET); P_FLAG(IO); + P_FLAG(CLEAR_SIGHAND); #undef P_FLAG if (flags) diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 57943f3685f8..3a442f021468 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -63,4 +63,5 @@ int cpu_map__build_map(struct perf_cpu_map *cpus, struct perf_cpu_map **res, int cpu_map__cpu(struct perf_cpu_map *cpus, int idx); bool cpu_map__has(struct perf_cpu_map *cpus, int cpu); + #endif /* __PERF_CPUMAP_H */ diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index fdce590d2278..1548237b6558 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -18,6 +18,7 @@ #include "debug.h" #include "units.h" #include <internal/lib.h> // page_size +#include "affinity.h" #include "../perf.h" #include "asm/bug.h" #include "bpf-event.h" @@ -342,14 +343,63 @@ static int perf_evlist__nr_threads(struct evlist *evlist, return perf_thread_map__nr(evlist->core.threads); } +void evlist__cpu_iter_start(struct evlist *evlist) +{ + struct evsel *pos; + + /* + * Reset the per evsel cpu_iter. This is needed because + * each evsel's cpumap may have a different index space, + * and some operations need the index to modify + * the FD xyarray (e.g. 
open, close) + */ + evlist__for_each_entry(evlist, pos) + pos->cpu_iter = 0; +} + +bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu) +{ + if (ev->cpu_iter >= ev->core.cpus->nr) + return true; + if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu) + return true; + return false; +} + +bool evsel__cpu_iter_skip(struct evsel *ev, int cpu) +{ + if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) { + ev->cpu_iter++; + return false; + } + return true; +} + void evlist__disable(struct evlist *evlist) { struct evsel *pos; + struct affinity affinity; + int cpu, i; + + if (affinity__setup(&affinity) < 0) + return; + evlist__for_each_cpu(evlist, i, cpu) { + affinity__set(&affinity, cpu); + + evlist__for_each_entry(evlist, pos) { + if (evsel__cpu_iter_skip(pos, cpu)) + continue; + if (pos->disabled || !perf_evsel__is_group_leader(pos) || !pos->core.fd) + continue; + evsel__disable_cpu(pos, pos->cpu_iter - 1); + } + } + affinity__cleanup(&affinity); evlist__for_each_entry(evlist, pos) { - if (pos->disabled || !perf_evsel__is_group_leader(pos) || !pos->core.fd) + if (!perf_evsel__is_group_leader(pos) || !pos->core.fd) continue; - evsel__disable(pos); + pos->disabled = true; } evlist->enabled = false; @@ -358,11 +408,28 @@ void evlist__disable(struct evlist *evlist) void evlist__enable(struct evlist *evlist) { struct evsel *pos; + struct affinity affinity; + int cpu, i; + + if (affinity__setup(&affinity) < 0) + return; + evlist__for_each_cpu(evlist, i, cpu) { + affinity__set(&affinity, cpu); + + evlist__for_each_entry(evlist, pos) { + if (evsel__cpu_iter_skip(pos, cpu)) + continue; + if (!perf_evsel__is_group_leader(pos) || !pos->core.fd) + continue; + evsel__enable_cpu(pos, pos->cpu_iter - 1); + } + } + affinity__cleanup(&affinity); evlist__for_each_entry(evlist, pos) { if (!perf_evsel__is_group_leader(pos) || !pos->core.fd) continue; - evsel__enable(pos); + pos->disabled = false; } evlist->enabled = true; @@ -1137,9 +1204,35 @@ void perf_evlist__set_selected(struct evlist *evlist, void evlist__close(struct evlist *evlist) { struct evsel *evsel; + struct affinity affinity; + int cpu, i; - evlist__for_each_entry_reverse(evlist, evsel) - evsel__close(evsel); + /* + * With perf record core.cpus is usually NULL. + * Use the old method to handle this for now. + */ + if (!evlist->core.cpus) { + evlist__for_each_entry_reverse(evlist, evsel) + evsel__close(evsel); + return; + } + + if (affinity__setup(&affinity) < 0) + return; + evlist__for_each_cpu(evlist, i, cpu) { + affinity__set(&affinity, cpu); + + evlist__for_each_entry_reverse(evlist, evsel) { + if (evsel__cpu_iter_skip(evsel, cpu)) + continue; + perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1); + } + } + affinity__cleanup(&affinity); + evlist__for_each_entry_reverse(evlist, evsel) { + perf_evsel__free_fd(&evsel->core); + perf_evsel__free_id(&evsel->core); + } } static int perf_evlist__create_syswide_maps(struct evlist *evlist) @@ -1577,7 +1670,8 @@ void perf_evlist__force_leader(struct evlist *evlist) } struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list, - struct evsel *evsel) + struct evsel *evsel, + bool close) { struct evsel *c2, *leader; bool is_open = true; @@ -1594,10 +1688,15 @@ struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list, if (c2 == evsel) is_open = false; if (c2->leader == leader) { - if (is_open) + if (is_open && close) perf_evsel__close(&c2->core); c2->leader = c2; c2->core.nr_members = 0; + /* + * Set this for all former members of the group + * to indicate they get reopened. 
+ */ + c2->reset_group = true; } } return leader; diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 3655b9ebb147..f5bd5c386df1 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -334,9 +334,17 @@ void perf_evlist__to_front(struct evlist *evlist, #define evlist__for_each_entry_safe(evlist, tmp, evsel) \ __evlist__for_each_entry_safe(&(evlist)->core.entries, tmp, evsel) +#define evlist__for_each_cpu(evlist, index, cpu) \ + evlist__cpu_iter_start(evlist); \ + perf_cpu_map__for_each_cpu (cpu, index, (evlist)->core.all_cpus) + void perf_evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel); +void evlist__cpu_iter_start(struct evlist *evlist); +bool evsel__cpu_iter_skip(struct evsel *ev, int cpu); +bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu); + struct evsel * perf_evlist__find_evsel_by_str(struct evlist *evlist, const char *str); @@ -348,5 +356,6 @@ bool perf_evlist__exclude_kernel(struct evlist *evlist); void perf_evlist__force_leader(struct evlist *evlist); struct evsel *perf_evlist__reset_weak_group(struct evlist *evlist, - struct evsel *evsel); + struct evsel *evsel, + bool close); #endif /* __PERF_EVLIST_H */ diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index f4dea055b080..a69e64236120 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1223,16 +1223,27 @@ int perf_evsel__append_addr_filter(struct evsel *evsel, const char *filter) return perf_evsel__append_filter(evsel, "%s,%s", filter); } +/* Caller has to clear disabled after going through all CPUs. */ +int evsel__enable_cpu(struct evsel *evsel, int cpu) +{ + return perf_evsel__enable_cpu(&evsel->core, cpu); +} + int evsel__enable(struct evsel *evsel) { int err = perf_evsel__enable(&evsel->core); if (!err) evsel->disabled = false; - return err; } +/* Caller has to set disabled after going through all CPUs. */ +int evsel__disable_cpu(struct evsel *evsel, int cpu) +{ + return perf_evsel__disable_cpu(&evsel->core, cpu); +} + int evsel__disable(struct evsel *evsel) { int err = perf_evsel__disable(&evsel->core); @@ -1587,8 +1598,9 @@ static int perf_event_open(struct evsel *evsel, return fd; } -int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus, - struct perf_thread_map *threads) +static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, + struct perf_thread_map *threads, + int start_cpu, int end_cpu) { int cpu, thread, nthreads; unsigned long flags = PERF_FLAG_FD_CLOEXEC; @@ -1665,7 +1677,7 @@ retry_sample_id: display_attr(&evsel->core.attr); - for (cpu = 0; cpu < cpus->nr; cpu++) { + for (cpu = start_cpu; cpu < end_cpu; cpu++) { for (thread = 0; thread < nthreads; thread++) { int fd, group_fd; @@ -1843,6 +1855,12 @@ out_close: return err; } +int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus, + struct perf_thread_map *threads) +{ + return evsel__open_cpu(evsel, cpus, threads, 0, cpus ? cpus->nr : 1); +} + void evsel__close(struct evsel *evsel) { perf_evsel__close(&evsel->core); @@ -1850,9 +1868,14 @@ void evsel__close(struct evsel *evsel) } int perf_evsel__open_per_cpu(struct evsel *evsel, - struct perf_cpu_map *cpus) + struct perf_cpu_map *cpus, + int cpu) { - return evsel__open(evsel, cpus, NULL); + if (cpu == -1) + return evsel__open_cpu(evsel, cpus, NULL, 0, + cpus ? 
cpus->nr : 1); + + return evsel__open_cpu(evsel, cpus, NULL, cpu, cpu + 1); } int perf_evsel__open_per_thread(struct evsel *evsel, diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index ddc5ee6f6592..dc14f4a823cd 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -86,6 +86,7 @@ struct evsel { struct list_head config_terms; struct bpf_object *bpf_obj; int bpf_fd; + int err; bool auto_merge_stats; bool merged_stat; const char * metric_expr; @@ -94,7 +95,10 @@ struct evsel { struct evsel *metric_leader; bool collect_stat; bool weak_group; + bool reset_group; + bool errored; bool percore; + int cpu_iter; const char *pmu_name; struct { perf_evsel__sb_cb_t *cb; @@ -218,11 +222,14 @@ int perf_evsel__set_filter(struct evsel *evsel, const char *filter); int perf_evsel__append_tp_filter(struct evsel *evsel, const char *filter); int perf_evsel__append_addr_filter(struct evsel *evsel, const char *filter); +int evsel__enable_cpu(struct evsel *evsel, int cpu); int evsel__enable(struct evsel *evsel); int evsel__disable(struct evsel *evsel); +int evsel__disable_cpu(struct evsel *evsel, int cpu); int perf_evsel__open_per_cpu(struct evsel *evsel, - struct perf_cpu_map *cpus); + struct perf_cpu_map *cpus, + int cpu); int perf_evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads); int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus, diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c index f9f18b8b1df9..aed49806a09b 100644 --- a/tools/perf/util/genelf.c +++ b/tools/perf/util/genelf.c @@ -8,15 +8,12 @@ */ #include <sys/types.h> -#include <stdio.h> -#include <getopt.h> #include <stddef.h> #include <libelf.h> #include <string.h> #include <stdlib.h> #include <unistd.h> #include <inttypes.h> -#include <limits.h> #include <fcntl.h> #include <err.h> #ifdef HAVE_DWARF_SUPPORT @@ -31,8 +28,6 @@ #define NT_GNU_BUILD_ID 3 #endif -#define JVMTI - #define BUILD_ID_URANDOM /* different uuid for each run */ #ifdef HAVE_LIBCRYPTO @@ -511,44 +506,3 @@ error: return retval; } - -#ifndef JVMTI - -static unsigned char x86_code[] = { - 0xBB, 0x2A, 0x00, 0x00, 0x00, /* movl $42, %ebx */ - 0xB8, 0x01, 0x00, 0x00, 0x00, /* movl $1, %eax */ - 0xCD, 0x80 /* int $0x80 */ -}; - -static struct options options; - -int main(int argc, char **argv) -{ - int c, fd, ret; - - while ((c = getopt(argc, argv, "o:h")) != -1) { - switch (c) { - case 'o': - options.output = optarg; - break; - case 'h': - printf("Usage: genelf -o output_file [-h]\n"); - return 0; - default: - errx(1, "unknown option"); - } - } - - fd = open(options.output, O_CREAT|O_TRUNC|O_RDWR, 0666); - if (fd == -1) - err(1, "cannot create file %s", options.output); - - ret = jit_write_elf(fd, "main", x86_code, sizeof(x86_code)); - close(fd); - - if (ret != 0) - unlink(options.output); - - return ret; -} -#endif diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index becc2d109423..93ad27830e2b 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -850,7 +850,7 @@ int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid) */ int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused) { - return -1; + return ENOSYS; /* Not implemented */ } static int write_cpuid(struct feat_fd *ff, @@ -1089,21 +1089,18 @@ static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c) fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map); } -static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp) +#define 
MAX_CACHE_LVL 4 + +static int build_caches(struct cpu_cache_level caches[], u32 *cntp) { u32 i, cnt = 0; - long ncpus; u32 nr, cpu; u16 level; - ncpus = sysconf(_SC_NPROCESSORS_CONF); - if (ncpus < 0) - return -1; - - nr = (u32)(ncpus & UINT_MAX); + nr = cpu__max_cpu(); for (cpu = 0; cpu < nr; cpu++) { - for (level = 0; level < 10; level++) { + for (level = 0; level < MAX_CACHE_LVL; level++) { struct cpu_cache_level c; int err; @@ -1123,18 +1120,12 @@ static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp) caches[cnt++] = c; else cpu_cache_level__free(&c); - - if (WARN_ONCE(cnt == size, "way too many cpu caches..")) - goto out; } } - out: *cntp = cnt; return 0; } -#define MAX_CACHE_LVL 4 - static int write_cache(struct feat_fd *ff, struct evlist *evlist __maybe_unused) { @@ -1143,7 +1134,7 @@ static int write_cache(struct feat_fd *ff, u32 cnt = 0, i, version = 1; int ret; - ret = build_caches(caches, max_caches, &cnt); + ret = build_caches(caches, &cnt); if (ret) goto out; diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 45286900aacb..0aa63aeb58ec 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -339,10 +339,10 @@ static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format) list_for_each_entry_safe(format, tmp, &(_list)->sorts, sort_list) #define hists__for_each_format(hists, format) \ - perf_hpp_list__for_each_format((hists)->hpp_list, fmt) + perf_hpp_list__for_each_format((hists)->hpp_list, format) #define hists__for_each_sort_list(hists, format) \ - perf_hpp_list__for_each_sort_list((hists)->hpp_list, fmt) + perf_hpp_list__for_each_sort_list((hists)->hpp_list, format) extern struct perf_hpp_fmt perf_hpp__format[]; diff --git a/tools/perf/util/include/linux/linkage.h b/tools/perf/util/include/linux/linkage.h index f01d48a8d707..b8a5159361b4 100644 --- a/tools/perf/util/include/linux/linkage.h +++ b/tools/perf/util/include/linux/linkage.h @@ -5,10 +5,93 @@ /* linkage.h ... for including arch/x86/lib/memcpy_64.S */ -#define ENTRY(name) \ - .globl name; \ +/* Some toolchains use other characters (e.g. '`') to mark new line in macro */ +#ifndef ASM_NL +#define ASM_NL ; +#endif + +#ifndef __ALIGN +#define __ALIGN .align 4,0x90 +#define __ALIGN_STR ".align 4,0x90" +#endif + +/* SYM_T_FUNC -- type used by assembler to mark functions */ +#ifndef SYM_T_FUNC +#define SYM_T_FUNC STT_FUNC +#endif + +/* SYM_A_* -- align the symbol? */ +#define SYM_A_ALIGN ALIGN + +/* SYM_L_* -- linkage of symbols */ +#define SYM_L_GLOBAL(name) .globl name +#define SYM_L_LOCAL(name) /* nothing */ + +#define ALIGN __ALIGN + +/* === generic annotations === */ + +/* SYM_ENTRY -- use only if you have to for non-paired symbols */ +#ifndef SYM_ENTRY +#define SYM_ENTRY(name, linkage, align...) \ + linkage(name) ASM_NL \ + align ASM_NL \ name: +#endif + +/* SYM_START -- use only if you have to */ +#ifndef SYM_START +#define SYM_START(name, linkage, align...) \ + SYM_ENTRY(name, linkage, align) +#endif + +/* SYM_END -- use only if you have to */ +#ifndef SYM_END +#define SYM_END(name, sym_type) \ + .type name sym_type ASM_NL \ + .size name, .-name +#endif + +/* + * SYM_FUNC_START_ALIAS -- use where there are two global names for one + * function + */ +#ifndef SYM_FUNC_START_ALIAS +#define SYM_FUNC_START_ALIAS(name) \ + SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) +#endif + +/* SYM_FUNC_START -- use for global functions */ +#ifndef SYM_FUNC_START +/* + * The same as SYM_FUNC_START_ALIAS, but we will need to distinguish these two + * later. 
+ */ +#define SYM_FUNC_START(name) \ + SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) +#endif + +/* SYM_FUNC_START_LOCAL -- use for local functions */ +#ifndef SYM_FUNC_START_LOCAL +/* the same as SYM_FUNC_START_LOCAL_ALIAS, see comment near SYM_FUNC_START */ +#define SYM_FUNC_START_LOCAL(name) \ + SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) +#endif + +/* SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function */ +#ifndef SYM_FUNC_END_ALIAS +#define SYM_FUNC_END_ALIAS(name) \ + SYM_END(name, SYM_T_FUNC) +#endif -#define ENDPROC(name) +/* + * SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START, + * SYM_FUNC_START_WEAK, ... + */ +#ifndef SYM_FUNC_END +/* the same as SYM_FUNC_END_ALIAS, see comment near SYM_FUNC_START */ +#define SYM_FUNC_END(name) \ + SYM_END(name, SYM_T_FUNC) +#endif #endif /* PERF_LINUX_LINKAGE_H_ */ diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 416d174d223c..c8c5410315e8 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -2446,6 +2446,7 @@ static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms list_for_each_entry(ilist, &inline_node->val, list) { struct map_symbol ilist_ms = { + .maps = ms->maps, .map = map, .sym = ilist->symbol, }; diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c index 6a4d350d5cdb..02aee946b6c1 100644 --- a/tools/perf/util/metricgroup.c +++ b/tools/perf/util/metricgroup.c @@ -103,8 +103,11 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist, if (!strcmp(ev->name, ids[i])) { if (!metric_events[i]) metric_events[i] = ev; + i++; + if (i == idnum) + break; } else { - if (++i == idnum) { + if (i + 1 == idnum) { /* Discard the whole match and start again */ i = 0; memset(metric_events, 0, @@ -124,7 +127,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist, } } - if (i != idnum - 1) { + if (i != idnum) { /* Not whole match */ return NULL; } diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 345b5ccc90f6..9fcba2872130 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -2681,12 +2681,12 @@ static int setup_sort_list(struct perf_hpp_list *list, char *str, ret = sort_dimension__add(list, tok, evlist, level); if (ret == -EINVAL) { if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok))) - pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system"); + ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system"); else - pr_err("Invalid --sort key: `%s'", tok); + ui__error("Invalid --sort key: `%s'", tok); break; } else if (ret == -ESRCH) { - pr_err("Unknown --sort key: `%s'", tok); + ui__error("Unknown --sort key: `%s'", tok); break; } } @@ -2743,7 +2743,7 @@ static int setup_sort_order(struct evlist *evlist) return 0; if (sort_order[1] == '\0') { - pr_err("Invalid --sort key: `+'"); + ui__error("Invalid --sort key: `+'"); return -EINVAL; } @@ -2959,6 +2959,9 @@ int output_field_add(struct perf_hpp_list *list, char *tok) if (strncasecmp(tok, sd->name, strlen(tok))) continue; + if (sort__mode != SORT_MODE__MEMORY) + return -EINVAL; + return __sort_dimension__add_output(list, sd); } @@ -2968,6 +2971,9 @@ int output_field_add(struct perf_hpp_list *list, char *tok) if (strncasecmp(tok, sd->name, strlen(tok))) continue; + if (sort__mode != SORT_MODE__BRANCH) + return -EINVAL; + return __sort_dimension__add_output(list, sd); } @@ -3034,7 +3040,7 @@ static int 
__setup_output_field(void) strp++; if (!strlen(strp)) { - pr_err("Invalid --fields key: `+'"); + ui__error("Invalid --fields key: `+'"); goto out; } diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 332cb730785b..5f26137b8d60 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -464,7 +464,8 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp) int create_perf_stat_counter(struct evsel *evsel, struct perf_stat_config *config, - struct target *target) + struct target *target, + int cpu) { struct perf_event_attr *attr = &evsel->core.attr; struct evsel *leader = evsel->leader; @@ -518,7 +519,7 @@ int create_perf_stat_counter(struct evsel *evsel, } if (target__has_cpu(target) && !target__has_per_thread(target)) - return perf_evsel__open_per_cpu(evsel, evsel__cpus(evsel)); + return perf_evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu); return perf_evsel__open_per_thread(evsel, evsel->core.threads); } diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h index bfa9aaf36ce6..fb990efa54a8 100644 --- a/tools/perf/util/stat.h +++ b/tools/perf/util/stat.h @@ -214,7 +214,8 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp); int create_perf_stat_counter(struct evsel *evsel, struct perf_stat_config *config, - struct target *target); + struct target *target, + int cpu); void perf_evlist__print_counters(struct evlist *evlist, struct perf_stat_config *config, diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 6658fbf196e6..1965aefccb02 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -920,6 +920,9 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map, if (curr_map == NULL) return -1; + if (curr_dso->kernel) + map__kmap(curr_map)->kmaps = kmaps; + if (adjust_kernel_syms) { curr_map->start = shdr->sh_addr + ref_reloc(kmap); curr_map->end = curr_map->start + shdr->sh_size; diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py index efe06d621983..e59eb9e7f923 100755 --- a/tools/testing/kunit/kunit.py +++ b/tools/testing/kunit/kunit.py @@ -31,15 +31,12 @@ class KunitStatus(Enum): TEST_FAILURE = auto() def create_default_kunitconfig(): - if not os.path.exists(kunit_kernel.KUNITCONFIG_PATH): + if not os.path.exists(kunit_kernel.kunitconfig_path): shutil.copyfile('arch/um/configs/kunit_defconfig', - kunit_kernel.KUNITCONFIG_PATH) + kunit_kernel.kunitconfig_path) def run_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitRequest) -> KunitResult: - if request.defconfig: - create_default_kunitconfig() - config_start = time.time() success = linux.build_reconfig(request.build_dir) config_end = time.time() @@ -108,15 +105,22 @@ def main(argv, linux=None): run_parser.add_argument('--build_dir', help='As in the make command, it specifies the build ' 'directory.', - type=str, default=None, metavar='build_dir') + type=str, default='', metavar='build_dir') run_parser.add_argument('--defconfig', - help='Uses a default kunitconfig.', + help='Uses a default .kunitconfig.', action='store_true') cli_args = parser.parse_args(argv) if cli_args.subcommand == 'run': + if cli_args.build_dir: + if not os.path.exists(cli_args.build_dir): + os.mkdir(cli_args.build_dir) + kunit_kernel.kunitconfig_path = os.path.join( + cli_args.build_dir, + kunit_kernel.kunitconfig_path) + if cli_args.defconfig: create_default_kunitconfig() diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py index bf3876835331..cc5d844ecca1 
100644 --- a/tools/testing/kunit/kunit_kernel.py +++ b/tools/testing/kunit/kunit_kernel.py @@ -14,7 +14,7 @@ import os import kunit_config KCONFIG_PATH = '.config' -KUNITCONFIG_PATH = 'kunitconfig' +kunitconfig_path = '.kunitconfig' class ConfigError(Exception): """Represents an error trying to configure the Linux kernel.""" @@ -82,7 +82,7 @@ class LinuxSourceTree(object): def __init__(self): self._kconfig = kunit_config.Kconfig() - self._kconfig.read_from_file(KUNITCONFIG_PATH) + self._kconfig.read_from_file(kunitconfig_path) self._ops = LinuxSourceTreeOperations() def clean(self): @@ -111,7 +111,7 @@ class LinuxSourceTree(object): return True def build_reconfig(self, build_dir): - """Creates a new .config if it is not a subset of the kunitconfig.""" + """Creates a new .config if it is not a subset of the .kunitconfig.""" kconfig_path = get_kconfig_path(build_dir) if os.path.exists(kconfig_path): existing_kconfig = kunit_config.Kconfig() @@ -140,10 +140,10 @@ class LinuxSourceTree(object): return False return True - def run_kernel(self, args=[], timeout=None, build_dir=None): + def run_kernel(self, args=[], timeout=None, build_dir=''): args.extend(['mem=256M']) process = self._ops.linux_bin(args, timeout, build_dir) - with open('test.log', 'w') as f: + with open(os.path.join(build_dir, 'test.log'), 'w') as f: for line in process.stdout: f.write(line.rstrip().decode('ascii') + '\n') yield line.rstrip().decode('ascii') diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py index 4a12baa0cd4e..cba97756ac4a 100755 --- a/tools/testing/kunit/kunit_tool_test.py +++ b/tools/testing/kunit/kunit_tool_test.py @@ -174,6 +174,7 @@ class KUnitMainTest(unittest.TestCase): kunit.main(['run'], self.linux_source_mock) assert self.linux_source_mock.build_reconfig.call_count == 1 assert self.linux_source_mock.run_kernel.call_count == 1 + self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='', timeout=300) self.print_mock.assert_any_call(StrContains('Testing complete.')) def test_run_passes_args_fail(self): @@ -199,7 +200,14 @@ class KUnitMainTest(unittest.TestCase): timeout = 3453 kunit.main(['run', '--timeout', str(timeout)], self.linux_source_mock) assert self.linux_source_mock.build_reconfig.call_count == 1 - self.linux_source_mock.run_kernel.assert_called_once_with(timeout=timeout) + self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='', timeout=timeout) + self.print_mock.assert_any_call(StrContains('Testing complete.')) + + def test_run_builddir(self): + build_dir = '.kunit' + kunit.main(['run', '--build_dir', build_dir], self.linux_source_mock) + assert self.linux_source_mock.build_reconfig.call_count == 1 + self.linux_source_mock.run_kernel.assert_called_once_with(build_dir=build_dir, timeout=300) self.print_mock.assert_any_call(StrContains('Testing complete.')) if __name__ == '__main__': diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild index c4a9196d794c..6aca8d5be159 100644 --- a/tools/testing/nvdimm/Kbuild +++ b/tools/testing/nvdimm/Kbuild @@ -5,6 +5,7 @@ ldflags-y += --wrap=devm_ioremap_nocache ldflags-y += --wrap=devm_memremap ldflags-y += --wrap=devm_memunmap ldflags-y += --wrap=ioremap_nocache +ldflags-y += --wrap=ioremap ldflags-y += --wrap=iounmap ldflags-y += --wrap=memunmap ldflags-y += --wrap=__devm_request_region diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c index 3f55f2f99112..6271ac757a4b 100644 --- a/tools/testing/nvdimm/test/iomap.c +++ 
b/tools/testing/nvdimm/test/iomap.c @@ -193,6 +193,12 @@ void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size) } EXPORT_SYMBOL(__wrap_ioremap_nocache); +void __iomem *__wrap_ioremap(resource_size_t offset, unsigned long size) +{ + return __nfit_test_ioremap(offset, size, ioremap); +} +EXPORT_SYMBOL(__wrap_ioremap); + void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size) { return __nfit_test_ioremap(offset, size, ioremap_wc); diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index 419652458da4..1ff0a9f49c01 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -40,3 +40,4 @@ xdping test_cpp /no_alu32 /bpf_gcc +bpf_helper_defs.h diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index e0fe01d9ec33..e2fd6f8d579c 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -120,9 +120,9 @@ force: $(BPFOBJ): force $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ -BPF_HELPERS := $(BPFDIR)/bpf_helper_defs.h $(wildcard $(BPFDIR)/bpf_*.h) -$(BPFDIR)/bpf_helper_defs.h: - $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ bpf_helper_defs.h +BPF_HELPERS := $(OUTPUT)/bpf_helper_defs.h $(wildcard $(BPFDIR)/bpf_*.h) +$(OUTPUT)/bpf_helper_defs.h: + $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ $(OUTPUT)/bpf_helper_defs.h # Get Clang's default includes on this system, as opposed to those seen by # '-target bpf'. This fixes "missing" files on some architectures/distros, diff --git a/tools/testing/selftests/bpf/test_ftrace.sh b/tools/testing/selftests/bpf/test_ftrace.sh new file mode 100755 index 000000000000..20de7bb873bc --- /dev/null +++ b/tools/testing/selftests/bpf/test_ftrace.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +TR=/sys/kernel/debug/tracing/ +clear_trace() { # reset trace output + echo > $TR/trace +} + +disable_tracing() { # stop trace recording + echo 0 > $TR/tracing_on +} + +enable_tracing() { # start trace recording + echo 1 > $TR/tracing_on +} + +reset_tracer() { # reset the current tracer + echo nop > $TR/current_tracer +} + +disable_tracing +clear_trace + +echo "" > $TR/set_ftrace_filter +echo '*printk* *console* *wake* *serial* *lock*' > $TR/set_ftrace_notrace + +echo "bpf_prog_test*" > $TR/set_graph_function +echo "" > $TR/set_graph_notrace + +echo function_graph > $TR/current_tracer + +enable_tracing +./test_progs -t fentry +./test_progs -t fexit +disable_tracing +clear_trace + +reset_tracer + +exit 0 diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index d27fd929abb9..87eaa49609a0 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -408,10 +408,10 @@ static void update_map(int fd, int index) assert(!bpf_map_update_elem(fd, &index, &value, 0)); } -static int create_prog_dummy1(enum bpf_prog_type prog_type) +static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret) { struct bpf_insn prog[] = { - BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, ret), BPF_EXIT_INSN(), }; @@ -419,14 +419,15 @@ static int create_prog_dummy1(enum bpf_prog_type prog_type) ARRAY_SIZE(prog), "GPL", 0, NULL, 0); } -static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx) +static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd, + int idx, int ret) { struct bpf_insn prog[] = { BPF_MOV64_IMM(BPF_REG_3, idx), BPF_LD_MAP_FD(BPF_REG_2, mfd), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 
0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 41), + BPF_MOV64_IMM(BPF_REG_0, ret), BPF_EXIT_INSN(), }; @@ -435,10 +436,9 @@ static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx) } static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem, - int p1key) + int p1key, int p2key, int p3key) { - int p2key = 1; - int mfd, p1fd, p2fd; + int mfd, p1fd, p2fd, p3fd; mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int), sizeof(int), max_elem, 0); @@ -449,23 +449,24 @@ static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem, return -1; } - p1fd = create_prog_dummy1(prog_type); - p2fd = create_prog_dummy2(prog_type, mfd, p2key); - if (p1fd < 0 || p2fd < 0) - goto out; + p1fd = create_prog_dummy_simple(prog_type, 42); + p2fd = create_prog_dummy_loop(prog_type, mfd, p2key, 41); + p3fd = create_prog_dummy_simple(prog_type, 24); + if (p1fd < 0 || p2fd < 0 || p3fd < 0) + goto err; if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0) - goto out; + goto err; if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0) - goto out; + goto err; + if (bpf_map_update_elem(mfd, &p3key, &p3fd, BPF_ANY) < 0) { +err: + close(mfd); + mfd = -1; + } + close(p3fd); close(p2fd); close(p1fd); - return mfd; -out: - close(p2fd); - close(p1fd); - close(mfd); - return -1; } static int create_map_in_map(void) @@ -684,7 +685,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type, } if (*fixup_prog1) { - map_fds[4] = create_prog_array(prog_type, 4, 0); + map_fds[4] = create_prog_array(prog_type, 4, 0, 1, 2); do { prog[*fixup_prog1].imm = map_fds[4]; fixup_prog1++; @@ -692,7 +693,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type, } if (*fixup_prog2) { - map_fds[5] = create_prog_array(prog_type, 8, 7); + map_fds[5] = create_prog_array(prog_type, 8, 7, 1, 2); do { prog[*fixup_prog2].imm = map_fds[5]; fixup_prog2++; diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c index ebcbf154c460..604b46151736 100644 --- a/tools/testing/selftests/bpf/verifier/ref_tracking.c +++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c @@ -455,7 +455,7 @@ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7), /* bpf_tail_call() */ - BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_MOV64_IMM(BPF_REG_3, 3), BPF_LD_MAP_FD(BPF_REG_2, 0), BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), @@ -478,7 +478,7 @@ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), BPF_EMIT_CALL(BPF_FUNC_sk_release), /* bpf_tail_call() */ - BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_MOV64_IMM(BPF_REG_3, 3), BPF_LD_MAP_FD(BPF_REG_2, 0), BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), @@ -497,7 +497,7 @@ BPF_SK_LOOKUP(sk_lookup_tcp), /* bpf_tail_call() */ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_MOV64_IMM(BPF_REG_3, 3), BPF_LD_MAP_FD(BPF_REG_2, 0), BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), diff --git a/tools/testing/selftests/bpf/verifier/runtime_jit.c b/tools/testing/selftests/bpf/verifier/runtime_jit.c index a9a8f620e71c..94c399d1faca 100644 --- a/tools/testing/selftests/bpf/verifier/runtime_jit.c +++ b/tools/testing/selftests/bpf/verifier/runtime_jit.c @@ -27,6 +27,19 @@ { "runtime/jit: tail_call within bounds, no prog", .insns = { + BPF_MOV64_IMM(BPF_REG_3, 3), + BPF_LD_MAP_FD(BPF_REG_2, 0), + 
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 1 }, + .result = ACCEPT, + .retval = 1, +}, +{ + "runtime/jit: tail_call within bounds, key 2", + .insns = { BPF_MOV64_IMM(BPF_REG_3, 2), BPF_LD_MAP_FD(BPF_REG_2, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), @@ -35,9 +48,147 @@ }, .fixup_prog1 = { 1 }, .result = ACCEPT, + .retval = 24, +}, +{ + "runtime/jit: tail_call within bounds, key 2 / key 2, first branch", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 13), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 3), + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 5, 9 }, + .result = ACCEPT, + .retval = 24, +}, +{ + "runtime/jit: tail_call within bounds, key 2 / key 2, second branch", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 14), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 3), + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 5, 9 }, + .result = ACCEPT, + .retval = 24, +}, +{ + "runtime/jit: tail_call within bounds, key 0 / key 2, first branch", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 13), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 3), + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 5, 9 }, + .result = ACCEPT, + .retval = 24, +}, +{ + "runtime/jit: tail_call within bounds, key 0 / key 2, second branch", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 14), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 3), + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 5, 9 }, + .result = ACCEPT, + .retval = 42, +}, +{ + "runtime/jit: tail_call within bounds, different maps, first branch", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 13), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 3), + 
BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 5 }, + .fixup_prog2 = { 9 }, + .result_unpriv = REJECT, + .errstr_unpriv = "tail_call abusing map_ptr", + .result = ACCEPT, .retval = 1, }, { + "runtime/jit: tail_call within bounds, different maps, second branch", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 14), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 3), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 5 }, + .fixup_prog2 = { 9 }, + .result_unpriv = REJECT, + .errstr_unpriv = "tail_call abusing map_ptr", + .result = ACCEPT, + .retval = 42, +}, +{ "runtime/jit: tail_call out of bounds", .insns = { BPF_MOV64_IMM(BPF_REG_3, 256), diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh index 47315fe48d5a..24dd8ed48580 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh @@ -232,7 +232,7 @@ test_mc_aware() stop_traffic local ucth1=${uc_rate[1]} - start_traffic $h1 own bc bc + start_traffic $h1 192.0.2.65 bc bc local d0=$(date +%s) local t0=$(ethtool_stats_get $h3 rx_octets_prio_0) @@ -254,7 +254,11 @@ test_mc_aware() ret = 100 * ($ucth1 - $ucth2) / $ucth1 if (ret > 0) { ret } else { 0 } ") - check_err $(bc <<< "$deg > 25") + + # Minimum shaper of 200Mbps on MC TCs should cause about 20% of + # degradation on 1Gbps link. + check_err $(bc <<< "$deg < 15") "Minimum shaper not in effect" + check_err $(bc <<< "$deg > 25") "MC traffic degrades UC performance too much" local interval=$((d1 - d0)) local mc_ir=$(rate $u0 $u1 $interval) diff --git a/tools/testing/selftests/filesystems/epoll/Makefile b/tools/testing/selftests/filesystems/epoll/Makefile index e62f3d4f68da..78ae4aaf7141 100644 --- a/tools/testing/selftests/filesystems/epoll/Makefile +++ b/tools/testing/selftests/filesystems/epoll/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 CFLAGS += -I../../../../../usr/include/ -LDFLAGS += -lpthread +LDLIBS += -lpthread TEST_GEN_PROGS := epoll_wakeup_test include ../../lib.mk diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh index b879305a766d..5b8c0fedee76 100755 --- a/tools/testing/selftests/firmware/fw_lib.sh +++ b/tools/testing/selftests/firmware/fw_lib.sh @@ -34,6 +34,12 @@ test_modprobe() check_mods() { + local uid=$(id -u) + if [ $uid -ne 0 ]; then + echo "skip all tests: must be run as root" >&2 + exit $ksft_skip + fi + trap "test_modprobe" EXIT if [ ! -d $DIR ]; then modprobe test_firmware diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc index 36fb59f886ea..1a52f2883fe0 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc @@ -3,6 +3,8 @@ # description: ftrace - stacktrace filter command # flags: instance +[ ! 
-f set_ftrace_filter ] && exit_unsupported + echo _do_fork:stacktrace >> set_ftrace_filter grep -q "_do_fork:stacktrace:unlimited" set_ftrace_filter diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_cpumask.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_cpumask.tc index 86a1f07ef2ca..71fa3f49e35e 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_cpumask.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_cpumask.tc @@ -15,6 +15,11 @@ if [ $NP -eq 1 ] ;then exit_unresolved fi +if ! grep -q "function" available_tracers ; then + echo "Function trace is not enabled" + exit_unsupported +fi + ORIG_CPUMASK=`cat tracing_cpumask` do_reset() { diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions index 86986c4bba54..5d4550591ff9 100644 --- a/tools/testing/selftests/ftrace/test.d/functions +++ b/tools/testing/selftests/ftrace/test.d/functions @@ -46,6 +46,9 @@ reset_events_filter() { # reset all current setting filters } reset_ftrace_filter() { # reset all triggers in set_ftrace_filter + if [ ! -f set_ftrace_filter ]; then + return 0 + fi echo > set_ftrace_filter grep -v '^#' set_ftrace_filter | while read t; do tr=`echo $t | cut -d: -f2` @@ -93,7 +96,7 @@ initialize_ftrace() { # Reset ftrace to initial-state disable_events [ -f set_event_pid ] && echo > set_event_pid [ -f set_ftrace_pid ] && echo > set_ftrace_pid - [ -f set_ftrace_filter ] && echo | tee set_ftrace_* + [ -f set_ftrace_notrace ] && echo > set_ftrace_notrace [ -f set_graph_function ] && echo | tee set_graph_* [ -f stack_trace_filter ] && echo > stack_trace_filter [ -f kprobe_events ] && echo > kprobe_events diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc index 5862eee91e1d..6e3dbe5f96b7 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc @@ -20,9 +20,9 @@ while read i; do test $N -eq 256 && break done -L=`wc -l kprobe_events` -if [ $L -ne $N ]; then - echo "The number of kprobes events ($L) is not $N" +L=`cat kprobe_events | wc -l` +if [ $L -ne 256 ]; then + echo "The number of kprobes events ($L) is not 256" exit_fail fi diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc index 1221240f8cf6..3f2aee115f6e 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc @@ -21,10 +21,10 @@ grep -q "snapshot()" README || exit_unsupported # version issue echo "Test expected snapshot action failure" -echo 'hist:keys=comm:onmatch(sched.sched_wakeup).snapshot()' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger && exit_fail +echo 'hist:keys=comm:onmatch(sched.sched_wakeup).snapshot()' >> events/sched/sched_waking/trigger && exit_fail echo "Test expected save action failure" -echo 'hist:keys=comm:onmatch(sched.sched_wakeup).save(comm,prio)' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger && exit_fail +echo 'hist:keys=comm:onmatch(sched.sched_wakeup).save(comm,prio)' >> events/sched/sched_waking/trigger && exit_fail exit_xfail diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc 
b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc index 064a284e4e75..c80007aa9f86 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc @@ -16,7 +16,7 @@ grep -q "onchange(var)" README || exit_unsupported # version issue echo "Test onchange action" -echo 'hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio) if comm=="ping"' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger +echo 'hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio) if comm=="ping"' >> events/sched/sched_waking/trigger ping $LOCALHOST -c 3 nice -n 1 ping $LOCALHOST -c 3 diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc index 18fff69fc433..f546c1b66a9b 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc @@ -23,9 +23,9 @@ grep -q "snapshot()" README || exit_unsupported # version issue echo "Test snapshot action" -echo 1 > /sys/kernel/debug/tracing/events/sched/enable +echo 1 > events/sched/enable -echo 'hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio):onchange($newprio).snapshot() if comm=="ping"' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger +echo 'hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio):onchange($newprio).snapshot() if comm=="ping"' >> events/sched/sched_waking/trigger ping $LOCALHOST -c 3 nice -n 1 ping $LOCALHOST -c 3 diff --git a/tools/testing/selftests/kselftest/module.sh b/tools/testing/selftests/kselftest/module.sh index 18e1c7992d30..fb4733faff12 100755 --- a/tools/testing/selftests/kselftest/module.sh +++ b/tools/testing/selftests/kselftest/module.sh @@ -9,7 +9,7 @@ # # #!/bin/sh # SPDX-License-Identifier: GPL-2.0+ -# $(dirname $0)/../kselftest_module.sh "description" module_name +# $(dirname $0)/../kselftest/module.sh "description" module_name # # Example: tools/testing/selftests/lib/printf.sh diff --git a/tools/testing/selftests/kselftest/prefix.pl b/tools/testing/selftests/kselftest/prefix.pl index ec7e48118183..31f7c2a0a8bd 100755 --- a/tools/testing/selftests/kselftest/prefix.pl +++ b/tools/testing/selftests/kselftest/prefix.pl @@ -3,6 +3,7 @@ # Prefix all lines with "# ", unbuffered. Command being piped in may need # to have unbuffering forced with "stdbuf -i0 -o0 -e0 $cmd". 
use strict; +use IO::Handle; binmode STDIN; binmode STDOUT; diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh index 84de7bc74f2c..a8d20cbb711c 100644 --- a/tools/testing/selftests/kselftest/runner.sh +++ b/tools/testing/selftests/kselftest/runner.sh @@ -79,6 +79,7 @@ run_one() if [ $rc -eq $skip_rc ]; then \ echo "not ok $test_num $TEST_HDR_MSG # SKIP" elif [ $rc -eq $timeout_rc ]; then \ + echo "#" echo "not ok $test_num $TEST_HDR_MSG # TIMEOUT" else echo "not ok $test_num $TEST_HDR_MSG # exit=$rc" diff --git a/tools/testing/selftests/livepatch/functions.sh b/tools/testing/selftests/livepatch/functions.sh index 31eb09e38729..a6e3d5517a6f 100644 --- a/tools/testing/selftests/livepatch/functions.sh +++ b/tools/testing/selftests/livepatch/functions.sh @@ -7,6 +7,9 @@ MAX_RETRIES=600 RETRY_INTERVAL=".1" # seconds +# Kselftest framework requirement - SKIP code is 4 +ksft_skip=4 + # log(msg) - write message to kernel log # msg - insightful words function log() { @@ -18,7 +21,16 @@ function log() { function skip() { log "SKIP: $1" echo "SKIP: $1" >&2 - exit 4 + exit $ksft_skip +} + +# root test +function is_root() { + uid=$(id -u) + if [ $uid -ne 0 ]; then + echo "skip all tests: must be run as root" >&2 + exit $ksft_skip + fi } # die(msg) - game over, man @@ -62,6 +74,7 @@ function set_ftrace_enabled() { # for verbose livepatching output and turn on # the ftrace_enabled sysctl. function setup_config() { + is_root push_config set_dynamic_debug set_ftrace_enabled 1 diff --git a/tools/testing/selftests/livepatch/test-state.sh b/tools/testing/selftests/livepatch/test-state.sh index dc2908c22c26..a08212708115 100755 --- a/tools/testing/selftests/livepatch/test-state.sh +++ b/tools/testing/selftests/livepatch/test-state.sh @@ -8,8 +8,7 @@ MOD_LIVEPATCH=test_klp_state MOD_LIVEPATCH2=test_klp_state2 MOD_LIVEPATCH3=test_klp_state3 -set_dynamic_debug - +setup_config # TEST: Loading and removing a module that modifies the system state diff --git a/tools/testing/selftests/net/forwarding/loopback.sh b/tools/testing/selftests/net/forwarding/loopback.sh index 6e4626ae71b0..8f4057310b5b 100755 --- a/tools/testing/selftests/net/forwarding/loopback.sh +++ b/tools/testing/selftests/net/forwarding/loopback.sh @@ -1,6 +1,9 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 +# Kselftest framework requirement - SKIP code is 4. 
+ksft_skip=4 + ALL_TESTS="loopback_test" NUM_NETIFS=2 source tc_common.sh @@ -72,6 +75,11 @@ setup_prepare() h1_create h2_create + + if ethtool -k $h1 | grep loopback | grep -q fixed; then + log_test "SKIP: dev $h1 does not support loopback feature" + exit $ksft_skip + fi } cleanup() diff --git a/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh b/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh index fef88eb4b873..fa6a88c50750 100755 --- a/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh +++ b/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh @@ -36,7 +36,7 @@ h2_destroy() { ip -6 route del 2001:db8:1::/64 vrf v$h2 ip -4 route del 192.0.2.0/28 vrf v$h2 - simple_if_fini $h2 192.0.2.130/28 + simple_if_fini $h2 192.0.2.130/28 2001:db8:2::2/64 } router_create() diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index d697815d2785..71a62e7e35b1 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh @@ -11,9 +11,9 @@ # R1 and R2 (also implemented with namespaces), with different MTUs: # # segment a_r1 segment b_r1 a_r1: 2000 -# .--------------R1--------------. a_r2: 1500 -# A B a_r3: 2000 -# '--------------R2--------------' a_r4: 1400 +# .--------------R1--------------. b_r1: 1400 +# A B a_r2: 2000 +# '--------------R2--------------' b_r2: 1500 # segment a_r2 segment b_r2 # # Check that PMTU exceptions with the correct PMTU are created. Then diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index 13e5ef615026..0ea44d975b6c 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c @@ -722,34 +722,6 @@ TEST_F(tls, recv_lowat) EXPECT_EQ(memcmp(send_mem, recv_mem + 10, 5), 0); } -TEST_F(tls, recv_rcvbuf) -{ - char send_mem[4096]; - char recv_mem[4096]; - int rcv_buf = 1024; - - memset(send_mem, 0x1c, sizeof(send_mem)); - - EXPECT_EQ(setsockopt(self->cfd, SOL_SOCKET, SO_RCVBUF, - &rcv_buf, sizeof(rcv_buf)), 0); - - EXPECT_EQ(send(self->fd, send_mem, 512, 0), 512); - memset(recv_mem, 0, sizeof(recv_mem)); - EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), 512); - EXPECT_EQ(memcmp(send_mem, recv_mem, 512), 0); - - if (self->notls) - return; - - EXPECT_EQ(send(self->fd, send_mem, 4096, 0), 4096); - memset(recv_mem, 0, sizeof(recv_mem)); - EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), -1); - EXPECT_EQ(errno, EMSGSIZE); - - EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), -1); - EXPECT_EQ(errno, EMSGSIZE); -} - TEST_F(tls, bidir) { char const *test_str = "test_read"; diff --git a/tools/testing/selftests/netfilter/nft_flowtable.sh b/tools/testing/selftests/netfilter/nft_flowtable.sh index 16571ac1dab4..d3e0809ab368 100755 --- a/tools/testing/selftests/netfilter/nft_flowtable.sh +++ b/tools/testing/selftests/netfilter/nft_flowtable.sh @@ -226,17 +226,19 @@ check_transfer() return 0 } -test_tcp_forwarding() +test_tcp_forwarding_ip() { local nsa=$1 local nsb=$2 + local dstip=$3 + local dstport=$4 local lret=0 ip netns exec $nsb nc -w 5 -l -p 12345 < "$ns2in" > "$ns2out" & lpid=$! sleep 1 - ip netns exec $nsa nc -w 4 10.0.2.99 12345 < "$ns1in" > "$ns1out" & + ip netns exec $nsa nc -w 4 "$dstip" "$dstport" < "$ns1in" > "$ns1out" & cpid=$! sleep 3 @@ -258,6 +260,28 @@ test_tcp_forwarding() return $lret } +test_tcp_forwarding() +{ + test_tcp_forwarding_ip "$1" "$2" 10.0.2.99 12345 + + return $? +} + +test_tcp_forwarding_nat() +{ + local lret + + test_tcp_forwarding_ip "$1" "$2" 10.0.2.99 12345 + lret=$? 
+ + if [ $lret -eq 0 ] ; then + test_tcp_forwarding_ip "$1" "$2" 10.6.6.6 1666 + lret=$? + fi + + return $lret +} + make_file "$ns1in" "ns1" make_file "$ns2in" "ns2" @@ -283,14 +307,19 @@ ip -net ns2 route add 192.168.10.1 via 10.0.2.1 # Same, but with NAT enabled. ip netns exec nsr1 nft -f - <<EOF table ip nat { + chain prerouting { + type nat hook prerouting priority 0; policy accept; + meta iif "veth0" ip daddr 10.6.6.6 tcp dport 1666 counter dnat ip to 10.0.2.99:12345 + } + chain postrouting { type nat hook postrouting priority 0; policy accept; - meta oifname "veth1" masquerade + meta oifname "veth1" counter masquerade } } EOF -test_tcp_forwarding ns1 ns2 +test_tcp_forwarding_nat ns1 ns2 if [ $? -eq 0 ] ;then echo "PASS: flow offloaded for ns1/ns2 with NAT" @@ -313,7 +342,7 @@ fi ip netns exec ns1 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null ip netns exec ns2 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null -test_tcp_forwarding ns1 ns2 +test_tcp_forwarding_nat ns1 ns2 if [ $? -eq 0 ] ;then echo "PASS: flow offloaded for ns1/ns2 with NAT and pmtu discovery" else diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh index 1be55e705780..d7e07f4c3d7f 100755 --- a/tools/testing/selftests/netfilter/nft_nat.sh +++ b/tools/testing/selftests/netfilter/nft_nat.sh @@ -8,9 +8,14 @@ ksft_skip=4 ret=0 test_inet_nat=true +sfx=$(mktemp -u "XXXXXXXX") +ns0="ns0-$sfx" +ns1="ns1-$sfx" +ns2="ns2-$sfx" + cleanup() { - for i in 0 1 2; do ip netns del ns$i;done + for i in 0 1 2; do ip netns del ns$i-"$sfx";done } nft --version > /dev/null 2>&1 @@ -25,40 +30,49 @@ if [ $? -ne 0 ];then exit $ksft_skip fi -ip netns add ns0 +ip netns add "$ns0" if [ $? -ne 0 ];then - echo "SKIP: Could not create net namespace" + echo "SKIP: Could not create net namespace $ns0" exit $ksft_skip fi trap cleanup EXIT -ip netns add ns1 -ip netns add ns2 +ip netns add "$ns1" +if [ $? -ne 0 ];then + echo "SKIP: Could not create net namespace $ns1" + exit $ksft_skip +fi + +ip netns add "$ns2" +if [ $? -ne 0 ];then + echo "SKIP: Could not create net namespace $ns2" + exit $ksft_skip +fi -ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 > /dev/null 2>&1 +ip link add veth0 netns "$ns0" type veth peer name eth0 netns "$ns1" > /dev/null 2>&1 if [ $? 
-ne 0 ];then echo "SKIP: No virtual ethernet pair device support in kernel" exit $ksft_skip fi -ip link add veth1 netns ns0 type veth peer name eth0 netns ns2 +ip link add veth1 netns "$ns0" type veth peer name eth0 netns "$ns2" -ip -net ns0 link set lo up -ip -net ns0 link set veth0 up -ip -net ns0 addr add 10.0.1.1/24 dev veth0 -ip -net ns0 addr add dead:1::1/64 dev veth0 +ip -net "$ns0" link set lo up +ip -net "$ns0" link set veth0 up +ip -net "$ns0" addr add 10.0.1.1/24 dev veth0 +ip -net "$ns0" addr add dead:1::1/64 dev veth0 -ip -net ns0 link set veth1 up -ip -net ns0 addr add 10.0.2.1/24 dev veth1 -ip -net ns0 addr add dead:2::1/64 dev veth1 +ip -net "$ns0" link set veth1 up +ip -net "$ns0" addr add 10.0.2.1/24 dev veth1 +ip -net "$ns0" addr add dead:2::1/64 dev veth1 for i in 1 2; do - ip -net ns$i link set lo up - ip -net ns$i link set eth0 up - ip -net ns$i addr add 10.0.$i.99/24 dev eth0 - ip -net ns$i route add default via 10.0.$i.1 - ip -net ns$i addr add dead:$i::99/64 dev eth0 - ip -net ns$i route add default via dead:$i::1 + ip -net ns$i-$sfx link set lo up + ip -net ns$i-$sfx link set eth0 up + ip -net ns$i-$sfx addr add 10.0.$i.99/24 dev eth0 + ip -net ns$i-$sfx route add default via 10.0.$i.1 + ip -net ns$i-$sfx addr add dead:$i::99/64 dev eth0 + ip -net ns$i-$sfx route add default via dead:$i::1 done bad_counter() @@ -66,8 +80,9 @@ bad_counter() local ns=$1 local counter=$2 local expect=$3 + local tag=$4 - echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2 + echo "ERROR: $counter counter in $ns has unexpected value (expected $expect) at $tag" 1>&2 ip netns exec $ns nft list counter inet filter $counter 1>&2 } @@ -78,24 +93,24 @@ check_counters() cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84") if [ $? -ne 0 ]; then - bad_counter $ns ns0in "packets 1 bytes 84" + bad_counter $ns ns0in "packets 1 bytes 84" "check_counters 1" lret=1 fi cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84") if [ $? -ne 0 ]; then - bad_counter $ns ns0out "packets 1 bytes 84" + bad_counter $ns ns0out "packets 1 bytes 84" "check_counters 2" lret=1 fi expect="packets 1 bytes 104" cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter $ns ns0in6 "$expect" + bad_counter $ns ns0in6 "$expect" "check_counters 3" lret=1 fi cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter $ns ns0out6 "$expect" + bad_counter $ns ns0out6 "$expect" "check_counters 4" lret=1 fi @@ -107,41 +122,41 @@ check_ns0_counters() local ns=$1 local lret=0 - cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns0in | grep -q "packets 0 bytes 0") if [ $? -ne 0 ]; then - bad_counter ns0 ns0in "packets 0 bytes 0" + bad_counter "$ns0" ns0in "packets 0 bytes 0" "check_ns0_counters 1" lret=1 fi - cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0") if [ $? -ne 0 ]; then - bad_counter ns0 ns0in6 "packets 0 bytes 0" + bad_counter "$ns0" ns0in6 "packets 0 bytes 0" lret=1 fi - cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns0out | grep -q "packets 0 bytes 0") if [ $? 
-ne 0 ]; then - bad_counter ns0 ns0out "packets 0 bytes 0" + bad_counter "$ns0" ns0out "packets 0 bytes 0" "check_ns0_counters 2" lret=1 fi - cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0") if [ $? -ne 0 ]; then - bad_counter ns0 ns0out6 "packets 0 bytes 0" + bad_counter "$ns0" ns0out6 "packets 0 bytes 0" "check_ns0_counters3 " lret=1 fi for dir in "in" "out" ; do expect="packets 1 bytes 84" - cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ${ns}${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns0 $ns$dir "$expect" + bad_counter "$ns0" $ns$dir "$expect" "check_ns0_counters 4" lret=1 fi expect="packets 1 bytes 104" - cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ${ns}${dir}6 | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns0 $ns$dir6 "$expect" + bad_counter "$ns0" $ns$dir6 "$expect" "check_ns0_counters 5" lret=1 fi done @@ -152,7 +167,7 @@ check_ns0_counters() reset_counters() { for i in 0 1 2;do - ip netns exec ns$i nft reset counters inet > /dev/null + ip netns exec ns$i-$sfx nft reset counters inet > /dev/null done } @@ -166,7 +181,7 @@ test_local_dnat6() IPF="ip6" fi -ip netns exec ns0 nft -f - <<EOF +ip netns exec "$ns0" nft -f /dev/stdin <<EOF table $family nat { chain output { type nat hook output priority 0; policy accept; @@ -180,7 +195,7 @@ EOF fi # ping netns1, expect rewrite to netns2 - ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null + ip netns exec "$ns0" ping -q -c 1 dead:1::99 > /dev/null if [ $? -ne 0 ]; then lret=1 echo "ERROR: ping6 failed" @@ -189,18 +204,18 @@ EOF expect="packets 0 bytes 0" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns0 ns1$dir "$expect" + bad_counter "$ns0" ns1$dir "$expect" "test_local_dnat6 1" lret=1 fi done expect="packets 1 bytes 104" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns0 ns2$dir "$expect" + bad_counter "$ns0" ns2$dir "$expect" "test_local_dnat6 2" lret=1 fi done @@ -208,9 +223,9 @@ EOF # expect 0 count in ns1 expect="packets 0 bytes 0" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns0${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns1" ns0$dir "$expect" "test_local_dnat6 3" lret=1 fi done @@ -218,15 +233,15 @@ EOF # expect 1 packet in ns2 expect="packets 1 bytes 104" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns2" nft list counter inet filter ns0${dir} | grep -q "$expect") if [ $? 
-ne 0 ]; then - bad_counter ns2 ns0$dir "$expect" + bad_counter "$ns2" ns0$dir "$expect" "test_local_dnat6 4" lret=1 fi done - test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was $family NATted to ns2" - ip netns exec ns0 nft flush chain ip6 nat output + test $lret -eq 0 && echo "PASS: ipv6 ping to $ns1 was $family NATted to $ns2" + ip netns exec "$ns0" nft flush chain ip6 nat output return $lret } @@ -241,7 +256,7 @@ test_local_dnat() IPF="ip" fi -ip netns exec ns0 nft -f - <<EOF 2>/dev/null +ip netns exec "$ns0" nft -f /dev/stdin <<EOF 2>/dev/null table $family nat { chain output { type nat hook output priority 0; policy accept; @@ -260,7 +275,7 @@ EOF fi # ping netns1, expect rewrite to netns2 - ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null + ip netns exec "$ns0" ping -q -c 1 10.0.1.99 > /dev/null if [ $? -ne 0 ]; then lret=1 echo "ERROR: ping failed" @@ -269,18 +284,18 @@ EOF expect="packets 0 bytes 0" for dir in "in" "out" ; do - cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns0 ns1$dir "$expect" + bad_counter "$ns0" ns1$dir "$expect" "test_local_dnat 1" lret=1 fi done expect="packets 1 bytes 84" for dir in "in" "out" ; do - cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns0 ns2$dir "$expect" + bad_counter "$ns0" ns2$dir "$expect" "test_local_dnat 2" lret=1 fi done @@ -288,9 +303,9 @@ EOF # expect 0 count in ns1 expect="packets 0 bytes 0" for dir in "in" "out" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns0${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns1" ns0$dir "$expect" "test_local_dnat 3" lret=1 fi done @@ -298,19 +313,19 @@ EOF # expect 1 packet in ns2 expect="packets 1 bytes 84" for dir in "in" "out" ; do - cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns2" nft list counter inet filter ns0${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns2 ns0$dir "$expect" + bad_counter "$ns2" ns0$dir "$expect" "test_local_dnat 4" lret=1 fi done - test $lret -eq 0 && echo "PASS: ping to ns1 was $family NATted to ns2" + test $lret -eq 0 && echo "PASS: ping to $ns1 was $family NATted to $ns2" - ip netns exec ns0 nft flush chain $family nat output + ip netns exec "$ns0" nft flush chain $family nat output reset_counters - ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null + ip netns exec "$ns0" ping -q -c 1 10.0.1.99 > /dev/null if [ $? -ne 0 ]; then lret=1 echo "ERROR: ping failed" @@ -319,17 +334,17 @@ EOF expect="packets 1 bytes 84" for dir in "in" "out" ; do - cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns1$dir "$expect" + bad_counter "$ns1" ns1$dir "$expect" "test_local_dnat 5" lret=1 fi done expect="packets 0 bytes 0" for dir in "in" "out" ; do - cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? 
-ne 0 ]; then - bad_counter ns0 ns2$dir "$expect" + bad_counter "$ns0" ns2$dir "$expect" "test_local_dnat 6" lret=1 fi done @@ -337,9 +352,9 @@ EOF # expect 1 count in ns1 expect="packets 1 bytes 84" for dir in "in" "out" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns0${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns0 ns0$dir "$expect" + bad_counter "$ns0" ns0$dir "$expect" "test_local_dnat 7" lret=1 fi done @@ -347,14 +362,14 @@ EOF # expect 0 packet in ns2 expect="packets 0 bytes 0" for dir in "in" "out" ; do - cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns2" nft list counter inet filter ns0${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns2 ns2$dir "$expect" + bad_counter "$ns2" ns0$dir "$expect" "test_local_dnat 8" lret=1 fi done - test $lret -eq 0 && echo "PASS: ping to ns1 OK after $family nat output chain flush" + test $lret -eq 0 && echo "PASS: ping to $ns1 OK after $family nat output chain flush" return $lret } @@ -366,26 +381,26 @@ test_masquerade6() local natflags=$2 local lret=0 - ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null + ip netns exec "$ns0" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null - ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 if [ $? -ne 0 ] ; then - echo "ERROR: cannot ping ns1 from ns2 via ipv6" + echo "ERROR: cannot ping $ns1 from $ns2 via ipv6" return 1 lret=1 fi expect="packets 1 bytes 104" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns2$dir "$expect" + bad_counter "$ns1" ns2$dir "$expect" "test_masquerade6 1" lret=1 fi - cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns2 ns1$dir "$expect" + bad_counter "$ns2" ns1$dir "$expect" "test_masquerade6 2" lret=1 fi done @@ -393,7 +408,7 @@ test_masquerade6() reset_counters # add masquerading rule -ip netns exec ns0 nft -f - <<EOF +ip netns exec "$ns0" nft -f /dev/stdin <<EOF table $family nat { chain postrouting { type nat hook postrouting priority 0; policy accept; @@ -406,24 +421,24 @@ EOF return $ksft_skip fi - ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 if [ $? -ne 0 ] ; then - echo "ERROR: cannot ping ns1 from ns2 with active $family masquerade $natflags" + echo "ERROR: cannot ping $ns1 from $ns2 with active $family masquerade $natflags" lret=1 fi # ns1 should have seen packets from ns0, due to masquerade expect="packets 1 bytes 104" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns0${dir} | grep -q "$expect") if [ $? 
-ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns1" ns0$dir "$expect" "test_masquerade6 3" lret=1 fi - cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns2 ns1$dir "$expect" + bad_counter "$ns2" ns1$dir "$expect" "test_masquerade6 4" lret=1 fi done @@ -431,32 +446,32 @@ EOF # ns1 should not have seen packets from ns2, due to masquerade expect="packets 0 bytes 0" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns1" ns0$dir "$expect" "test_masquerade6 5" lret=1 fi - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns2 ns1$dir "$expect" + bad_counter "$ns0" ns1$dir "$expect" "test_masquerade6 6" lret=1 fi done - ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 if [ $? -ne 0 ] ; then - echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerade $natflags (attempt 2)" + echo "ERROR: cannot ping $ns1 from $ns2 with active ipv6 masquerade $natflags (attempt 2)" lret=1 fi - ip netns exec ns0 nft flush chain $family nat postrouting + ip netns exec "$ns0" nft flush chain $family nat postrouting if [ $? -ne 0 ]; then echo "ERROR: Could not flush $family nat postrouting" 1>&2 lret=1 fi - test $lret -eq 0 && echo "PASS: $family IPv6 masquerade $natflags for ns2" + test $lret -eq 0 && echo "PASS: $family IPv6 masquerade $natflags for $ns2" return $lret } @@ -467,26 +482,26 @@ test_masquerade() local natflags=$2 local lret=0 - ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null - ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null + ip netns exec "$ns0" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null + ip netns exec "$ns0" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null - ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 if [ $? -ne 0 ] ; then - echo "ERROR: cannot ping ns1 from ns2 $natflags" + echo "ERROR: cannot ping $ns1 from "$ns2" $natflags" lret=1 fi expect="packets 1 bytes 84" for dir in "in" "out" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns2$dir "$expect" + bad_counter "$ns1" ns2$dir "$expect" "test_masquerade 1" lret=1 fi - cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? 
-ne 0 ]; then - bad_counter ns2 ns1$dir "$expect" + bad_counter "$ns2" ns1$dir "$expect" "test_masquerade 2" lret=1 fi done @@ -494,7 +509,7 @@ test_masquerade() reset_counters # add masquerading rule -ip netns exec ns0 nft -f - <<EOF +ip netns exec "$ns0" nft -f /dev/stdin <<EOF table $family nat { chain postrouting { type nat hook postrouting priority 0; policy accept; @@ -507,24 +522,24 @@ EOF return $ksft_skip fi - ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 if [ $? -ne 0 ] ; then - echo "ERROR: cannot ping ns1 from ns2 with active $family masquerade $natflags" + echo "ERROR: cannot ping $ns1 from $ns2 with active $family masquerade $natflags" lret=1 fi # ns1 should have seen packets from ns0, due to masquerade expect="packets 1 bytes 84" for dir in "in" "out" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns0${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns1" ns0$dir "$expect" "test_masquerade 3" lret=1 fi - cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns2 ns1$dir "$expect" + bad_counter "$ns2" ns1$dir "$expect" "test_masquerade 4" lret=1 fi done @@ -532,32 +547,32 @@ EOF # ns1 should not have seen packets from ns2, due to masquerade expect="packets 0 bytes 0" for dir in "in" "out" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns1" ns0$dir "$expect" "test_masquerade 5" lret=1 fi - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns2 ns1$dir "$expect" + bad_counter "$ns0" ns1$dir "$expect" "test_masquerade 6" lret=1 fi done - ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 if [ $? -ne 0 ] ; then - echo "ERROR: cannot ping ns1 from ns2 with active ip masquerade $natflags (attempt 2)" + echo "ERROR: cannot ping $ns1 from $ns2 with active ip masquerade $natflags (attempt 2)" lret=1 fi - ip netns exec ns0 nft flush chain $family nat postrouting + ip netns exec "$ns0" nft flush chain $family nat postrouting if [ $? -ne 0 ]; then echo "ERROR: Could not flush $family nat postrouting" 1>&2 lret=1 fi - test $lret -eq 0 && echo "PASS: $family IP masquerade $natflags for ns2" + test $lret -eq 0 && echo "PASS: $family IP masquerade $natflags for $ns2" return $lret } @@ -567,25 +582,25 @@ test_redirect6() local family=$1 local lret=0 - ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null + ip netns exec "$ns0" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null - ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 if [ $? 
-ne 0 ] ; then - echo "ERROR: cannnot ping ns1 from ns2 via ipv6" + echo "ERROR: cannnot ping $ns1 from $ns2 via ipv6" lret=1 fi expect="packets 1 bytes 104" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns2$dir "$expect" + bad_counter "$ns1" ns2$dir "$expect" "test_redirect6 1" lret=1 fi - cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns2 ns1$dir "$expect" + bad_counter "$ns2" ns1$dir "$expect" "test_redirect6 2" lret=1 fi done @@ -593,7 +608,7 @@ test_redirect6() reset_counters # add redirect rule -ip netns exec ns0 nft -f - <<EOF +ip netns exec "$ns0" nft -f /dev/stdin <<EOF table $family nat { chain prerouting { type nat hook prerouting priority 0; policy accept; @@ -606,18 +621,18 @@ EOF return $ksft_skip fi - ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 if [ $? -ne 0 ] ; then - echo "ERROR: cannot ping ns1 from ns2 via ipv6 with active $family redirect" + echo "ERROR: cannot ping $ns1 from $ns2 via ipv6 with active $family redirect" lret=1 fi # ns1 should have seen no packets from ns2, due to redirection expect="packets 0 bytes 0" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns1" ns0$dir "$expect" "test_redirect6 3" lret=1 fi done @@ -625,20 +640,20 @@ EOF # ns0 should have seen packets from ns2, due to masquerade expect="packets 1 bytes 104" for dir in "in6" "out6" ; do - cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns1" ns0$dir "$expect" "test_redirect6 4" lret=1 fi done - ip netns exec ns0 nft delete table $family nat + ip netns exec "$ns0" nft delete table $family nat if [ $? -ne 0 ]; then echo "ERROR: Could not delete $family nat table" 1>&2 lret=1 fi - test $lret -eq 0 && echo "PASS: $family IPv6 redirection for ns2" + test $lret -eq 0 && echo "PASS: $family IPv6 redirection for $ns2" return $lret } @@ -648,26 +663,26 @@ test_redirect() local family=$1 local lret=0 - ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null - ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null + ip netns exec "$ns0" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null + ip netns exec "$ns0" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null - ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 if [ $? -ne 0 ] ; then - echo "ERROR: cannot ping ns1 from ns2" + echo "ERROR: cannot ping $ns1 from $ns2" lret=1 fi expect="packets 1 bytes 84" for dir in "in" "out" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? 
-ne 0 ]; then - bad_counter ns1 ns2$dir "$expect" + bad_counter "$ns1" $ns2$dir "$expect" "test_redirect 1" lret=1 fi - cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns2 ns1$dir "$expect" + bad_counter "$ns2" ns1$dir "$expect" "test_redirect 2" lret=1 fi done @@ -675,7 +690,7 @@ test_redirect() reset_counters # add redirect rule -ip netns exec ns0 nft -f - <<EOF +ip netns exec "$ns0" nft -f /dev/stdin <<EOF table $family nat { chain prerouting { type nat hook prerouting priority 0; policy accept; @@ -688,9 +703,9 @@ EOF return $ksft_skip fi - ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 + ip netns exec "$ns2" ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 if [ $? -ne 0 ] ; then - echo "ERROR: cannot ping ns1 from ns2 with active $family ip redirect" + echo "ERROR: cannot ping $ns1 from $ns2 with active $family ip redirect" lret=1 fi @@ -698,9 +713,9 @@ EOF expect="packets 0 bytes 0" for dir in "in" "out" ; do - cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns1" ns0$dir "$expect" "test_redirect 3" lret=1 fi done @@ -708,28 +723,28 @@ EOF # ns0 should have seen packets from ns2, due to masquerade expect="packets 1 bytes 84" for dir in "in" "out" ; do - cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns2${dir} | grep -q "$expect") if [ $? -ne 0 ]; then - bad_counter ns1 ns0$dir "$expect" + bad_counter "$ns0" ns0$dir "$expect" "test_redirect 4" lret=1 fi done - ip netns exec ns0 nft delete table $family nat + ip netns exec "$ns0" nft delete table $family nat if [ $? -ne 0 ]; then echo "ERROR: Could not delete $family nat table" 1>&2 lret=1 fi - test $lret -eq 0 && echo "PASS: $family IP redirection for ns2" + test $lret -eq 0 && echo "PASS: $family IP redirection for $ns2" return $lret } -# ip netns exec ns0 ping -c 1 -q 10.0.$i.99 +# ip netns exec "$ns0" ping -c 1 -q 10.0.$i.99 for i in 0 1 2; do -ip netns exec ns$i nft -f - <<EOF +ip netns exec ns$i-$sfx nft -f /dev/stdin <<EOF table inet filter { counter ns0in {} counter ns1in {} @@ -796,18 +811,18 @@ done sleep 3 # test basic connectivity for i in 1 2; do - ip netns exec ns0 ping -c 1 -q 10.0.$i.99 > /dev/null + ip netns exec "$ns0" ping -c 1 -q 10.0.$i.99 > /dev/null if [ $? -ne 0 ];then echo "ERROR: Could not reach other namespace(s)" 1>&2 ret=1 fi - ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null + ip netns exec "$ns0" ping -c 1 -q dead:$i::99 > /dev/null if [ $? -ne 0 ];then echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2 ret=1 fi - check_counters ns$i + check_counters ns$i-$sfx if [ $? 
-ne 0 ]; then ret=1 fi @@ -820,7 +835,7 @@ for i in 1 2; do done if [ $ret -eq 0 ];then - echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2" + echo "PASS: netns routing/connectivity: $ns0 can reach $ns1 and $ns2" fi reset_counters @@ -846,4 +861,9 @@ reset_counters $test_inet_nat && test_redirect inet $test_inet_nat && test_redirect6 inet +if [ $ret -ne 0 ];then + echo -n "FAIL: " + nft --version +fi + exit $ret diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c index eec2663261f2..e8a657a5f48a 100644 --- a/tools/testing/selftests/rseq/param_test.c +++ b/tools/testing/selftests/rseq/param_test.c @@ -15,7 +15,7 @@ #include <errno.h> #include <stddef.h> -static inline pid_t gettid(void) +static inline pid_t rseq_gettid(void) { return syscall(__NR_gettid); } @@ -373,11 +373,12 @@ void *test_percpu_spinlock_thread(void *arg) rseq_percpu_unlock(&data->lock, cpu); #ifndef BENCHMARK if (i != 0 && !(i % (reps / 10))) - printf_verbose("tid %d: count %lld\n", (int) gettid(), i); + printf_verbose("tid %d: count %lld\n", + (int) rseq_gettid(), i); #endif } printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", - (int) gettid(), nr_abort, signals_delivered); + (int) rseq_gettid(), nr_abort, signals_delivered); if (!opt_disable_rseq && thread_data->reg && rseq_unregister_current_thread()) abort(); @@ -454,11 +455,12 @@ void *test_percpu_inc_thread(void *arg) } while (rseq_unlikely(ret)); #ifndef BENCHMARK if (i != 0 && !(i % (reps / 10))) - printf_verbose("tid %d: count %lld\n", (int) gettid(), i); + printf_verbose("tid %d: count %lld\n", + (int) rseq_gettid(), i); #endif } printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", - (int) gettid(), nr_abort, signals_delivered); + (int) rseq_gettid(), nr_abort, signals_delivered); if (!opt_disable_rseq && thread_data->reg && rseq_unregister_current_thread()) abort(); @@ -605,7 +607,7 @@ void *test_percpu_list_thread(void *arg) } printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", - (int) gettid(), nr_abort, signals_delivered); + (int) rseq_gettid(), nr_abort, signals_delivered); if (!opt_disable_rseq && rseq_unregister_current_thread()) abort(); @@ -796,7 +798,7 @@ void *test_percpu_buffer_thread(void *arg) } printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", - (int) gettid(), nr_abort, signals_delivered); + (int) rseq_gettid(), nr_abort, signals_delivered); if (!opt_disable_rseq && rseq_unregister_current_thread()) abort(); @@ -1011,7 +1013,7 @@ void *test_percpu_memcpy_buffer_thread(void *arg) } printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", - (int) gettid(), nr_abort, signals_delivered); + (int) rseq_gettid(), nr_abort, signals_delivered); if (!opt_disable_rseq && rseq_unregister_current_thread()) abort(); diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h index d40d60e7499e..3f63eb362b92 100644 --- a/tools/testing/selftests/rseq/rseq.h +++ b/tools/testing/selftests/rseq/rseq.h @@ -149,11 +149,13 @@ static inline void rseq_clear_rseq_cs(void) /* * rseq_prepare_unload() should be invoked by each thread executing a rseq * critical section at least once between their last critical section and - * library unload of the library defining the rseq critical section - * (struct rseq_cs). 
This also applies to use of rseq in code generated by - * JIT: rseq_prepare_unload() should be invoked at least once by each - * thread executing a rseq critical section before reclaim of the memory - * holding the struct rseq_cs. + * library unload of the library defining the rseq critical section (struct + * rseq_cs) or the code referred to by the struct rseq_cs start_ip and + * post_commit_offset fields. This also applies to use of rseq in code + * generated by JIT: rseq_prepare_unload() should be invoked at least once by + * each thread executing a rseq critical section before reclaim of the memory + * holding the struct rseq_cs or reclaim of the code pointed to by struct + * rseq_cs start_ip and post_commit_offset fields. */ static inline void rseq_prepare_unload(void) { diff --git a/tools/testing/selftests/rseq/settings b/tools/testing/selftests/rseq/settings new file mode 100644 index 000000000000..e7b9417537fb --- /dev/null +++ b/tools/testing/selftests/rseq/settings @@ -0,0 +1 @@ +timeout=0 diff --git a/tools/testing/selftests/safesetid/Makefile b/tools/testing/selftests/safesetid/Makefile index 98da7a504737..fa02c4d5ec13 100644 --- a/tools/testing/selftests/safesetid/Makefile +++ b/tools/testing/selftests/safesetid/Makefile @@ -1,8 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for mount selftests. -CFLAGS = -Wall -lcap -O2 +CFLAGS = -Wall -O2 +LDLIBS = -lcap -TEST_PROGS := run_tests.sh +TEST_PROGS := safesetid-test.sh TEST_GEN_FILES := safesetid-test include ../lib.mk diff --git a/tools/testing/selftests/safesetid/safesetid-test.c b/tools/testing/selftests/safesetid/safesetid-test.c index 8f40c6ecdad1..0c4d50644c13 100644 --- a/tools/testing/selftests/safesetid/safesetid-test.c +++ b/tools/testing/selftests/safesetid/safesetid-test.c @@ -213,7 +213,8 @@ static void test_setuid(uid_t child_uid, bool expect_success) } if (cpid == 0) { /* Code executed by child */ - setuid(child_uid); + if (setuid(child_uid) < 0) + exit(EXIT_FAILURE); if (getuid() == child_uid) exit(EXIT_SUCCESS); else @@ -291,8 +292,10 @@ int main(int argc, char **argv) // First test to make sure we can write userns mappings from a user // that doesn't have any restrictions (as long as it has CAP_SETUID); - setuid(NO_POLICY_USER); - setgid(NO_POLICY_USER); + if (setuid(NO_POLICY_USER) < 0) + die("Error with set uid(%d)\n", NO_POLICY_USER); + if (setgid(NO_POLICY_USER) < 0) + die("Error with set gid(%d)\n", NO_POLICY_USER); // Take away all but setid caps drop_caps(true); @@ -306,8 +309,10 @@ int main(int argc, char **argv) die("test_userns failed when it should work\n"); } - setuid(RESTRICTED_PARENT); - setgid(RESTRICTED_PARENT); + if (setuid(RESTRICTED_PARENT) < 0) + die("Error with set uid(%d)\n", RESTRICTED_PARENT); + if (setgid(RESTRICTED_PARENT) < 0) + die("Error with set gid(%d)\n", RESTRICTED_PARENT); test_setuid(ROOT_USER, false); test_setuid(ALLOWED_CHILD1, true); diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 6944b898bb53..ee1b727ede04 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -3158,7 +3158,18 @@ TEST(user_notification_basic) EXPECT_GT(poll(&pollfd, 1, -1), 0); EXPECT_EQ(pollfd.revents, POLLIN); - EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); + /* Test that we can't pass garbage to the kernel. 
*/ + memset(&req, 0, sizeof(req)); + req.pid = -1; + errno = 0; + ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + if (ret) { + req.pid = 0; + EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); + } pollfd.fd = listener; pollfd.events = POLLIN | POLLOUT; @@ -3278,6 +3289,7 @@ TEST(user_notification_signal) close(sk_pair[1]); + memset(&req, 0, sizeof(req)); EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); EXPECT_EQ(kill(pid, SIGUSR1), 0); @@ -3296,6 +3308,7 @@ TEST(user_notification_signal) EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1); EXPECT_EQ(errno, ENOENT); + memset(&req, 0, sizeof(req)); EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); resp.id = req.id; diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json b/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json index 76ae03a64506..2e361cea63bc 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json +++ b/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json @@ -152,7 +152,7 @@ ] }, { - "id": "6f5e", + "id": "b99c", "name": "Add basic filter with cmp ematch u8/transport layer and default action", "category": [ "filter", diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json index 0f89cd50a94b..8877f7b2b809 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json +++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json @@ -1,27 +1,5 @@ [ { - "id": "e9a3", - "name": "Add u32 with source match", - "category": [ - "filter", - "u32" - ], - "plugins": { - "requires": "nsPlugin" - }, - "setup": [ - "$TC qdisc add dev $DEV1 ingress" - ], - "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 u32 match ip src 127.0.0.1/32 flowid 1:1 action ok", - "expExitCode": "0", - "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", - "matchPattern": "match 7f000001/ffffffff at 12", - "matchCount": "1", - "teardown": [ - "$TC qdisc del dev $DEV1 ingress" - ] - }, - { "id": "2638", "name": "Add matchall and try to get it", "category": [ diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/u32.json b/tools/testing/selftests/tc-testing/tc-tests/filters/u32.json new file mode 100644 index 000000000000..e09d3c0e307f --- /dev/null +++ b/tools/testing/selftests/tc-testing/tc-tests/filters/u32.json @@ -0,0 +1,205 @@ +[ + { + "id": "afa9", + "name": "Add u32 with source match", + "category": [ + "filter", + "u32" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 ingress protocol ip prio 1 u32 match ip src 127.0.0.1/32 flowid 1:1 action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV1 ingress", + "matchPattern": "filter protocol ip pref 1 u32 chain (0[ ]+$|0 fh 800: ht divisor 1|0 fh 800::800 order 2048 key ht 800 bkt 0 flowid 1:1.*match 7f000001/ffffffff at 12)", + "matchCount": "3", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "6aa7", + "name": "Add/Replace u32 with source match and invalid indev", + "category": [ + "filter", + "u32" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter replace dev $DEV1 ingress protocol ip prio 1 u32 match ip src 127.0.0.1/32 indev notexist20 flowid 1:1 action ok", + "expExitCode": "2", 
+ "verifyCmd": "$TC filter show dev $DEV1 ingress", + "matchPattern": "filter protocol ip pref 1 u32 chain 0", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "bc4d", + "name": "Replace valid u32 with source match and invalid indev", + "category": [ + "filter", + "u32" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 ingress protocol ip prio 1 u32 match ip src 127.0.0.3/32 flowid 1:3 action ok" + ], + "cmdUnderTest": "$TC filter replace dev $DEV1 ingress protocol ip prio 1 u32 match ip src 127.0.0.2/32 indev notexist20 flowid 1:2 action ok", + "expExitCode": "2", + "verifyCmd": "$TC filter show dev $DEV1 ingress", + "matchPattern": "filter protocol ip pref 1 u32 chain (0[ ]+$|0 fh 800: ht divisor 1|0 fh 800::800 order 2048 key ht 800 bkt 0 flowid 1:3.*match 7f000003/ffffffff at 12)", + "matchCount": "3", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "648b", + "name": "Add u32 with custom hash table", + "category": [ + "filter", + "u32" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 ingress prio 99 handle 42: u32 divisor 256", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV1 ingress", + "matchPattern": "pref 99 u32 chain (0[ ]+$|0 fh 42: ht divisor 256|0 fh 800: ht divisor 1)", + "matchCount": "3", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "6658", + "name": "Add/Replace u32 with custom hash table and invalid handle", + "category": [ + "filter", + "u32" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter replace dev $DEV1 ingress prio 99 handle 42:42 u32 divisor 256", + "expExitCode": "2", + "verifyCmd": "$TC filter show dev $DEV1 ingress", + "matchPattern": "pref 99 u32 chain 0", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "9d0a", + "name": "Replace valid u32 with custom hash table and invalid handle", + "category": [ + "filter", + "u32" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 ingress prio 99 handle 42: u32 divisor 256" + ], + "cmdUnderTest": "$TC filter replace dev $DEV1 ingress prio 99 handle 42:42 u32 divisor 128", + "expExitCode": "2", + "verifyCmd": "$TC filter show dev $DEV1 ingress", + "matchPattern": "pref 99 u32 chain (0[ ]+$|0 fh 42: ht divisor 256|0 fh 800: ht divisor 1)", + "matchCount": "3", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "1644", + "name": "Add u32 filter that links to a custom hash table", + "category": [ + "filter", + "u32" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 ingress prio 99 handle 43: u32 divisor 256" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 ingress protocol ip prio 98 u32 link 43: hashkey mask 0x0000ff00 at 12 match ip src 192.168.0.0/16", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV1 ingress", + "matchPattern": "filter protocol ip pref 98 u32 chain (0[ ]+$|0 fh 801: ht divisor 1|0 fh 801::800 order 2048 key ht 801 bkt 0 link 43:.*match c0a80000/ffff0000 at 12.*hash mask 0000ff00 at 12)", + "matchCount": "3", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "74c2", + "name": 
"Add/Replace u32 filter with invalid hash table id", + "category": [ + "filter", + "u32" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter replace dev $DEV1 ingress protocol ip prio 20 u32 ht 47:47 action drop", + "expExitCode": "2", + "verifyCmd": "$TC filter show dev $DEV1 ingress", + "matchPattern": "filter protocol ip pref 20 u32 chain 0", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "1fe6", + "name": "Replace valid u32 filter with invalid hash table id", + "category": [ + "filter", + "u32" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 ingress protocol ip prio 99 handle 43: u32 divisor 1", + "$TC filter add dev $DEV1 ingress protocol ip prio 98 u32 ht 43: match tcp src 22 FFFF classid 1:3" + ], + "cmdUnderTest": "$TC filter replace dev $DEV1 ingress protocol ip prio 98 u32 ht 43:1 match tcp src 23 FFFF classid 1:4", + "expExitCode": "2", + "verifyCmd": "$TC filter show dev $DEV1 ingress", + "matchPattern": "filter protocol ip pref 99 u32 chain (0[ ]+$|0 fh (43|800): ht divisor 1|0 fh 43::800 order 2048 key ht 43 bkt 0 flowid 1:3.*match 00160000/ffff0000 at nexthdr\\+0)", + "matchCount": "4", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + } +] diff --git a/tools/testing/selftests/tpm2/test_smoke.sh b/tools/testing/selftests/tpm2/test_smoke.sh index 80521d46220c..8155c2ea7ccb 100755 --- a/tools/testing/selftests/tpm2/test_smoke.sh +++ b/tools/testing/selftests/tpm2/test_smoke.sh @@ -2,3 +2,9 @@ # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) python -m unittest -v tpm2_tests.SmokeTest +python -m unittest -v tpm2_tests.AsyncTest + +CLEAR_CMD=$(which tpm2_clear) +if [ -n $CLEAR_CMD ]; then + tpm2_clear -T device +fi diff --git a/tools/testing/selftests/tpm2/tpm2.py b/tools/testing/selftests/tpm2/tpm2.py index 828c18584624..d0fcb66a88a6 100644 --- a/tools/testing/selftests/tpm2/tpm2.py +++ b/tools/testing/selftests/tpm2/tpm2.py @@ -6,8 +6,8 @@ import socket import struct import sys import unittest -from fcntl import ioctl - +import fcntl +import select TPM2_ST_NO_SESSIONS = 0x8001 TPM2_ST_SESSIONS = 0x8002 @@ -352,6 +352,7 @@ def hex_dump(d): class Client: FLAG_DEBUG = 0x01 FLAG_SPACE = 0x02 + FLAG_NONBLOCK = 0x04 TPM_IOC_NEW_SPACE = 0xa200 def __init__(self, flags = 0): @@ -362,13 +363,27 @@ class Client: else: self.tpm = open('/dev/tpmrm0', 'r+b', buffering=0) + if (self.flags & Client.FLAG_NONBLOCK): + flags = fcntl.fcntl(self.tpm, fcntl.F_GETFL) + flags |= os.O_NONBLOCK + fcntl.fcntl(self.tpm, fcntl.F_SETFL, flags) + self.tpm_poll = select.poll() + def close(self): self.tpm.close() def send_cmd(self, cmd): self.tpm.write(cmd) + + if (self.flags & Client.FLAG_NONBLOCK): + self.tpm_poll.register(self.tpm, select.POLLIN) + self.tpm_poll.poll(10000) + rsp = self.tpm.read() + if (self.flags & Client.FLAG_NONBLOCK): + self.tpm_poll.unregister(self.tpm) + if (self.flags & Client.FLAG_DEBUG) != 0: sys.stderr.write('cmd' + os.linesep) sys.stderr.write(hex_dump(cmd) + os.linesep) diff --git a/tools/testing/selftests/tpm2/tpm2_tests.py b/tools/testing/selftests/tpm2/tpm2_tests.py index d4973be53493..728be7c69b76 100644 --- a/tools/testing/selftests/tpm2/tpm2_tests.py +++ b/tools/testing/selftests/tpm2/tpm2_tests.py @@ -288,3 +288,16 @@ class SpaceTest(unittest.TestCase): self.assertEqual(rc, tpm2.TPM2_RC_COMMAND_CODE | tpm2.TSS2_RESMGR_TPM_RC_LAYER) + +class 
AsyncTest(unittest.TestCase): + def setUp(self): + logging.basicConfig(filename='AsyncTest.log', level=logging.DEBUG) + + def test_async(self): + log = logging.getLogger(__name__) + log.debug(sys._getframe().f_code.co_name) + + async_client = tpm2.Client(tpm2.Client.FLAG_NONBLOCK) + log.debug("Calling get_cap in a NON_BLOCKING mode") + async_client.get_cap(tpm2.TPM2_CAP_HANDLES, tpm2.HR_LOADED_SESSION) + async_client.close() diff --git a/usr/gen_initramfs_list.sh b/usr/gen_initramfs_list.sh index 0aad760fcd8c..2bbac73e6477 100755 --- a/usr/gen_initramfs_list.sh +++ b/usr/gen_initramfs_list.sh @@ -128,7 +128,7 @@ parse() { str="${ftype} ${name} ${location} ${str}" ;; "nod") - local dev=`LC_ALL=C ls -l "${location}"` + local dev="`LC_ALL=C ls -l "${location}"`" local maj=`field 5 ${dev}` local min=`field 6 ${dev}` maj=${maj%,} diff --git a/usr/include/Makefile b/usr/include/Makefile index 4a753a48767b..84598469e6ff 100644 --- a/usr/include/Makefile +++ b/usr/include/Makefile @@ -91,7 +91,7 @@ endif # asm-generic/*.h is used by asm/*.h, and should not be included directly header-test- += asm-generic/% -extra-y := $(patsubst $(obj)/%.h,%.hdrtest, $(shell find $(obj) -name '*.h')) +extra-y := $(patsubst $(obj)/%.h,%.hdrtest, $(shell find $(obj) -name '*.h' 2>/dev/null)) quiet_cmd_hdrtest = HDRTEST $< cmd_hdrtest = \ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 12e0280291ce..8de4daf25097 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -1352,7 +1352,7 @@ long kvm_arch_vm_ioctl(struct file *filp, } } -static void cpu_init_hyp_mode(void *dummy) +static void cpu_init_hyp_mode(void) { phys_addr_t pgd_ptr; unsigned long hyp_stack_ptr; @@ -1386,7 +1386,7 @@ static void cpu_hyp_reinit(void) if (is_kernel_in_hyp_mode()) kvm_timer_init_vhe(); else - cpu_init_hyp_mode(NULL); + cpu_init_hyp_mode(); kvm_arm_init_debug(); diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index 38b4c910b6c3..0b32a904a1bb 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -38,6 +38,11 @@ static unsigned long io_map_base; #define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0) #define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1) +static bool is_iomap(unsigned long flags) +{ + return flags & KVM_S2PTE_FLAG_IS_IOMAP; +} + static bool memslot_is_logging(struct kvm_memory_slot *memslot) { return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY); @@ -1698,6 +1703,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, vma_pagesize = vma_kernel_pagesize(vma); if (logging_active || + (vma->vm_flags & VM_PFNMAP) || !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) { force_pte = true; vma_pagesize = PAGE_SIZE; @@ -1760,6 +1766,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, writable = false; } + if (exec_fault && is_iomap(flags)) + return -ENOEXEC; + spin_lock(&kvm->mmu_lock); if (mmu_notifier_retry(kvm, mmu_seq)) goto out_unlock; @@ -1781,7 +1790,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (writable) kvm_set_pfn_dirty(pfn); - if (fault_status != FSC_PERM) + if (fault_status != FSC_PERM && !is_iomap(flags)) clean_dcache_guest_page(pfn, vma_pagesize); if (exec_fault) @@ -1948,9 +1957,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) if (kvm_is_error_hva(hva) || (write_fault && !writable)) { if (is_iabt) { /* Prefetch Abort on I/O address */ - kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); - ret = 1; - goto out_unlock; + ret = -ENOEXEC; + goto out; } /* @@ -1992,6 +2000,11 @@ int 
kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); if (ret == 0) ret = 1; +out: + if (ret == -ENOEXEC) { + kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); + ret = 1; + } out_unlock: srcu_read_unlock(&vcpu->kvm->srcu, idx); return ret; @@ -2302,15 +2315,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, break; /* - * Mapping a read-only VMA is only allowed if the - * memory region is configured as read-only. - */ - if (writable && !(vma->vm_flags & VM_WRITE)) { - ret = -EPERM; - break; - } - - /* * Take the intersection of this VMA with the memory region */ vm_start = max(hva, vma->vm_start); diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index b3c5de48064c..a963b9d766b7 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -70,7 +70,7 @@ void kvm_vgic_early_init(struct kvm *kvm) */ int kvm_vgic_create(struct kvm *kvm, u32 type) { - int i, vcpu_lock_idx = -1, ret; + int i, ret; struct kvm_vcpu *vcpu; if (irqchip_in_kernel(kvm)) @@ -86,17 +86,9 @@ int kvm_vgic_create(struct kvm *kvm, u32 type) !kvm_vgic_global_state.can_emulate_gicv2) return -ENODEV; - /* - * Any time a vcpu is run, vcpu_load is called which tries to grab the - * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure - * that no other VCPUs are run while we create the vgic. - */ ret = -EBUSY; - kvm_for_each_vcpu(i, vcpu, kvm) { - if (!mutex_trylock(&vcpu->mutex)) - goto out_unlock; - vcpu_lock_idx = i; - } + if (!lock_all_vcpus(kvm)) + return ret; kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->arch.has_run_once) @@ -125,10 +117,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type) INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions); out_unlock: - for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) { - vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx); - mutex_unlock(&vcpu->mutex); - } + unlock_all_vcpus(kvm); return ret; } @@ -177,6 +166,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis) break; default: kfree(dist->spis); + dist->spis = NULL; return -EINVAL; } } |
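For readers skimming the final vgic-init.c hunk above: the patch replaces an open-coded loop that trylocks every vCPU mutex (and manually unwinds on failure) with the shared lock_all_vcpus()/unlock_all_vcpus() helpers. Below is a minimal userspace sketch of that all-or-nothing trylock pattern, written against plain pthreads; the struct, helper names, and the NR_VCPUS constant are illustrative stand-ins, not the kernel's actual API, and the demo only mimics the rollback-on-contention behaviour the helpers provide.

```c
/*
 * Illustrative sketch (not kernel code): the "lock every vCPU or back off"
 * pattern that lock_all_vcpus()/unlock_all_vcpus() encapsulate in the
 * kvm_vgic_create() hunk above. All names here are made up for the demo.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_VCPUS 4

struct demo_vcpu {
	pthread_mutex_t mutex;
};

static struct demo_vcpu vcpus[NR_VCPUS];

/* Try to take every vCPU mutex; on any failure, release what was taken. */
static bool lock_all_vcpus_demo(void)
{
	int i;

	for (i = 0; i < NR_VCPUS; i++) {
		if (pthread_mutex_trylock(&vcpus[i].mutex) != 0) {
			while (--i >= 0)
				pthread_mutex_unlock(&vcpus[i].mutex);
			return false;
		}
	}
	return true;
}

static void unlock_all_vcpus_demo(void)
{
	int i;

	for (i = 0; i < NR_VCPUS; i++)
		pthread_mutex_unlock(&vcpus[i].mutex);
}

int main(void)
{
	int i;

	for (i = 0; i < NR_VCPUS; i++)
		pthread_mutex_init(&vcpus[i].mutex, NULL);

	if (lock_all_vcpus_demo()) {
		/* ...the caller would create the vgic while no vCPU can run... */
		printf("all vcpus locked, safe to create the device\n");
		unlock_all_vcpus_demo();
	} else {
		/* Mirrors the -EBUSY path in the kvm_vgic_create() hunk. */
		printf("busy, caller would return -EBUSY\n");
	}
	return 0;
}
```

The gain of the helper is that every caller gets the same acquire-all-or-unwind semantics instead of each site re-implementing the trylock loop with its own vcpu_lock_idx bookkeeping, which is exactly the duplicated code the kvm_vgic_create() hunk deletes.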