Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/acpi.h | 2
-rw-r--r--  include/linux/acpi_iort.h | 4
-rw-r--r--  include/linux/atmdev.h | 1
-rw-r--r--  include/linux/atomic-arch-fallback.h | 90
-rw-r--r--  include/linux/atomic-fallback.h | 90
-rw-r--r--  include/linux/auxiliary_bus.h | 77
-rw-r--r--  include/linux/blkdev.h | 17
-rw-r--r--  include/linux/bootconfig.h | 3
-rw-r--r--  include/linux/bpf-cgroup.h | 12
-rw-r--r--  include/linux/bpf.h | 80
-rw-r--r--  include/linux/bpf_lsm.h | 30
-rw-r--r--  include/linux/bpf_types.h | 1
-rw-r--r--  include/linux/bpf_verifier.h | 30
-rw-r--r--  include/linux/btf.h | 6
-rw-r--r--  include/linux/build_bug.h | 5
-rw-r--r--  include/linux/can/dev.h | 38
-rw-r--r--  include/linux/can/dev/peak_canfd.h | 2
-rw-r--r--  include/linux/ccp.h | 3
-rw-r--r--  include/linux/cgroup-defs.h | 15
-rw-r--r--  include/linux/cgroup.h | 5
-rw-r--r--  include/linux/compaction.h | 12
-rw-r--r--  include/linux/completion.h | 5
-rw-r--r--  include/linux/console.h | 1
-rw-r--r--  include/linux/context_tracking.h | 6
-rw-r--r--  include/linux/cpuhotplug.h | 1
-rw-r--r--  include/linux/cpumask.h | 6
-rw-r--r--  include/linux/device.h | 10
-rw-r--r--  include/linux/device/class.h | 14
-rw-r--r--  include/linux/dma-buf-map.h | 266
-rw-r--r--  include/linux/dma-buf.h | 18
-rw-r--r--  include/linux/edac.h | 16
-rw-r--r--  include/linux/elf.h | 10
-rw-r--r--  include/linux/elfcore.h | 22
-rw-r--r--  include/linux/entry-common.h | 171
-rw-r--r--  include/linux/entry-kvm.h | 4
-rw-r--r--  include/linux/ethtool.h | 1
-rw-r--r--  include/linux/filter.h | 2
-rw-r--r--  include/linux/font.h | 3
-rw-r--r--  include/linux/fpga/fpga-mgr.h | 2
-rw-r--r--  include/linux/freelist.h | 129
-rw-r--r--  include/linux/fs.h | 2
-rw-r--r--  include/linux/fscrypt.h | 112
-rw-r--r--  include/linux/fwnode.h | 73
-rw-r--r--  include/linux/genl_magic_struct.h | 2
-rw-r--r--  include/linux/gfp.h | 2
-rw-r--r--  include/linux/goldfish.h | 8
-rw-r--r--  include/linux/highmem-internal.h | 232
-rw-r--r--  include/linux/highmem.h | 313
-rw-r--r--  include/linux/hrtimer.h | 6
-rw-r--r--  include/linux/huge_mm.h | 93
-rw-r--r--  include/linux/ieee80211.h | 12
-rw-r--r--  include/linux/if_bridge.h | 1
-rw-r--r--  include/linux/if_frad.h | 92
-rw-r--r--  include/linux/if_macvlan.h | 1
-rw-r--r--  include/linux/iio/adc/ad_sigma_delta.h | 6
-rw-r--r--  include/linux/iio/buffer.h | 3
-rw-r--r--  include/linux/iio/iio-opaque.h | 2
-rw-r--r--  include/linux/iio/iio.h | 6
-rw-r--r--  include/linux/iio/trigger.h | 6
-rw-r--r--  include/linux/iio/triggered_buffer.h | 23
-rw-r--r--  include/linux/ima.h | 6
-rw-r--r--  include/linux/inetdevice.h | 4
-rw-r--r--  include/linux/io-mapping.h | 38
-rw-r--r--  include/linux/io-pgtable.h | 8
-rw-r--r--  include/linux/iommu.h | 1
-rw-r--r--  include/linux/ipc_namespace.h | 3
-rw-r--r--  include/linux/irq_work.h | 33
-rw-r--r--  include/linux/irqdomain.h | 12
-rw-r--r--  include/linux/irqflags.h | 8
-rw-r--r--  include/linux/kbd_kern.h | 3
-rw-r--r--  include/linux/kernel.h | 22
-rw-r--r--  include/linux/kernfs.h | 2
-rw-r--r--  include/linux/key-type.h | 1
-rw-r--r--  include/linux/kprobes.h | 25
-rw-r--r--  include/linux/list.h | 2
-rw-r--r--  include/linux/llist.h | 23
-rw-r--r--  include/linux/lockdep.h | 17
-rw-r--r--  include/linux/lsm_audit.h | 2
-rw-r--r--  include/linux/lsm_hook_defs.h | 2
-rw-r--r--  include/linux/marvell_phy.h | 3
-rw-r--r--  include/linux/mei_cl_bus.h | 6
-rw-r--r--  include/linux/memcontrol.h | 462
-rw-r--r--  include/linux/mhi.h | 25
-rw-r--r--  include/linux/migrate.h | 4
-rw-r--r--  include/linux/mlx4/device.h | 1
-rw-r--r--  include/linux/mlx5/device.h | 8
-rw-r--r--  include/linux/mlx5/driver.h | 42
-rw-r--r--  include/linux/mlx5/eswitch.h | 8
-rw-r--r--  include/linux/mlx5/fs.h | 6
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 103
-rw-r--r--  include/linux/mlx5/mlx5_ifc_vdpa.h | 166
-rw-r--r--  include/linux/mm.h | 143
-rw-r--r--  include/linux/mm_types.h | 15
-rw-r--r--  include/linux/mmap_lock.h | 94
-rw-r--r--  include/linux/mmzone.h | 76
-rw-r--r--  include/linux/mod_devicetable.h | 8
-rw-r--r--  include/linux/module.h | 4
-rw-r--r--  include/linux/msi.h | 46
-rw-r--r--  include/linux/net.h | 2
-rw-r--r--  include/linux/netdev_features.h | 4
-rw-r--r--  include/linux/netdevice.h | 90
-rw-r--r--  include/linux/netfilter/ipset/ip_set.h | 5
-rw-r--r--  include/linux/netfilter/x_tables.h | 5
-rw-r--r--  include/linux/nfs_page.h | 4
-rw-r--r--  include/linux/ns_common.h | 3
-rw-r--r--  include/linux/nvmem-provider.h | 17
-rw-r--r--  include/linux/of.h | 10
-rw-r--r--  include/linux/page-flags.h | 18
-rw-r--r--  include/linux/page_ext.h | 8
-rw-r--r--  include/linux/pagevec.h | 3
-rw-r--r--  include/linux/perf/arm_pmu.h | 2
-rw-r--r--  include/linux/perf_event.h | 6
-rw-r--r--  include/linux/pgtable.h | 71
-rw-r--r--  include/linux/phy.h | 22
-rw-r--r--  include/linux/pid_namespace.h | 4
-rw-r--r--  include/linux/platform_data/ad7298.h | 19
-rw-r--r--  include/linux/platform_data/ad7303.h | 20
-rw-r--r--  include/linux/platform_data/ad7887.h | 4
-rw-r--r--  include/linux/platform_data/adau1977.h | 44
-rw-r--r--  include/linux/platform_data/at91_adc.h | 49
-rw-r--r--  include/linux/platform_data/hirschmann-hellcreek.h | 23
-rw-r--r--  include/linux/platform_data/media/coda.h | 14
-rw-r--r--  include/linux/platform_data/serial-imx.h | 15
-rw-r--r--  include/linux/platform_data/shmob_drm.h | 2
-rw-r--r--  include/linux/platform_data/usb-ehci-mxc.h | 14
-rw-r--r--  include/linux/platform_device.h | 3
-rw-r--r--  include/linux/poison.h | 4
-rw-r--r--  include/linux/preempt.h | 83
-rw-r--r--  include/linux/property.h | 3
-rw-r--r--  include/linux/ptp_classify.h | 7
-rw-r--r--  include/linux/ptp_clock_kernel.h | 13
-rw-r--r--  include/linux/purgatory.h | 2
-rw-r--r--  include/linux/qed/qed_if.h | 1
-rw-r--r--  include/linux/rcupdate.h | 11
-rw-r--r--  include/linux/rcupdate_trace.h | 4
-rw-r--r--  include/linux/rcutiny.h | 2
-rw-r--r--  include/linux/rcutree.h | 1
-rw-r--r--  include/linux/refcount.h | 2
-rw-r--r--  include/linux/regmap.h | 35
-rw-r--r--  include/linux/rfkill.h | 24
-rw-r--r--  include/linux/rmap.h | 1
-rw-r--r--  include/linux/rtc.h | 69
-rw-r--r--  include/linux/rtsx_pci.h | 2
-rw-r--r--  include/linux/rwsem.h | 3
-rw-r--r--  include/linux/scatterlist.h | 6
-rw-r--r--  include/linux/sched.h | 20
-rw-r--r--  include/linux/sched/hotplug.h | 2
-rw-r--r--  include/linux/sched/mm.h | 21
-rw-r--r--  include/linux/sched/signal.h | 20
-rw-r--r--  include/linux/sched/task.h | 2
-rw-r--r--  include/linux/sched/topology.h | 8
-rw-r--r--  include/linux/scs.h | 16
-rw-r--r--  include/linux/sctp.h | 20
-rw-r--r--  include/linux/sdla.h | 240
-rw-r--r--  include/linux/seccomp.h | 2
-rw-r--r--  include/linux/security.h | 7
-rw-r--r--  include/linux/seqlock.h | 121
-rw-r--r--  include/linux/serial_8250.h | 5
-rw-r--r--  include/linux/serial_core.h | 20
-rw-r--r--  include/linux/serial_pnx8xxx.h | 67
-rw-r--r--  include/linux/set_memory.h | 5
-rw-r--r--  include/linux/shmem_fs.h | 6
-rw-r--r--  include/linux/signal.h | 14
-rw-r--r--  include/linux/signal_types.h | 12
-rw-r--r--  include/linux/siox.h | 2
-rw-r--r--  include/linux/skbuff.h | 22
-rw-r--r--  include/linux/slab.h | 18
-rw-r--r--  include/linux/smp.h | 19
-rw-r--r--  include/linux/soc/marvell/octeontx2/asm.h | 29
-rw-r--r--  include/linux/soundwire/sdw.h | 4
-rw-r--r--  include/linux/soundwire/sdw_registers.h | 43
-rw-r--r--  include/linux/spmi.h | 1
-rw-r--r--  include/linux/stmmac.h | 1
-rw-r--r--  include/linux/stop_machine.h | 5
-rw-r--r--  include/linux/swap.h | 4
-rw-r--r--  include/linux/syscall_user_dispatch.h | 40
-rw-r--r--  include/linux/thread_info.h | 50
-rw-r--r--  include/linux/thunderbolt.h | 18
-rw-r--r--  include/linux/time_namespace.h | 37
-rw-r--r--  include/linux/timekeeping.h | 2
-rw-r--r--  include/linux/timer.h | 1
-rw-r--r--  include/linux/timex.h | 1
-rw-r--r--  include/linux/tracehook.h | 44
-rw-r--r--  include/linux/tty.h | 48
-rw-r--r--  include/linux/tty_driver.h | 9
-rw-r--r--  include/linux/uio_driver.h | 16
-rw-r--r--  include/linux/usb/hcd.h | 4
-rw-r--r--  include/linux/usb/pd.h | 2
-rw-r--r--  include/linux/usb/pd_vdo.h | 19
-rw-r--r--  include/linux/usb/r8152.h | 37
-rw-r--r--  include/linux/usb/serial.h | 2
-rw-r--r--  include/linux/usb/tcpm.h | 28
-rw-r--r--  include/linux/usb/typec.h | 2
-rw-r--r--  include/linux/usb/typec_tbt.h | 6
-rw-r--r--  include/linux/usb/usbnet.h | 4
-rw-r--r--  include/linux/usb_usual.h | 2
-rw-r--r--  include/linux/user_namespace.h | 5
-rw-r--r--  include/linux/utsname.h | 9
-rw-r--r--  include/linux/vmalloc.h | 8
-rw-r--r--  include/linux/vmstat.h | 104
-rw-r--r--  include/linux/wimax/debug.h | 491
-rw-r--r--  include/linux/zsmalloc.h | 1
202 files changed, 3817 insertions, 2320 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 39263c6b52e1..2630c2e953f7 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -55,7 +55,7 @@ static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
if (!fwnode)
return NULL;
- fwnode->ops = &acpi_static_fwnode_ops;
+ fwnode_init(fwnode, &acpi_static_fwnode_ops);
return fwnode;
}
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 20a32120bb88..1a12baa58e40 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -38,6 +38,7 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size);
const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
const u32 *id_in);
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head);
+phys_addr_t acpi_iort_dma_get_max_cpu_address(void);
#else
static inline void acpi_iort_init(void) { }
static inline u32 iort_msi_map_id(struct device *dev, u32 id)
@@ -55,6 +56,9 @@ static inline const struct iommu_ops *iort_iommu_configure_id(
static inline
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{ return 0; }
+
+static inline phys_addr_t acpi_iort_dma_get_max_cpu_address(void)
+{ return PHYS_ADDR_MAX; }
#endif
#endif /* __ACPI_IORT_H__ */
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 5d5ff2203fa2..d7493016cd46 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -186,6 +186,7 @@ struct atmdev_ops { /* only send is required */
void __user *arg);
#endif
int (*send)(struct atm_vcc *vcc,struct sk_buff *skb);
+ int (*send_bh)(struct atm_vcc *vcc, struct sk_buff *skb);
int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags);
void (*phy_put)(struct atm_dev *dev,unsigned char value,
unsigned long addr);
diff --git a/include/linux/atomic-arch-fallback.h b/include/linux/atomic-arch-fallback.h
index bcb6aa27cfa6..a3dba31df01e 100644
--- a/include/linux/atomic-arch-fallback.h
+++ b/include/linux/atomic-arch-fallback.h
@@ -9,9 +9,9 @@
#include <linux/compiler.h>
#ifndef arch_xchg_relaxed
-#define arch_xchg_relaxed arch_xchg
-#define arch_xchg_acquire arch_xchg
-#define arch_xchg_release arch_xchg
+#define arch_xchg_acquire arch_xchg
+#define arch_xchg_release arch_xchg
+#define arch_xchg_relaxed arch_xchg
#else /* arch_xchg_relaxed */
#ifndef arch_xchg_acquire
@@ -32,9 +32,9 @@
#endif /* arch_xchg_relaxed */
#ifndef arch_cmpxchg_relaxed
-#define arch_cmpxchg_relaxed arch_cmpxchg
-#define arch_cmpxchg_acquire arch_cmpxchg
-#define arch_cmpxchg_release arch_cmpxchg
+#define arch_cmpxchg_acquire arch_cmpxchg
+#define arch_cmpxchg_release arch_cmpxchg
+#define arch_cmpxchg_relaxed arch_cmpxchg
#else /* arch_cmpxchg_relaxed */
#ifndef arch_cmpxchg_acquire
@@ -55,9 +55,9 @@
#endif /* arch_cmpxchg_relaxed */
#ifndef arch_cmpxchg64_relaxed
-#define arch_cmpxchg64_relaxed arch_cmpxchg64
-#define arch_cmpxchg64_acquire arch_cmpxchg64
-#define arch_cmpxchg64_release arch_cmpxchg64
+#define arch_cmpxchg64_acquire arch_cmpxchg64
+#define arch_cmpxchg64_release arch_cmpxchg64
+#define arch_cmpxchg64_relaxed arch_cmpxchg64
#else /* arch_cmpxchg64_relaxed */
#ifndef arch_cmpxchg64_acquire
@@ -77,6 +77,76 @@
#endif /* arch_cmpxchg64_relaxed */
+#ifndef arch_try_cmpxchg_relaxed
+#ifdef arch_try_cmpxchg
+#define arch_try_cmpxchg_acquire arch_try_cmpxchg
+#define arch_try_cmpxchg_release arch_try_cmpxchg
+#define arch_try_cmpxchg_relaxed arch_try_cmpxchg
+#endif /* arch_try_cmpxchg */
+
+#ifndef arch_try_cmpxchg
+#define arch_try_cmpxchg(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg */
+
+#ifndef arch_try_cmpxchg_acquire
+#define arch_try_cmpxchg_acquire(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg_acquire((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg_acquire */
+
+#ifndef arch_try_cmpxchg_release
+#define arch_try_cmpxchg_release(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg_release((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg_release */
+
+#ifndef arch_try_cmpxchg_relaxed
+#define arch_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg_relaxed((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg_relaxed */
+
+#else /* arch_try_cmpxchg_relaxed */
+
+#ifndef arch_try_cmpxchg_acquire
+#define arch_try_cmpxchg_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_try_cmpxchg_release
+#define arch_try_cmpxchg_release(...) \
+ __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_try_cmpxchg
+#define arch_try_cmpxchg(...) \
+ __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_try_cmpxchg_relaxed */
+
#ifndef arch_atomic_read_acquire
static __always_inline int
arch_atomic_read_acquire(const atomic_t *v)
@@ -2288,4 +2358,4 @@ arch_atomic64_dec_if_positive(atomic64_t *v)
#endif
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 90cd26cfd69d2250303d654955a0cc12620fb91b
+// cca554917d7ea73d5e3e7397dd70c484cad9b2c4
diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h
index fd525c71d676..2a3f55d98be9 100644
--- a/include/linux/atomic-fallback.h
+++ b/include/linux/atomic-fallback.h
@@ -9,9 +9,9 @@
#include <linux/compiler.h>
#ifndef xchg_relaxed
-#define xchg_relaxed xchg
-#define xchg_acquire xchg
-#define xchg_release xchg
+#define xchg_acquire xchg
+#define xchg_release xchg
+#define xchg_relaxed xchg
#else /* xchg_relaxed */
#ifndef xchg_acquire
@@ -32,9 +32,9 @@
#endif /* xchg_relaxed */
#ifndef cmpxchg_relaxed
-#define cmpxchg_relaxed cmpxchg
-#define cmpxchg_acquire cmpxchg
-#define cmpxchg_release cmpxchg
+#define cmpxchg_acquire cmpxchg
+#define cmpxchg_release cmpxchg
+#define cmpxchg_relaxed cmpxchg
#else /* cmpxchg_relaxed */
#ifndef cmpxchg_acquire
@@ -55,9 +55,9 @@
#endif /* cmpxchg_relaxed */
#ifndef cmpxchg64_relaxed
-#define cmpxchg64_relaxed cmpxchg64
-#define cmpxchg64_acquire cmpxchg64
-#define cmpxchg64_release cmpxchg64
+#define cmpxchg64_acquire cmpxchg64
+#define cmpxchg64_release cmpxchg64
+#define cmpxchg64_relaxed cmpxchg64
#else /* cmpxchg64_relaxed */
#ifndef cmpxchg64_acquire
@@ -77,6 +77,76 @@
#endif /* cmpxchg64_relaxed */
+#ifndef try_cmpxchg_relaxed
+#ifdef try_cmpxchg
+#define try_cmpxchg_acquire try_cmpxchg
+#define try_cmpxchg_release try_cmpxchg
+#define try_cmpxchg_relaxed try_cmpxchg
+#endif /* try_cmpxchg */
+
+#ifndef try_cmpxchg
+#define try_cmpxchg(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = cmpxchg((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* try_cmpxchg */
+
+#ifndef try_cmpxchg_acquire
+#define try_cmpxchg_acquire(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = cmpxchg_acquire((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* try_cmpxchg_acquire */
+
+#ifndef try_cmpxchg_release
+#define try_cmpxchg_release(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = cmpxchg_release((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* try_cmpxchg_release */
+
+#ifndef try_cmpxchg_relaxed
+#define try_cmpxchg_relaxed(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = cmpxchg_relaxed((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* try_cmpxchg_relaxed */
+
+#else /* try_cmpxchg_relaxed */
+
+#ifndef try_cmpxchg_acquire
+#define try_cmpxchg_acquire(...) \
+ __atomic_op_acquire(try_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef try_cmpxchg_release
+#define try_cmpxchg_release(...) \
+ __atomic_op_release(try_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef try_cmpxchg
+#define try_cmpxchg(...) \
+ __atomic_op_fence(try_cmpxchg, __VA_ARGS__)
+#endif
+
+#endif /* try_cmpxchg_relaxed */
+
#define arch_atomic_read atomic_read
#define arch_atomic_read_acquire atomic_read_acquire
@@ -2522,4 +2592,4 @@ atomic64_dec_if_positive(atomic64_t *v)
#endif
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 9d95b56f98d82a2a26c7b79ccdd0c47572d50a6f
+// d78e6c293c661c15188f0ec05bce45188c8d5892
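Both fallback headers generate the same try_cmpxchg() pattern: on failure the primitive writes the current value back through the old-value pointer and returns false, so callers can retry without an explicit re-read. A minimal sketch of that calling convention (bump() and its argument are illustrative, not part of the patch):

static void bump(int *p)
{
	int old = READ_ONCE(*p);

	/* On failure, try_cmpxchg() stores the current value of *p into
	 * 'old', so the retry loop needs no explicit re-read. */
	while (!try_cmpxchg(p, &old, old + 1))
		;
}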
diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h
new file mode 100644
index 000000000000..fc51d45f106b
--- /dev/null
+++ b/include/linux/auxiliary_bus.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020 Intel Corporation
+ *
+ * Please see Documentation/driver-api/auxiliary_bus.rst for more information.
+ */
+
+#ifndef _AUXILIARY_BUS_H_
+#define _AUXILIARY_BUS_H_
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+struct auxiliary_device {
+ struct device dev;
+ const char *name;
+ u32 id;
+};
+
+struct auxiliary_driver {
+ int (*probe)(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id);
+ void (*remove)(struct auxiliary_device *auxdev);
+ void (*shutdown)(struct auxiliary_device *auxdev);
+ int (*suspend)(struct auxiliary_device *auxdev, pm_message_t state);
+ int (*resume)(struct auxiliary_device *auxdev);
+ const char *name;
+ struct device_driver driver;
+ const struct auxiliary_device_id *id_table;
+};
+
+static inline struct auxiliary_device *to_auxiliary_dev(struct device *dev)
+{
+ return container_of(dev, struct auxiliary_device, dev);
+}
+
+static inline struct auxiliary_driver *to_auxiliary_drv(struct device_driver *drv)
+{
+ return container_of(drv, struct auxiliary_driver, driver);
+}
+
+int auxiliary_device_init(struct auxiliary_device *auxdev);
+int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname);
+#define auxiliary_device_add(auxdev) __auxiliary_device_add(auxdev, KBUILD_MODNAME)
+
+static inline void auxiliary_device_uninit(struct auxiliary_device *auxdev)
+{
+ put_device(&auxdev->dev);
+}
+
+static inline void auxiliary_device_delete(struct auxiliary_device *auxdev)
+{
+ device_del(&auxdev->dev);
+}
+
+int __auxiliary_driver_register(struct auxiliary_driver *auxdrv, struct module *owner,
+ const char *modname);
+#define auxiliary_driver_register(auxdrv) \
+ __auxiliary_driver_register(auxdrv, THIS_MODULE, KBUILD_MODNAME)
+
+void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv);
+
+/**
+ * module_auxiliary_driver() - Helper macro for registering an auxiliary driver
+ * @__auxiliary_driver: auxiliary driver struct
+ *
+ * Helper macro for auxiliary drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_auxiliary_driver(__auxiliary_driver) \
+ module_driver(__auxiliary_driver, auxiliary_driver_register, auxiliary_driver_unregister)
+
+struct auxiliary_device *auxiliary_find_device(struct device *start,
+ const void *data,
+ int (*match)(struct device *dev, const void *data));
+
+#endif /* _AUXILIARY_BUS_H_ */
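To show how the pieces of the new header fit together, here is a hedged sketch of a minimal auxiliary driver; the "parent_mod.my_func" match name and the my_* identifiers are hypothetical:

static int my_probe(struct auxiliary_device *auxdev,
		    const struct auxiliary_device_id *id)
{
	/* allocate and bind driver state to auxdev here */
	return 0;
}

static void my_remove(struct auxiliary_device *auxdev)
{
	/* release driver state */
}

static const struct auxiliary_device_id my_id_table[] = {
	{ .name = "parent_mod.my_func" },	/* hypothetical match name */
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, my_id_table);

static struct auxiliary_driver my_driver = {
	.probe = my_probe,
	.remove = my_remove,
	.id_table = my_id_table,
};
module_auxiliary_driver(my_driver);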
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 639cae2c158b..033eb5f73b65 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1073,12 +1073,15 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
* file system requests.
*/
static inline unsigned int blk_max_size_offset(struct request_queue *q,
- sector_t offset)
-{
- unsigned int chunk_sectors = q->limits.chunk_sectors;
-
- if (!chunk_sectors)
- return q->limits.max_sectors;
+ sector_t offset,
+ unsigned int chunk_sectors)
+{
+ if (!chunk_sectors) {
+ if (q->limits.chunk_sectors)
+ chunk_sectors = q->limits.chunk_sectors;
+ else
+ return q->limits.max_sectors;
+ }
if (likely(is_power_of_2(chunk_sectors)))
chunk_sectors -= offset & (chunk_sectors - 1);
@@ -1101,7 +1104,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
req_op(rq) == REQ_OP_SECURE_ERASE)
return blk_queue_get_max_sectors(q, req_op(rq));
- return min(blk_max_size_offset(q, offset),
+ return min(blk_max_size_offset(q, offset, 0),
blk_queue_get_max_sectors(q, req_op(rq)));
}
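The new third argument lets a caller supply its own chunk size, while passing 0 keeps the old behaviour of falling back to q->limits.chunk_sectors. A hedged sketch (the helper and its zone_sectors parameter are hypothetical):

/* Cap an I/O so it does not cross a zone-sized boundary (illustrative). */
static unsigned int max_sectors_in_zone(struct request_queue *q,
					sector_t offset,
					unsigned int zone_sectors)
{
	return blk_max_size_offset(q, offset, zone_sectors);
}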
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
index 9903088891fa..2696eb0fc149 100644
--- a/include/linux/bootconfig.h
+++ b/include/linux/bootconfig.h
@@ -12,6 +12,9 @@
#define BOOTCONFIG_MAGIC "#BOOTCONFIG\n"
#define BOOTCONFIG_MAGIC_LEN 12
+#define BOOTCONFIG_ALIGN_SHIFT 2
+#define BOOTCONFIG_ALIGN (1 << BOOTCONFIG_ALIGN_SHIFT)
+#define BOOTCONFIG_ALIGN_MASK (BOOTCONFIG_ALIGN - 1)
/* XBC tree node */
struct xbc_node {
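The three macros describe a 4-byte alignment, so the usual mask arithmetic applies; a hypothetical helper that rounds a payload size up to the boundary:

static inline u32 xbc_align_up(u32 size)	/* illustrative, not in the patch */
{
	return (size + BOOTCONFIG_ALIGN_MASK) & ~BOOTCONFIG_ALIGN_MASK;
}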
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index ed71bd1a0825..72e69a0e1e8c 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -246,11 +246,11 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
__ret; \
})
-#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)
+#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_BIND, NULL)
-#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)
+#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_BIND, NULL)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
sk->sk_prot->pre_connect)
@@ -434,8 +434,8 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 2b16bf48aab6..07cb5d15e743 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -20,6 +20,8 @@
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
+#include <linux/sched/mm.h>
+#include <linux/slab.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -36,9 +38,12 @@ struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
+struct kobject;
+struct mem_cgroup;
extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
+extern struct kobject *btf_kobj;
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
struct bpf_iter_aux_info *aux);
@@ -133,11 +138,6 @@ struct bpf_map_ops {
const struct bpf_iter_seq_info *iter_seq_info;
};
-struct bpf_map_memory {
- u32 pages;
- struct user_struct *user;
-};
-
struct bpf_map {
/* The first two cachelines with read-mostly members of which some
* are also accessed in fast-path (e.g. ops, max_entries).
@@ -158,7 +158,9 @@ struct bpf_map {
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
- struct bpf_map_memory memory;
+#ifdef CONFIG_MEMCG_KMEM
+ struct mem_cgroup *memcg;
+#endif
char name[BPF_OBJ_NAME_LEN];
u32 btf_vmlinux_value_type_id;
bool bypass_spec_v1;
@@ -310,6 +312,7 @@ enum bpf_return_type {
RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */
RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */
RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */
+ RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */
};
/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
@@ -418,7 +421,10 @@ struct bpf_insn_access_aux {
enum bpf_reg_type reg_type;
union {
int ctx_field_size;
- u32 btf_id;
+ struct {
+ struct btf *btf;
+ u32 btf_id;
+ };
};
struct bpf_verifier_log *log; /* for verbose logs */
};
@@ -455,6 +461,7 @@ struct bpf_verifier_ops {
struct bpf_insn *dst,
struct bpf_prog *prog, u32 *target_size);
int (*btf_struct_access)(struct bpf_verifier_log *log,
+ const struct btf *btf,
const struct btf_type *t, int off, int size,
enum bpf_access_type atype,
u32 *next_btf_id);
@@ -768,6 +775,7 @@ struct bpf_prog_aux {
u32 ctx_arg_info_size;
u32 max_rdonly_access;
u32 max_rdwr_access;
+ struct btf *attach_btf;
const struct bpf_ctx_arg_aux *ctx_arg_info;
struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
struct bpf_prog *dst_prog;
@@ -1002,7 +1010,6 @@ struct bpf_event_entry {
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
-const char *kernel_type_name(u32 btf_type_id);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@@ -1199,8 +1206,6 @@ void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
-int __bpf_prog_charge(struct user_struct *user, u32 pages);
-void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
void __bpf_free_used_maps(struct bpf_prog_aux *aux,
struct bpf_map **used_maps, u32 len);
@@ -1215,12 +1220,6 @@ void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
-int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
-void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
-int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
-void bpf_map_charge_finish(struct bpf_map_memory *mem);
-void bpf_map_charge_move(struct bpf_map_memory *dst,
- struct bpf_map_memory *src);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
@@ -1237,6 +1236,34 @@ int generic_map_delete_batch(struct bpf_map *map,
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
+#ifdef CONFIG_MEMCG_KMEM
+void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
+ int node);
+void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
+void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
+ size_t align, gfp_t flags);
+#else
+static inline void *
+bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
+ int node)
+{
+ return kmalloc_node(size, flags, node);
+}
+
+static inline void *
+bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
+{
+ return kzalloc(size, flags);
+}
+
+static inline void __percpu *
+bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
+ gfp_t flags)
+{
+ return __alloc_percpu_gfp(size, align, flags);
+}
+#endif
+
extern int sysctl_unprivileged_bpf_disabled;
static inline bool bpf_allow_ptr_leaks(void)
@@ -1294,6 +1321,10 @@ typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
struct bpf_link_info *info);
+enum bpf_iter_feature {
+ BPF_ITER_RESCHED = BIT(0),
+};
+
#define BPF_ITER_CTX_ARG_MAX 2
struct bpf_iter_reg {
const char *target;
@@ -1302,6 +1333,7 @@ struct bpf_iter_reg {
bpf_iter_show_fdinfo_t show_fdinfo;
bpf_iter_fill_link_info_t fill_link_info;
u32 ctx_arg_info_size;
+ u32 feature;
struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
const struct bpf_iter_seq_info *seq_info;
};
@@ -1422,12 +1454,13 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info);
-int btf_struct_access(struct bpf_verifier_log *log,
+int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
const struct btf_type *t, int off, int size,
enum bpf_access_type atype,
u32 *next_btf_id);
bool btf_struct_ids_match(struct bpf_verifier_log *log,
- int off, u32 id, u32 need_type_id);
+ const struct btf *btf, u32 id, int off,
+ const struct btf *need_btf, u32 need_type_id);
int btf_distill_func_proto(struct bpf_verifier_log *log,
struct btf *btf,
@@ -1482,15 +1515,6 @@ bpf_prog_inc_not_zero(struct bpf_prog *prog)
return ERR_PTR(-EOPNOTSUPP);
}
-static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
-{
- return 0;
-}
-
-static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
-{
-}
-
static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
const struct bpf_link_ops *ops,
struct bpf_prog *prog)
@@ -1834,6 +1858,8 @@ extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
+extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
+extern const struct bpf_func_proto bpf_sock_from_file_proto;
const struct bpf_func_proto *bpf_tracing_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
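With struct bpf_map_memory and the memlock charge/uncharge API gone, map implementations allocate through the new wrappers so the memory is charged to the map's memory cgroup when CONFIG_MEMCG_KMEM is enabled. A hedged sketch (struct my_elem and the helper are hypothetical):

struct my_elem {
	struct hlist_node node;
	char data[64];
};

static struct my_elem *my_map_alloc_elem(struct bpf_map *map, int numa_node)
{
	/* Charged to map->memcg under CONFIG_MEMCG_KMEM; otherwise this
	 * degrades to a plain kmalloc_node(). */
	return bpf_map_kmalloc_node(map, sizeof(struct my_elem),
				    GFP_ATOMIC | __GFP_NOWARN, numa_node);
}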
diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
index aaacb6aafc87..0d1c33ace398 100644
--- a/include/linux/bpf_lsm.h
+++ b/include/linux/bpf_lsm.h
@@ -7,6 +7,7 @@
#ifndef _LINUX_BPF_LSM_H
#define _LINUX_BPF_LSM_H
+#include <linux/sched.h>
#include <linux/bpf.h>
#include <linux/lsm_hooks.h>
@@ -26,6 +27,8 @@ extern struct lsm_blob_sizes bpf_lsm_blob_sizes;
int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
const struct bpf_prog *prog);
+bool bpf_lsm_is_sleepable_hook(u32 btf_id);
+
static inline struct bpf_storage_blob *bpf_inode(
const struct inode *inode)
{
@@ -35,12 +38,29 @@ static inline struct bpf_storage_blob *bpf_inode(
return inode->i_security + bpf_lsm_blob_sizes.lbs_inode;
}
+static inline struct bpf_storage_blob *bpf_task(
+ const struct task_struct *task)
+{
+ if (unlikely(!task->security))
+ return NULL;
+
+ return task->security + bpf_lsm_blob_sizes.lbs_task;
+}
+
extern const struct bpf_func_proto bpf_inode_storage_get_proto;
extern const struct bpf_func_proto bpf_inode_storage_delete_proto;
+extern const struct bpf_func_proto bpf_task_storage_get_proto;
+extern const struct bpf_func_proto bpf_task_storage_delete_proto;
void bpf_inode_storage_free(struct inode *inode);
+void bpf_task_storage_free(struct task_struct *task);
#else /* !CONFIG_BPF_LSM */
+static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id)
+{
+ return false;
+}
+
static inline int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
const struct bpf_prog *prog)
{
@@ -53,10 +73,20 @@ static inline struct bpf_storage_blob *bpf_inode(
return NULL;
}
+static inline struct bpf_storage_blob *bpf_task(
+ const struct task_struct *task)
+{
+ return NULL;
+}
+
static inline void bpf_inode_storage_free(struct inode *inode)
{
}
+static inline void bpf_task_storage_free(struct task_struct *task)
+{
+}
+
#endif /* CONFIG_BPF_LSM */
#endif /* _LINUX_BPF_LSM_H */
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 2e6f568377f1..99f7fd657d87 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -109,6 +109,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops)
#endif
#ifdef CONFIG_BPF_LSM
BPF_MAP_TYPE(BPF_MAP_TYPE_INODE_STORAGE, inode_storage_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_TASK_STORAGE, task_storage_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
#if defined(CONFIG_XDP_SOCKETS)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index e83ef6f6bf43..e941fe1484e5 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -5,6 +5,7 @@
#define _LINUX_BPF_VERIFIER_H 1
#include <linux/bpf.h> /* for enum bpf_reg_type */
+#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>
@@ -43,24 +44,31 @@ enum bpf_reg_liveness {
struct bpf_reg_state {
/* Ordering of fields matters. See states_equal() */
enum bpf_reg_type type;
+ /* Fixed part of pointer offset, pointer types only */
+ s32 off;
union {
/* valid when type == PTR_TO_PACKET */
- u16 range;
+ int range;
/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
* PTR_TO_MAP_VALUE_OR_NULL
*/
struct bpf_map *map_ptr;
- u32 btf_id; /* for PTR_TO_BTF_ID */
+ /* for PTR_TO_BTF_ID */
+ struct {
+ struct btf *btf;
+ u32 btf_id;
+ };
u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
/* Max size from any of the above. */
- unsigned long raw;
+ struct {
+ unsigned long raw1;
+ unsigned long raw2;
+ } raw;
};
- /* Fixed part of pointer offset, pointer types only */
- s32 off;
/* For PTR_TO_PACKET, used to find other pointers with the same variable
* offset, so they can share range knowledge.
* For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
@@ -311,7 +319,10 @@ struct bpf_insn_aux_data {
struct {
enum bpf_reg_type reg_type; /* type of pseudo_btf_id */
union {
- u32 btf_id; /* btf_id for struct typed var */
+ struct {
+ struct btf *btf;
+ u32 btf_id; /* btf_id for struct typed var */
+ };
u32 mem_size; /* mem_size for non-struct typed var */
};
} btf_var;
@@ -459,9 +470,12 @@ int check_ctx_reg(struct bpf_verifier_env *env,
/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
- u32 btf_id)
+ struct btf *btf, u32 btf_id)
{
- return tgt_prog ? (((u64)tgt_prog->aux->id) << 32 | btf_id) : btf_id;
+ if (tgt_prog)
+ return ((u64)tgt_prog->aux->id << 32) | btf_id;
+ else
+ return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
}
int bpf_check_attach_target(struct bpf_verifier_log *log,
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 2bf641829664..4c200f5d242b 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -18,6 +18,7 @@ struct btf_show;
extern const struct file_operations btf_fops;
+void btf_get(struct btf *btf);
void btf_put(struct btf *btf);
int btf_new_fd(const union bpf_attr *attr);
struct btf *btf_get_by_fd(int fd);
@@ -88,7 +89,8 @@ int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
char *buf, int len, u64 flags);
int btf_get_fd_by_id(u32 id);
-u32 btf_id(const struct btf *btf);
+u32 btf_obj_id(const struct btf *btf);
+bool btf_is_kernel(const struct btf *btf);
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
const struct btf_member *m,
u32 expected_offset, u32 expected_size);
@@ -206,6 +208,8 @@ static inline const struct btf_var_secinfo *btf_type_var_secinfo(
}
#ifdef CONFIG_BPF_SYSCALL
+struct bpf_prog;
+
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
const char *btf_name_by_offset(const struct btf *btf, u32 offset);
struct btf *btf_parse_vmlinux(void);
diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
index e3a0be2c90ad..7bb66e15b481 100644
--- a/include/linux/build_bug.h
+++ b/include/linux/build_bug.h
@@ -77,4 +77,9 @@
#define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
#define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
+#ifdef __GENKSYMS__
+/* genksyms gets confused by _Static_assert */
+#define _Static_assert(expr, ...)
+#endif
+
#endif /* _LINUX_BUILD_BUG_H */
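The stub only affects genksyms; regular builds still evaluate the assertion, as in this illustrative use of static_assert() on a fixed-layout struct (the struct is hypothetical):

struct on_wire {
	__le32 magic;
	__le32 len;
};
static_assert(sizeof(struct on_wire) == 8, "on_wire layout must not change");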
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 41ff31795320..197a79535cc2 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -98,14 +98,13 @@ static inline unsigned int can_bit_time(const struct can_bittiming *bt)
}
/*
- * get_can_dlc(value) - helper macro to cast a given data length code (dlc)
- * to u8 and ensure the dlc value to be max. 8 bytes.
+ * can_cc_dlc2len(value) - convert a given data length code (dlc) of a
+ * Classical CAN frame into a valid data length of max. 8 bytes.
*
* To be used in the CAN netdriver receive path to ensure conformance with
* ISO 11898-1 Chapter 8.4.2.3 (DLC field)
*/
-#define get_can_dlc(i) (min_t(u8, (i), CAN_MAX_DLC))
-#define get_canfd_dlc(i) (min_t(u8, (i), CANFD_MAX_DLC))
+#define can_cc_dlc2len(dlc) (min_t(u8, (dlc), CAN_MAX_DLEN))
/* Check for outgoing skbs that have not been created by the CAN subsystem */
static inline bool can_skb_headroom_valid(struct net_device *dev,
@@ -171,6 +170,31 @@ static inline bool can_is_canfd_skb(const struct sk_buff *skb)
return skb->len == CANFD_MTU;
}
+/* helper to get the data length code (DLC) for Classical CAN raw DLC access */
+static inline u8 can_get_cc_dlc(const struct can_frame *cf, const u32 ctrlmode)
+{
+ /* return len8_dlc as dlc value only if all conditions apply */
+ if ((ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC) &&
+ (cf->len == CAN_MAX_DLEN) &&
+ (cf->len8_dlc > CAN_MAX_DLEN && cf->len8_dlc <= CAN_MAX_RAW_DLC))
+ return cf->len8_dlc;
+
+ /* return the payload length as dlc value */
+ return cf->len;
+}
+
+/* helper to set len and len8_dlc value for Classical CAN raw DLC access */
+static inline void can_frame_set_cc_len(struct can_frame *cf, const u8 dlc,
+ const u32 ctrlmode)
+{
+ /* the caller already ensured that dlc is a value from 0 .. 15 */
+ if (ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC && dlc > CAN_MAX_DLEN)
+ cf->len8_dlc = dlc;
+
+ /* limit the payload length 'len' to CAN_MAX_DLEN */
+ cf->len = can_cc_dlc2len(dlc);
+}
+
/* helper to define static CAN controller features at device creation time */
static inline void can_set_static_ctrlmode(struct net_device *dev,
u32 static_mode)
@@ -186,11 +210,11 @@ static inline void can_set_static_ctrlmode(struct net_device *dev,
dev->mtu = CANFD_MTU;
}
-/* get data length from can_dlc with sanitized can_dlc */
-u8 can_dlc2len(u8 can_dlc);
+/* get data length from raw data length code (DLC) */
+u8 can_fd_dlc2len(u8 dlc);
/* map the sanitized data length to an appropriate data length code */
-u8 can_len2dlc(u8 len);
+u8 can_fd_len2dlc(u8 len);
struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
unsigned int txqs, unsigned int rxqs);
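Together, can_frame_set_cc_len() and can_get_cc_dlc() let a driver preserve raw DLC values 9..15 when CAN_CTRLMODE_CC_LEN8_DLC is enabled, while cf->len stays within 0..8. A hedged receive-path sketch (my_can_rx_frame() and hw_dlc are hypothetical):

static void my_can_rx_frame(struct net_device *dev, struct can_frame *cf,
			    u8 hw_dlc)
{
	struct can_priv *priv = netdev_priv(dev);

	/* Stores the raw DLC in cf->len8_dlc when raw-DLC mode is on and
	 * clamps cf->len to CAN_MAX_DLEN in either case. */
	can_frame_set_cc_len(cf, hw_dlc & 0x0F, priv->ctrlmode);
}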
diff --git a/include/linux/can/dev/peak_canfd.h b/include/linux/can/dev/peak_canfd.h
index 5fd627e9da19..f38772fd0c07 100644
--- a/include/linux/can/dev/peak_canfd.h
+++ b/include/linux/can/dev/peak_canfd.h
@@ -282,7 +282,7 @@ static inline int pucan_msg_get_channel(const struct pucan_rx_msg *msg)
}
/* return the dlc value from any received message channel_dlc field */
-static inline int pucan_msg_get_dlc(const struct pucan_rx_msg *msg)
+static inline u8 pucan_msg_get_dlc(const struct pucan_rx_msg *msg)
{
return msg->channel_dlc >> 4;
}
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index a5dfbaf2470d..868924dec5a1 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -15,7 +15,8 @@
#include <linux/workqueue.h>
#include <linux/list.h>
#include <crypto/aes.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
struct ccp_device;
struct ccp_cmd;
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index fee0b5547cd0..559ee05f86b2 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -668,21 +668,6 @@ struct cgroup_subsys {
*/
bool threaded:1;
- /*
- * If %false, this subsystem is properly hierarchical -
- * configuration, resource accounting and restriction on a parent
- * cgroup cover those of its children. If %true, hierarchy support
- * is broken in some ways - some subsystems ignore hierarchy
- * completely while others are only implemented half-way.
- *
- * It's now disallowed to create nested cgroups if the subsystem is
- * broken and cgroup core will emit a warning message on such
- * cases. Eventually, all subsystems will be made properly
- * hierarchical and this will go away.
- */
- bool broken_hierarchy:1;
- bool warned_broken_hierarchy:1;
-
/* the following two fields are initialized automatically during boot */
int id;
const char *name;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 618838c48313..451c2d26a5db 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -854,7 +854,6 @@ static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
#endif /* CONFIG_CGROUP_DATA */
struct cgroup_namespace {
- refcount_t count;
struct ns_common ns;
struct user_namespace *user_ns;
struct ucounts *ucounts;
@@ -889,12 +888,12 @@ copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
if (ns)
- refcount_inc(&ns->count);
+ refcount_inc(&ns->ns.count);
}
static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
- if (ns && refcount_dec_and_test(&ns->count))
+ if (ns && refcount_dec_and_test(&ns->ns.count))
free_cgroup_ns(ns);
}
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 1de5a1151ee7..ed4070ed41ef 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -98,11 +98,8 @@ extern void reset_isolation_suitable(pg_data_t *pgdat);
extern enum compact_result compaction_suitable(struct zone *zone, int order,
unsigned int alloc_flags, int highest_zoneidx);
-extern void defer_compaction(struct zone *zone, int order);
-extern bool compaction_deferred(struct zone *zone, int order);
extern void compaction_defer_reset(struct zone *zone, int order,
bool alloc_success);
-extern bool compaction_restarting(struct zone *zone, int order);
/* Compaction has made some progress and retrying makes sense */
static inline bool compaction_made_progress(enum compact_result result)
@@ -194,15 +191,6 @@ static inline enum compact_result compaction_suitable(struct zone *zone, int ord
return COMPACT_SKIPPED;
}
-static inline void defer_compaction(struct zone *zone, int order)
-{
-}
-
-static inline bool compaction_deferred(struct zone *zone, int order)
-{
- return true;
-}
-
static inline bool compaction_made_progress(enum compact_result result)
{
return false;
diff --git a/include/linux/completion.h b/include/linux/completion.h
index bf8e77001f18..51d9ab079629 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -28,8 +28,7 @@ struct completion {
struct swait_queue_head wait;
};
-#define init_completion_map(x, m) __init_completion(x)
-#define init_completion(x) __init_completion(x)
+#define init_completion_map(x, m) init_completion(x)
static inline void complete_acquire(struct completion *x) {}
static inline void complete_release(struct completion *x) {}
@@ -82,7 +81,7 @@ static inline void complete_release(struct completion *x) {}
* This inline function will initialize a dynamically created completion
* structure.
*/
-static inline void __init_completion(struct completion *x)
+static inline void init_completion(struct completion *x)
{
x->done = 0;
init_swait_queue_head(&x->wait);
diff --git a/include/linux/console.h b/include/linux/console.h
index 4b1e26c4cb42..20874db50bc8 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -62,7 +62,6 @@ struct consw {
int (*con_font_get)(struct vc_data *vc, struct console_font *font);
int (*con_font_default)(struct vc_data *vc,
struct console_font *font, char *name);
- int (*con_font_copy)(struct vc_data *vc, int con);
int (*con_resize)(struct vc_data *vc, unsigned int width,
unsigned int height, unsigned int user);
void (*con_set_palette)(struct vc_data *vc,
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index d53cd331c4dd..bceb06498521 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -51,7 +51,8 @@ static inline enum ctx_state exception_enter(void)
{
enum ctx_state prev_ctx;
- if (!context_tracking_enabled())
+ if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) ||
+ !context_tracking_enabled())
return 0;
prev_ctx = this_cpu_read(context_tracking.state);
@@ -63,7 +64,8 @@ static inline enum ctx_state exception_enter(void)
static inline void exception_exit(enum ctx_state prev_ctx)
{
- if (context_tracking_enabled()) {
+ if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) &&
+ context_tracking_enabled()) {
if (prev_ctx != CONTEXT_KERNEL)
context_tracking_enter(prev_ctx);
}
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index bc56287a1ed1..0042ef362511 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -152,6 +152,7 @@ enum cpuhp_state {
CPUHP_AP_ONLINE,
CPUHP_TEARDOWN_CPU,
CPUHP_AP_ONLINE_IDLE,
+ CPUHP_AP_SCHED_WAIT_EMPTY,
CPUHP_AP_SMPBOOT_THREADS,
CPUHP_AP_X86_VDSO_VMA_ONLINE,
CPUHP_AP_IRQ_AFFINITY_ONLINE,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index f0d895d6ac39..383684e30f12 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -199,6 +199,11 @@ static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
return cpumask_next_and(-1, src1p, src2p);
}
+static inline int cpumask_any_distribute(const struct cpumask *srcp)
+{
+ return cpumask_first(srcp);
+}
+
#define for_each_cpu(cpu, mask) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_not(cpu, mask) \
@@ -252,6 +257,7 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
unsigned int cpumask_local_spread(unsigned int i, int node);
int cpumask_any_and_distribute(const struct cpumask *src1p,
const struct cpumask *src2p);
+int cpumask_any_distribute(const struct cpumask *srcp);
/**
* for_each_cpu - iterate over every cpu in a mask
diff --git a/include/linux/device.h b/include/linux/device.h
index 5ed101be7b2e..89bb8b84173e 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -351,19 +351,13 @@ enum dl_dev_state {
* struct dev_links_info - Device data related to device links.
* @suppliers: List of links to supplier devices.
* @consumers: List of links to consumer devices.
- * @needs_suppliers: Hook to global list of devices waiting for suppliers.
- * @defer_hook: Hook to global list of devices that have deferred sync_state or
- * deferred fw_devlink.
- * @need_for_probe: If needs_suppliers is on a list, this indicates if the
- * suppliers are needed for probe or not.
+ * @defer_sync: Hook to global list of devices that have deferred sync_state.
* @status: Driver status information.
*/
struct dev_links_info {
struct list_head suppliers;
struct list_head consumers;
- struct list_head needs_suppliers;
- struct list_head defer_hook;
- bool need_for_probe;
+ struct list_head defer_sync;
enum dl_dev_state status;
};
diff --git a/include/linux/device/class.h b/include/linux/device/class.h
index e8d470c457d1..e61ec5502019 100644
--- a/include/linux/device/class.h
+++ b/include/linux/device/class.h
@@ -256,6 +256,20 @@ extern void class_destroy(struct class *cls);
/* This is a #define to keep the compiler from merging different
* instances of the __key variable */
+
+/**
+ * class_create - create a struct class structure
+ * @owner: pointer to the module that is to "own" this struct class
+ * @name: pointer to a string for the name of this class.
+ *
+ * This is used to create a struct class pointer that can then be used
+ * in calls to device_create().
+ *
+ * Returns &struct class pointer on success, or ERR_PTR() on error.
+ *
+ * Note, the pointer created here is to be destroyed when finished by
+ * making a call to class_destroy().
+ */
#define class_create(owner, name) \
({ \
static struct lock_class_key __key; \
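The pairing the new kernel-doc describes looks like this in practice; a minimal sketch with placeholder my_* names:

static struct class *my_class;

static int __init my_init(void)
{
	my_class = class_create(THIS_MODULE, "my_class");
	if (IS_ERR(my_class))
		return PTR_ERR(my_class);
	return 0;
}

static void __exit my_exit(void)
{
	class_destroy(my_class);
}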
diff --git a/include/linux/dma-buf-map.h b/include/linux/dma-buf-map.h
new file mode 100644
index 000000000000..583a3a1f9447
--- /dev/null
+++ b/include/linux/dma-buf-map.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Pointer to dma-buf-mapped memory, plus helpers.
+ */
+
+#ifndef __DMA_BUF_MAP_H__
+#define __DMA_BUF_MAP_H__
+
+#include <linux/io.h>
+#include <linux/string.h>
+
+/**
+ * DOC: overview
+ *
+ * Calling dma-buf's vmap operation returns a pointer to the buffer's memory.
+ * Depending on the location of the buffer, users may have to access it with
+ * I/O operations or memory load/store operations. For example, copying to
+ * system memory could be done with memcpy(), copying to I/O memory would be
+ * done with memcpy_toio().
+ *
+ * .. code-block:: c
+ *
+ * void *vaddr = ...; // pointer to system memory
+ * memcpy(vaddr, src, len);
+ *
+ * void *vaddr_iomem = ...; // pointer to I/O memory
+ * memcpy_toio(vaddr_iomem, src, len);
+ *
+ * When using dma-buf's vmap operation, the returned pointer is encoded as
+ * :c:type:`struct dma_buf_map <dma_buf_map>`.
+ * :c:type:`struct dma_buf_map <dma_buf_map>` stores the buffer's address in
+ * system or I/O memory and a flag that signals the required method of
+ * accessing the buffer. Use the returned instance and the helper functions
+ * to access the buffer's memory in the correct way.
+ *
+ * The type :c:type:`struct dma_buf_map <dma_buf_map>` and its helpers are
+ * actually independent from the dma-buf infrastructure. When sharing buffers
+ * among devices, drivers have to know the location of the memory to access
+ * the buffers in a safe way. :c:type:`struct dma_buf_map <dma_buf_map>`
+ * solves this problem for dma-buf and its users. If other drivers or
+ * sub-systems require similar functionality, the type could be generalized
+ * and moved to a more prominent header file.
+ *
+ * Open-coding access to :c:type:`struct dma_buf_map <dma_buf_map>` is
+ * considered bad style. Rather than accessing its fields directly, use one
+ * of the provided helper functions, or implement your own. For example,
+ * instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be initialized
+ * statically with DMA_BUF_MAP_INIT_VADDR(), or at runtime with
+ * dma_buf_map_set_vaddr(). These helpers will set an address in system memory.
+ *
+ * .. code-block:: c
+ *
+ * struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(0xdeadbeaf);
+ *
+ * dma_buf_map_set_vaddr(&map, 0xdeadbeaf);
+ *
+ * To set an address in I/O memory, use dma_buf_map_set_vaddr_iomem().
+ *
+ * .. code-block:: c
+ *
+ * dma_buf_map_set_vaddr_iomem(&map, 0xdeadbeaf);
+ *
+ * Instances of struct dma_buf_map do not have to be cleaned up, but
+ * can be cleared to NULL with dma_buf_map_clear(). Cleared mappings
+ * always refer to system memory.
+ *
+ * .. code-block:: c
+ *
+ * dma_buf_map_clear(&map);
+ *
+ * Test if a mapping is valid with either dma_buf_map_is_set() or
+ * dma_buf_map_is_null().
+ *
+ * .. code-block:: c
+ *
+ * if (dma_buf_map_is_set(&map) != dma_buf_map_is_null(&map))
+ * // always true
+ *
+ * Instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be compared
+ * for equality with dma_buf_map_is_equal(). Mappings that point to different
+ * memory spaces, system or I/O, are never equal. That's even true if both
+ * spaces are located in the same address space, both mappings contain the
+ * same address value, or both mappings refer to NULL.
+ *
+ * .. code-block:: c
+ *
+ * struct dma_buf_map sys_map; // refers to system memory
+ * struct dma_buf_map io_map; // refers to I/O memory
+ *
+ * if (dma_buf_map_is_equal(&sys_map, &io_map))
+ * // always false
+ *
+ * A set up instance of struct dma_buf_map can be used to access or manipulate
+ * the buffer memory. Depending on the location of the memory, the provided
+ * helpers will pick the correct operations. Data can be copied into the memory
+ * with dma_buf_map_memcpy_to(). The address can be manipulated with
+ * dma_buf_map_incr().
+ *
+ * .. code-block:: c
+ *
+ * const void *src = ...; // source buffer
+ * size_t len = ...; // length of src
+ *
+ * dma_buf_map_memcpy_to(&map, src, len);
+ * dma_buf_map_incr(&map, len); // go to first byte after the memcpy
+ */
+
+/**
+ * struct dma_buf_map - Pointer to vmap'ed dma-buf memory.
+ * @vaddr_iomem: The buffer's address if in I/O memory
+ * @vaddr: The buffer's address if in system memory
+ * @is_iomem: True if the dma-buf memory is located in I/O
+ * memory, or false otherwise.
+ */
+struct dma_buf_map {
+ union {
+ void __iomem *vaddr_iomem;
+ void *vaddr;
+ };
+ bool is_iomem;
+};
+
+/**
+ * DMA_BUF_MAP_INIT_VADDR - Initializes struct dma_buf_map to an address in system memory
+ * @vaddr: A system-memory address
+ */
+#define DMA_BUF_MAP_INIT_VADDR(vaddr_) \
+ { \
+ .vaddr = (vaddr_), \
+ .is_iomem = false, \
+ }
+
+/**
+ * dma_buf_map_set_vaddr - Sets a dma-buf mapping structure to an address in system memory
+ * @map: The dma-buf mapping structure
+ * @vaddr: A system-memory address
+ *
+ * Sets the address and clears the I/O-memory flag.
+ */
+static inline void dma_buf_map_set_vaddr(struct dma_buf_map *map, void *vaddr)
+{
+ map->vaddr = vaddr;
+ map->is_iomem = false;
+}
+
+/**
+ * dma_buf_map_set_vaddr_iomem - Sets a dma-buf mapping structure to an address in I/O memory
+ * @map: The dma-buf mapping structure
+ * @vaddr_iomem: An I/O-memory address
+ *
+ * Sets the address and the I/O-memory flag.
+ */
+static inline void dma_buf_map_set_vaddr_iomem(struct dma_buf_map *map,
+ void __iomem *vaddr_iomem)
+{
+ map->vaddr_iomem = vaddr_iomem;
+ map->is_iomem = true;
+}
+
+/**
+ * dma_buf_map_is_equal - Compares two dma-buf mapping structures for equality
+ * @lhs: The dma-buf mapping structure
+ * @rhs: A dma-buf mapping structure to compare with
+ *
+ * Two dma-buf mapping structures are equal if they both refer to the same type of memory
+ * and to the same address within that memory.
+ *
+ * Returns:
+ * True if both structures are equal, or false otherwise.
+ */
+static inline bool dma_buf_map_is_equal(const struct dma_buf_map *lhs,
+ const struct dma_buf_map *rhs)
+{
+ if (lhs->is_iomem != rhs->is_iomem)
+ return false;
+ else if (lhs->is_iomem)
+ return lhs->vaddr_iomem == rhs->vaddr_iomem;
+ else
+ return lhs->vaddr == rhs->vaddr;
+}
+
+/**
+ * dma_buf_map_is_null - Tests for a dma-buf mapping to be NULL
+ * @map: The dma-buf mapping structure
+ *
+ * Depending on the state of struct dma_buf_map.is_iomem, tests if the
+ * mapping is NULL.
+ *
+ * Returns:
+ * True if the mapping is NULL, or false otherwise.
+ */
+static inline bool dma_buf_map_is_null(const struct dma_buf_map *map)
+{
+ if (map->is_iomem)
+ return !map->vaddr_iomem;
+ return !map->vaddr;
+}
+
+/**
+ * dma_buf_map_is_set - Tests if the dma-buf mapping has been set
+ * @map: The dma-buf mapping structure
+ *
+ * Depending on the state of struct dma_buf_map.is_iomem, tests if the
+ * mapping has been set.
+ *
+ * Returns:
+ * True if the mapping has been set, or false otherwise.
+ */
+static inline bool dma_buf_map_is_set(const struct dma_buf_map *map)
+{
+ return !dma_buf_map_is_null(map);
+}
+
+/**
+ * dma_buf_map_clear - Clears a dma-buf mapping structure
+ * @map: The dma-buf mapping structure
+ *
+ * Clears all fields to zero, including struct dma_buf_map.is_iomem, so
+ * mapping structures that were set to point to I/O memory are reset to
+ * system memory. Pointers are cleared to NULL. This is the default.
+ */
+static inline void dma_buf_map_clear(struct dma_buf_map *map)
+{
+ if (map->is_iomem) {
+ map->vaddr_iomem = NULL;
+ map->is_iomem = false;
+ } else {
+ map->vaddr = NULL;
+ }
+}
+
+/**
+ * dma_buf_map_memcpy_to - Memcpy into dma-buf mapping
+ * @dst: The dma-buf mapping structure
+ * @src: The source buffer
+ * @len: The number of bytes in src
+ *
+ * Copies data into a dma-buf mapping. The source buffer is in system
+ * memory. Depending on the buffer's location, the helper picks the correct
+ * method of accessing the memory.
+ */
+static inline void dma_buf_map_memcpy_to(struct dma_buf_map *dst, const void *src, size_t len)
+{
+ if (dst->is_iomem)
+ memcpy_toio(dst->vaddr_iomem, src, len);
+ else
+ memcpy(dst->vaddr, src, len);
+}
+
+/**
+ * dma_buf_map_incr - Increments the address stored in a dma-buf mapping
+ * @map: The dma-buf mapping structure
+ * @incr: The number of bytes to increment
+ *
+ * Increments the address stored in a dma-buf mapping. Depending on the
+ * buffer's location, the correct value will be updated.
+ */
+static inline void dma_buf_map_incr(struct dma_buf_map *map, size_t incr)
+{
+ if (map->is_iomem)
+ map->vaddr_iomem += incr;
+ else
+ map->vaddr += incr;
+}
+
+#endif /* __DMA_BUF_MAP_H__ */
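A short sketch that ties the helpers together; fill_header() and its arguments are illustrative:

static void fill_header(struct dma_buf_map *map, const void *hdr, size_t len)
{
	if (dma_buf_map_is_null(map))
		return;

	dma_buf_map_memcpy_to(map, hdr, len);	/* picks memcpy() or memcpy_toio() */
	dma_buf_map_incr(map, len);		/* advance past the header */
}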
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 957b398d30e5..cf72699cb2bc 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -13,6 +13,7 @@
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__
+#include <linux/dma-buf-map.h>
#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
@@ -145,7 +146,8 @@ struct dma_buf_ops {
*
* A &sg_table scatter list of the backing storage of the DMA buffer,
* already mapped into the device address space of the &device attached
- * with the provided &dma_buf_attachment.
+ * with the provided &dma_buf_attachment. The addresses and lengths in
+ * the scatter list are PAGE_SIZE aligned.
*
* On failure, returns a negative error value wrapped into a pointer.
* May also return -EINTR when a signal was received while being
@@ -265,13 +267,13 @@ struct dma_buf_ops {
*/
int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
- void *(*vmap)(struct dma_buf *);
- void (*vunmap)(struct dma_buf *, void *vaddr);
+ int (*vmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
+ void (*vunmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
};
/**
* struct dma_buf - shared buffer object
- * @size: size of the buffer
+ * @size: size of the buffer; invariant over the lifetime of the buffer.
* @file: file pointer used for sharing buffers across, and for refcounting.
* @attachments: list of dma_buf_attachment that denotes all devices attached,
* protected by dma_resv lock.
@@ -309,7 +311,7 @@ struct dma_buf {
const struct dma_buf_ops *ops;
struct mutex lock;
unsigned vmapping_counter;
- void *vmap_ptr;
+ struct dma_buf_map vmap_ptr;
const char *exp_name;
const char *name;
spinlock_t name_lock;
@@ -403,7 +405,7 @@ struct dma_buf_attachment {
* @exp_name: name of the exporter - useful for debugging.
* @owner: pointer to exporter module - used for refcounting kernel module
* @ops: Attach allocator-defined dma buf ops to the new buffer
- * @size: Size of the buffer
+ * @size: Size of the buffer - invariant over the lifetime of the buffer
* @flags: mode flags for the file
* @resv: reservation-object, NULL to allocate default one
* @priv: Attach private data of allocator to this buffer
@@ -502,6 +504,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long);
-void *dma_buf_vmap(struct dma_buf *);
-void dma_buf_vunmap(struct dma_buf *, void *vaddr);
+int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
+void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
#endif /* __DMA_BUF_H__ */
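The ops and entry-point changes above move the vmap interface from returning a raw pointer to filling in a struct dma_buf_map and returning an errno. A hedged sketch of the updated caller pattern (not taken from this patch):

	struct dma_buf_map map;
	int ret;

	ret = dma_buf_vmap(dmabuf, &map);
	if (ret)
		return ret;	/* no mapping was established */

	/* Use map.vaddr or map.vaddr_iomem, as indicated by map.is_iomem. */

	dma_buf_vunmap(dmabuf, &map);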
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 15e8f3d8a895..76d3562d3006 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -175,11 +175,15 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* @MEM_RDDR3: Registered DDR3 RAM
* This is a variant of the DDR3 memories.
* @MEM_LRDDR3: Load-Reduced DDR3 memory.
+ * @MEM_LPDDR3: Low-Power DDR3 memory.
* @MEM_DDR4: Unbuffered DDR4 RAM
* @MEM_RDDR4: Registered DDR4 RAM
* This is a variant of the DDR4 memories.
* @MEM_LRDDR4: Load-Reduced DDR4 memory.
+ * @MEM_LPDDR4: Low-Power DDR4 memory.
+ * @MEM_DDR5: Unbuffered DDR5 RAM
* @MEM_NVDIMM: Non-volatile RAM
+ * @MEM_WIO2: Wide I/O 2.
*/
enum mem_type {
MEM_EMPTY = 0,
@@ -200,10 +204,14 @@ enum mem_type {
MEM_DDR3,
MEM_RDDR3,
MEM_LRDDR3,
+ MEM_LPDDR3,
MEM_DDR4,
MEM_RDDR4,
MEM_LRDDR4,
+ MEM_LPDDR4,
+ MEM_DDR5,
MEM_NVDIMM,
+ MEM_WIO2,
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -223,13 +231,17 @@ enum mem_type {
#define MEM_FLAG_XDR BIT(MEM_XDR)
#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
+#define MEM_FLAG_LPDDR3 BIT(MEM_LPDDR3)
#define MEM_FLAG_DDR4 BIT(MEM_DDR4)
#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4)
#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
+#define MEM_FLAG_LPDDR4 BIT(MEM_LPDDR4)
+#define MEM_FLAG_DDR5 BIT(MEM_DDR5)
#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
+#define MEM_FLAG_WIO2 BIT(MEM_WIO2)
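Drivers consume these additions through the existing EDAC fields: a memory controller advertises the types it may carry in ->mtype_cap and tags each DIMM with its detected type. A hypothetical DDR5-capable driver might do (sketch only, assuming the existing struct mem_ctl_info and struct dimm_info members):

	mci->mtype_cap = MEM_FLAG_DDR5 | MEM_FLAG_LPDDR4;
	/* ... per-DIMM detection ... */
	dimm->mtype = MEM_DDR5;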
/**
- * enum edac-type - Error Detection and Correction capabilities and mode
+ * enum edac_type - Error Detection and Correction capabilities and mode
* @EDAC_UNKNOWN: Unknown if ECC is available
* @EDAC_NONE: Doesn't support ECC
* @EDAC_RESERVED: Reserved ECC type
@@ -309,7 +321,7 @@ enum scrub_type {
#define OP_OFFLINE 0x300
/**
- * enum edac_mc_layer - memory controller hierarchy layer
+ * enum edac_mc_layer_type - memory controller hierarchy layer
*
* @EDAC_MC_LAYER_BRANCH: memory layer is named "branch"
* @EDAC_MC_LAYER_CHANNEL: memory layer is named "channel"
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 5d5b0321da0b..c9a46c4e183b 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -22,6 +22,16 @@
SET_PERSONALITY(ex)
#endif
+#ifndef START_THREAD
+#define START_THREAD(elf_ex, regs, elf_entry, start_stack) \
+ start_thread(regs, elf_entry, start_stack)
+#endif
+
+#if defined(ARCH_HAS_SETUP_ADDITIONAL_PAGES) && !defined(ARCH_SETUP_ADDITIONAL_PAGES)
+#define ARCH_SETUP_ADDITIONAL_PAGES(bprm, ex, interpreter) \
+ arch_setup_additional_pages(bprm, interpreter)
+#endif
+
#define ELF32_GNU_PROPERTY_ALIGN 4
#define ELF64_GNU_PROPERTY_ALIGN 8
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index 46c3d691f677..de51c1bef27d 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -104,6 +104,7 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg
#endif
}
+#if defined(CONFIG_UM) || defined(CONFIG_IA64)
/*
* These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
* extra segments containing the gate DSO contents. Dumping its
@@ -118,5 +119,26 @@ elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
extern int
elf_core_write_extra_data(struct coredump_params *cprm);
extern size_t elf_core_extra_data_size(void);
+#else
+static inline Elf_Half elf_core_extra_phdrs(void)
+{
+ return 0;
+}
+
+static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
+{
+ return 1;
+}
+
+static inline int elf_core_write_extra_data(struct coredump_params *cprm)
+{
+ return 1;
+}
+
+static inline size_t elf_core_extra_data_size(void)
+{
+ return 0;
+}
+#endif
#endif /* _LINUX_ELFCORE_H */
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index 474f29638d2c..7c581a4c3797 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -13,22 +13,6 @@
* Define dummy _TIF work flags if not defined by the architecture or for
* disabled functionality.
*/
-#ifndef _TIF_SYSCALL_EMU
-# define _TIF_SYSCALL_EMU (0)
-#endif
-
-#ifndef _TIF_SYSCALL_TRACEPOINT
-# define _TIF_SYSCALL_TRACEPOINT (0)
-#endif
-
-#ifndef _TIF_SECCOMP
-# define _TIF_SECCOMP (0)
-#endif
-
-#ifndef _TIF_SYSCALL_AUDIT
-# define _TIF_SYSCALL_AUDIT (0)
-#endif
-
#ifndef _TIF_PATCH_PENDING
# define _TIF_PATCH_PENDING (0)
#endif
@@ -37,28 +21,36 @@
# define _TIF_UPROBE (0)
#endif
+#ifndef _TIF_NOTIFY_SIGNAL
+# define _TIF_NOTIFY_SIGNAL (0)
+#endif
+
/*
- * TIF flags handled in syscall_enter_from_user_mode()
+ * SYSCALL_WORK flags handled in syscall_enter_from_user_mode()
*/
-#ifndef ARCH_SYSCALL_ENTER_WORK
-# define ARCH_SYSCALL_ENTER_WORK (0)
+#ifndef ARCH_SYSCALL_WORK_ENTER
+# define ARCH_SYSCALL_WORK_ENTER (0)
#endif
-#define SYSCALL_ENTER_WORK \
- (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
- _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_EMU | \
- ARCH_SYSCALL_ENTER_WORK)
-
/*
- * TIF flags handled in syscall_exit_to_user_mode()
+ * SYSCALL_WORK flags handled in syscall_exit_to_user_mode()
*/
-#ifndef ARCH_SYSCALL_EXIT_WORK
-# define ARCH_SYSCALL_EXIT_WORK (0)
+#ifndef ARCH_SYSCALL_WORK_EXIT
+# define ARCH_SYSCALL_WORK_EXIT (0)
#endif
-#define SYSCALL_EXIT_WORK \
- (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
- _TIF_SYSCALL_TRACEPOINT | ARCH_SYSCALL_EXIT_WORK)
+#define SYSCALL_WORK_ENTER (SYSCALL_WORK_SECCOMP | \
+ SYSCALL_WORK_SYSCALL_TRACEPOINT | \
+ SYSCALL_WORK_SYSCALL_TRACE | \
+ SYSCALL_WORK_SYSCALL_EMU | \
+ SYSCALL_WORK_SYSCALL_AUDIT | \
+ SYSCALL_WORK_SYSCALL_USER_DISPATCH | \
+ ARCH_SYSCALL_WORK_ENTER)
+#define SYSCALL_WORK_EXIT (SYSCALL_WORK_SYSCALL_TRACEPOINT | \
+ SYSCALL_WORK_SYSCALL_TRACE | \
+ SYSCALL_WORK_SYSCALL_AUDIT | \
+ SYSCALL_WORK_SYSCALL_USER_DISPATCH | \
+ ARCH_SYSCALL_WORK_EXIT)
/*
* TIF flags handled in exit_to_user_mode_loop()
@@ -69,7 +61,7 @@
#define EXIT_TO_USER_MODE_WORK \
(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
- _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | \
+ _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
ARCH_EXIT_TO_USER_MODE_WORK)
/**
@@ -110,6 +102,27 @@ static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs
#endif
/**
+ * enter_from_user_mode - Establish state when coming from user mode
+ *
+ * Syscall/interrupt entry disables interrupts, but user mode is traced as
+ * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
+ *
+ * 1) Tell lockdep that interrupts are disabled
+ * 2) Invoke context tracking if enabled to reactivate RCU
+ * 3) Trace interrupts off state
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * disabled. The calling code has to be non-instrumentable. When the
+ * function returns all state is correct and interrupts are still
+ * disabled. The subsequent functions can be instrumented.
+ *
+ * This is invoked when there is architecture specific functionality to be
+ * done between establishing state and enabling interrupts. The caller must
+ * enable interrupts before invoking syscall_enter_from_user_mode_work().
+ */
+void enter_from_user_mode(struct pt_regs *regs);
+
+/**
* syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
* @regs: Pointer to current's pt_regs
*
@@ -118,7 +131,8 @@ static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs
* function returns all state is correct, interrupts are enabled and the
* subsequent functions can be instrumented.
*
- * This handles lockdep, RCU (context tracking) and tracing state.
+ * This handles lockdep, RCU (context tracking) and tracing state, i.e.
+ * the functionality provided by enter_from_user_mode().
*
* This is invoked when there is extra architecture specific functionality
* to be done between establishing state and handling user mode entry work.
@@ -144,8 +158,8 @@ void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
*
* It handles the following work items:
*
- * 1) TIF flag dependent invocations of arch_syscall_enter_tracehook(),
- * __secure_computing(), trace_sys_enter()
+ * 1) syscall_work flag dependent invocations of
+ * arch_syscall_enter_tracehook(), __secure_computing(), trace_sys_enter()
* 2) Invocation of audit_syscall_entry()
*/
long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
@@ -259,12 +273,13 @@ static __always_inline void arch_exit_to_user_mode(void) { }
#endif
/**
- * arch_do_signal - Architecture specific signal delivery function
+ * arch_do_signal_or_restart - Architecture specific signal delivery function
* @regs: Pointer to current's pt_regs
+ * @has_signal: actual signal to handle
*
* Invoked from exit_to_user_mode_loop().
*/
-void arch_do_signal(struct pt_regs *regs);
+void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal);
/**
* arch_syscall_exit_tracehook - Wrapper around tracehook_report_syscall_exit()
@@ -286,6 +301,41 @@ static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step)
#endif
/**
+ * exit_to_user_mode - Fixup state when exiting to user mode
+ *
+ * Syscall/interrupt exit enables interrupts, but interrupts are still
+ * disabled when this is invoked. Also tell RCU about it.
+ *
+ * 1) Trace interrupts on state
+ * 2) Invoke context tracking if enabled to adjust RCU state
+ * 3) Invoke architecture specific last minute exit code, e.g. speculation
+ * mitigations, etc.: arch_exit_to_user_mode()
+ * 4) Tell lockdep that interrupts are enabled
+ *
+ * Invoked from architecture specific code when syscall_exit_to_user_mode()
+ * is not suitable as the last step before returning to userspace. Must be
+ * invoked with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke syscall_exit_to_user_mode_work() before this.
+ */
+void exit_to_user_mode(void);
+
+/**
+ * syscall_exit_to_user_mode_work - Handle work before returning to user mode
+ * @regs: Pointer to current's pt_regs
+ *
+ * Same as step 1 and 2 of syscall_exit_to_user_mode() but without calling
+ * exit_to_user_mode() to perform the final transition to user mode.
+ *
+ * Calling convention is the same as for syscall_exit_to_user_mode() and it
+ * returns with all work handled and interrupts disabled. The caller must
+ * invoke exit_to_user_mode() before actually switching to user mode to
+ * make the final state transitions. Interrupts must stay disabled between
+ * return from this function and the invocation of exit_to_user_mode().
+ */
+void syscall_exit_to_user_mode_work(struct pt_regs *regs);
+
+/**
* syscall_exit_to_user_mode - Handle work before returning to user mode
* @regs: Pointer to current's pt_regs
*
@@ -307,8 +357,12 @@ static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step)
* - Architecture specific one time work arch_exit_to_user_mode_prepare()
* - Address limit and lockdep checks
*
- * 3) Final transition (lockdep, tracing, context tracking, RCU). Invokes
- * arch_exit_to_user_mode() to handle e.g. speculation mitigations
+ * 3) Final transition (lockdep, tracing, context tracking, RCU), i.e. the
+ * functionality in exit_to_user_mode().
+ *
+ * This is a combination of syscall_exit_to_user_mode_work() (1,2) and
+ * exit_to_user_mode(). This function is preferred unless there is a
+ * compelling architectural reason to use the separate functions.
*/
void syscall_exit_to_user_mode(struct pt_regs *regs);
@@ -341,8 +395,26 @@ void irqentry_enter_from_user_mode(struct pt_regs *regs);
void irqentry_exit_to_user_mode(struct pt_regs *regs);
#ifndef irqentry_state
+/**
+ * struct irqentry_state - Opaque object for exception state storage
+ * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the
+ * exit path has to invoke rcu_irq_exit().
+ * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
+ * lockdep state is restored correctly on exit from nmi.
+ *
+ * This opaque object is filled in by the irqentry_*_enter() functions and
+ * must be passed back into the corresponding irqentry_*_exit() functions
+ * when the exception is complete.
+ *
+ * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
+ * and all members private. Descriptions of the members are provided to aid in
+ * the maintenance of the irqentry_*() functions.
+ */
typedef struct irqentry_state {
- bool exit_rcu;
+ union {
+ bool exit_rcu;
+ bool lockdep;
+ };
} irqentry_state_t;
#endif
@@ -392,7 +464,7 @@ void irqentry_exit_cond_resched(void);
* @state: Return value from matching call to irqentry_enter()
*
* Depending on the return target (kernel/user) this runs the necessary
- * preemption and work checks if possible and reguired and returns to
+ * preemption and work checks if possible and required and returns to
* the caller with interrupts disabled and no further work pending.
*
* This is the last action before returning to the low level ASM code which
@@ -402,4 +474,23 @@ void irqentry_exit_cond_resched(void);
*/
void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);
+/**
+ * irqentry_nmi_enter - Handle NMI entry
+ * @regs: Pointer to current's pt_regs
+ *
+ * Similar to irqentry_enter() but taking care of the NMI constraints.
+ */
+irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);
+
+/**
+ * irqentry_nmi_exit - Handle return from NMI handling
+ * @regs: Pointer to pt_regs (NMI entry regs)
+ * @irq_state: Return value from matching call to irqentry_nmi_enter()
+ *
+ * Last action before returning to the low level assembly code.
+ *
+ * Counterpart to irqentry_nmi_enter().
+ */
+void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);
+
#endif
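The newly exported enter/exit halves are meant to bracket the generic work helpers. A hedged sketch of the ordering an architecture with extra entry work would use, following the kerneldoc above (the function name is hypothetical):

	noinstr void arch_syscall_entry(struct pt_regs *regs, long nr)
	{
		enter_from_user_mode(regs);
		/* arch specific work with interrupts still disabled */
		local_irq_enable();
		nr = syscall_enter_from_user_mode_work(regs, nr);

		/* ... dispatch syscall nr ... */

		syscall_exit_to_user_mode_work(regs);	/* returns with irqs disabled */
		exit_to_user_mode();			/* final transition */
	}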
diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h
index 0cef17afb41a..9b93f8584ff7 100644
--- a/include/linux/entry-kvm.h
+++ b/include/linux/entry-kvm.h
@@ -11,8 +11,8 @@
# define ARCH_XFER_TO_GUEST_MODE_WORK (0)
#endif
-#define XFER_TO_GUEST_MODE_WORK \
- (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+#define XFER_TO_GUEST_MODE_WORK \
+ (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \
_TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK)
struct kvm_vcpu;
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 6408b446051f..e3da25b51ae4 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -215,6 +215,7 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
#define ETHTOOL_COALESCE_TX_USECS_HIGH BIT(19)
#define ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH BIT(20)
#define ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL BIT(21)
+#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(21, 0)
#define ETHTOOL_COALESCE_USECS \
(ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS)
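The new mask is simply all 22 currently defined coalesce flags (GENMASK(21, 0) covers bits 0-21). A driver that honours every tunable can advertise them in one assignment; a minimal sketch assuming the existing ->supported_coalesce_params member of struct ethtool_ops:

	static const struct ethtool_ops foo_ethtool_ops = {
		.supported_coalesce_params = ETHTOOL_COALESCE_ALL_PARAMS,
		/* ... get_coalesce()/set_coalesce() etc. ... */
	};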
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 1b62397bd124..29c27656165b 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -21,7 +21,7 @@
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/sockptr.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
#include <net/sch_generic.h>
diff --git a/include/linux/font.h b/include/linux/font.h
index b5b312c19e46..abf1442ce719 100644
--- a/include/linux/font.h
+++ b/include/linux/font.h
@@ -16,7 +16,8 @@
struct font_desc {
int idx;
const char *name;
- int width, height;
+ unsigned int width, height;
+ unsigned int charcount;
const void *data;
int pref;
};
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index e8ca62b2cb5b..2bc3030a69e5 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -198,6 +198,8 @@ void fpga_mgr_free(struct fpga_manager *mgr);
int fpga_mgr_register(struct fpga_manager *mgr);
void fpga_mgr_unregister(struct fpga_manager *mgr);
+int devm_fpga_mgr_register(struct device *dev, struct fpga_manager *mgr);
+
struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name,
const struct fpga_manager_ops *mops,
void *priv);
diff --git a/include/linux/freelist.h b/include/linux/freelist.h
new file mode 100644
index 000000000000..fc1842b96469
--- /dev/null
+++ b/include/linux/freelist.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+#ifndef FREELIST_H
+#define FREELIST_H
+
+#include <linux/atomic.h>
+
+/*
+ * Copyright: cameron@moodycamel.com
+ *
+ * A simple CAS-based lock-free free list. Not the fastest thing in the world
+ * under heavy contention, but simple and correct (assuming nodes are never
+ * freed until after the free list is destroyed), and fairly speedy under low
+ * contention.
+ *
+ * Adapted from: https://moodycamel.com/blog/2014/solving-the-aba-problem-for-lock-free-free-lists
+ */
+
+struct freelist_node {
+ atomic_t refs;
+ struct freelist_node *next;
+};
+
+struct freelist_head {
+ struct freelist_node *head;
+};
+
+#define REFS_ON_FREELIST 0x80000000
+#define REFS_MASK 0x7FFFFFFF
+
+static inline void __freelist_add(struct freelist_node *node, struct freelist_head *list)
+{
+ /*
+ * Since the refcount is zero, and nobody can increase it once it's
+ * zero (except us, and we run only one copy of this method per node at
+ * a time, i.e. the single thread case), then we know we can safely
+ * change the next pointer of the node; however, once the refcount is
+ * back above zero, then other threads could increase it (happens under
+ * heavy contention, when the refcount goes to zero in between a load
+ * and a refcount increment of a node in try_get, then back up to
+ * something non-zero, then the refcount increment is done by the other
+ * thread) -- so if the CAS to add the node to the actual list fails,
+ * decrease the refcount and leave the add operation to the next thread
+ * who puts the refcount back to zero (which could be us, hence the
+ * loop).
+ */
+ struct freelist_node *head = READ_ONCE(list->head);
+
+ for (;;) {
+ WRITE_ONCE(node->next, head);
+ atomic_set_release(&node->refs, 1);
+
+ if (!try_cmpxchg_release(&list->head, &head, node)) {
+ /*
+ * Hmm, the add failed, but we can only try again when
+ * the refcount goes back to zero.
+ */
+ if (atomic_fetch_add_release(REFS_ON_FREELIST - 1, &node->refs) == 1)
+ continue;
+ }
+ return;
+ }
+}
+
+static inline void freelist_add(struct freelist_node *node, struct freelist_head *list)
+{
+ /*
+ * We know that the should-be-on-freelist bit is 0 at this point, so
+ * it's safe to set it using a fetch_add.
+ */
+ if (!atomic_fetch_add_release(REFS_ON_FREELIST, &node->refs)) {
+ /*
+ * Oh look! We were the last ones referencing this node, and we
+ * know we want to add it to the free list, so let's do it!
+ */
+ __freelist_add(node, list);
+ }
+}
+
+static inline struct freelist_node *freelist_try_get(struct freelist_head *list)
+{
+ struct freelist_node *prev, *next, *head = smp_load_acquire(&list->head);
+ unsigned int refs;
+
+ while (head) {
+ prev = head;
+ refs = atomic_read(&head->refs);
+ if ((refs & REFS_MASK) == 0 ||
+ !atomic_try_cmpxchg_acquire(&head->refs, &refs, refs+1)) {
+ head = smp_load_acquire(&list->head);
+ continue;
+ }
+
+ /*
+ * Good, reference count has been incremented (it wasn't at
+ * zero), which means we can read the next and not worry about
+ * it changing between now and the time we do the CAS.
+ */
+ next = READ_ONCE(head->next);
+ if (try_cmpxchg_acquire(&list->head, &head, next)) {
+ /*
+ * Yay, got the node. This means it was on the list,
+ * which means should-be-on-freelist must be false no
+ * matter the refcount (because nobody else knows it's
+ * been taken off yet, it can't have been put back on).
+ */
+ WARN_ON_ONCE(atomic_read(&head->refs) & REFS_ON_FREELIST);
+
+ /*
+ * Decrease refcount twice, once for our ref, and once
+ * for the list's ref.
+ */
+ atomic_fetch_add(-2, &head->refs);
+
+ return head;
+ }
+
+ /*
+ * OK, the head must have changed on us, but we still need to decrement
+ * the refcount we increased.
+ */
+ refs = atomic_fetch_add(-1, &prev->refs);
+ if (refs == REFS_ON_FREELIST + 1)
+ __freelist_add(prev, list);
+ }
+
+ return NULL;
+}
+
+#endif /* FREELIST_H */
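The list is used by embedding: a node lives inside the object it tracks, so the free list itself never allocates, and (per the header comment) objects must outlive the list. A minimal sketch with a hypothetical object type:

	struct my_obj {
		struct freelist_node fnode;	/* refs must start at zero */
		int payload;
	};

	static struct freelist_head pool;	/* ->head starts out NULL */

	static struct my_obj *obj_get(void)
	{
		struct freelist_node *n = freelist_try_get(&pool);

		return n ? container_of(n, struct my_obj, fnode) : NULL;
	}

	static void obj_put(struct my_obj *obj)
	{
		freelist_add(&obj->fnode, &pool);	/* also used for initial insertion */
	}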
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 8667d0cdc71e..1fcc2b00582b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -3230,7 +3230,7 @@ static inline bool vma_is_fsdax(struct vm_area_struct *vma)
{
struct inode *inode;
- if (!vma->vm_file)
+ if (!IS_ENABLED(CONFIG_FS_DAX) || !vma->vm_file)
return false;
if (!vma_is_dax(vma))
return false;
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index a8f7a43f031b..d23156d1ac94 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -75,7 +75,7 @@ struct fscrypt_operations {
static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
{
/*
- * Pairs with the cmpxchg_release() in fscrypt_get_encryption_info().
+ * Pairs with the cmpxchg_release() in fscrypt_setup_encryption_info().
* I.e., another task may publish ->i_crypt_info concurrently, executing
* a RELEASE barrier. We need to use smp_load_acquire() here to safely
* ACQUIRE the memory the other task published.
@@ -111,6 +111,35 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry)
dentry->d_flags &= ~DCACHE_NOKEY_NAME;
}
+/**
+ * fscrypt_is_nokey_name() - test whether a dentry is a no-key name
+ * @dentry: the dentry to check
+ *
+ * This returns true if the dentry is a no-key dentry. A no-key dentry is a
+ * dentry that was created in an encrypted directory that hasn't had its
+ * encryption key added yet. Such dentries may be either positive or negative.
+ *
+ * When a filesystem is asked to create a new filename in an encrypted directory
+ * and the new filename's dentry is a no-key dentry, it must fail the operation
+ * with ENOKEY. This includes ->create(), ->mkdir(), ->mknod(), ->symlink(),
+ * ->rename(), and ->link(). (However, ->rename() and ->link() are already
+ * handled by fscrypt_prepare_rename() and fscrypt_prepare_link().)
+ *
+ * This is necessary because creating a filename requires the directory's
+ * encryption key, but just checking for the key on the directory inode during
+ * the final filesystem operation doesn't guarantee that the key was available
+ * during the preceding dentry lookup. And the key must have already been
+ * available during the dentry lookup in order for it to have been checked
+ * whether the filename already exists in the directory and for the new file's
+ * dentry not to be invalidated due to it incorrectly having the no-key flag.
+ *
+ * Return: %true if the dentry is a no-key name
+ */
+static inline bool fscrypt_is_nokey_name(const struct dentry *dentry)
+{
+ return dentry->d_flags & DCACHE_NOKEY_NAME;
+}
+
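Per the comment above, directory-modifying inode operations are expected to reject no-key dentries up front. A hypothetical filesystem ->mkdir() would start with (sketch, not part of this patch):

	static int foo_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
	{
		if (fscrypt_is_nokey_name(dentry))
			return -ENOKEY;
		/* ... regular mkdir path ... */
		return 0;
	}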
/* crypto.c */
void fscrypt_enqueue_decrypt_work(struct work_struct *);
@@ -171,7 +200,6 @@ int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *arg);
int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg);
/* keysetup.c */
-int fscrypt_get_encryption_info(struct inode *inode);
int fscrypt_prepare_new_inode(struct inode *dir, struct inode *inode,
bool *encrypt_ret);
void fscrypt_put_encryption_info(struct inode *inode);
@@ -213,6 +241,8 @@ int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry,
unsigned int flags);
int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry,
struct fscrypt_name *fname);
+int __fscrypt_prepare_readdir(struct inode *dir);
+int __fscrypt_prepare_setattr(struct dentry *dentry, struct iattr *attr);
int fscrypt_prepare_setflags(struct inode *inode,
unsigned int oldflags, unsigned int flags);
int fscrypt_prepare_symlink(struct inode *dir, const char *target,
@@ -244,6 +274,11 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry)
{
}
+static inline bool fscrypt_is_nokey_name(const struct dentry *dentry)
+{
+ return false;
+}
+
/* crypto.c */
static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
@@ -372,10 +407,6 @@ static inline int fscrypt_ioctl_get_key_status(struct file *filp,
}
/* keysetup.c */
-static inline int fscrypt_get_encryption_info(struct inode *inode)
-{
- return -EOPNOTSUPP;
-}
static inline int fscrypt_prepare_new_inode(struct inode *dir,
struct inode *inode,
@@ -503,6 +534,17 @@ static inline int __fscrypt_prepare_lookup(struct inode *dir,
return -EOPNOTSUPP;
}
+static inline int __fscrypt_prepare_readdir(struct inode *dir)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_setattr(struct dentry *dentry,
+ struct iattr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int fscrypt_prepare_setflags(struct inode *inode,
unsigned int oldflags,
unsigned int flags)
@@ -642,32 +684,6 @@ static inline bool fscrypt_has_encryption_key(const struct inode *inode)
}
/**
- * fscrypt_require_key() - require an inode's encryption key
- * @inode: the inode we need the key for
- *
- * If the inode is encrypted, set up its encryption key if not already done.
- * Then require that the key be present and return -ENOKEY otherwise.
- *
- * No locks are needed, and the key will live as long as the struct inode --- so
- * it won't go away from under you.
- *
- * Return: 0 on success, -ENOKEY if the key is missing, or another -errno code
- * if a problem occurred while setting up the encryption key.
- */
-static inline int fscrypt_require_key(struct inode *inode)
-{
- if (IS_ENCRYPTED(inode)) {
- int err = fscrypt_get_encryption_info(inode);
-
- if (err)
- return err;
- if (!fscrypt_has_encryption_key(inode))
- return -ENOKEY;
- }
- return 0;
-}
-
-/**
* fscrypt_prepare_link() - prepare to link an inode into a possibly-encrypted
* directory
* @old_dentry: an existing dentry for the inode being linked
@@ -676,8 +692,7 @@ static inline int fscrypt_require_key(struct inode *inode)
*
* A new link can only be added to an encrypted directory if the directory's
* encryption key is available --- since otherwise we'd have no way to encrypt
- * the filename. Therefore, we first set up the directory's encryption key (if
- * not already done) and return an error if it's unavailable.
+ * the filename.
*
* We also verify that the link will not violate the constraint that all files
* in an encrypted directory tree use the same encryption policy.
@@ -738,8 +753,9 @@ static inline int fscrypt_prepare_rename(struct inode *old_dir,
*
* Prepare for ->lookup() in a directory which may be encrypted by determining
* the name that will actually be used to search the directory on-disk. If the
- * directory's encryption key is available, then the lookup is assumed to be by
- * plaintext name; otherwise, it is assumed to be by no-key name.
+ * directory's encryption policy is supported by this kernel and its encryption
+ * key is available, then the lookup is assumed to be by plaintext name;
+ * otherwise, it is assumed to be by no-key name.
*
* This also installs a custom ->d_revalidate() method which will invalidate the
* dentry if it was created without the key and the key is later added.
@@ -763,6 +779,26 @@ static inline int fscrypt_prepare_lookup(struct inode *dir,
}
/**
+ * fscrypt_prepare_readdir() - prepare to read a possibly-encrypted directory
+ * @dir: the directory inode
+ *
+ * If the directory is encrypted and it doesn't already have its encryption key
+ * set up, try to set it up so that the filenames will be listed in plaintext
+ * form rather than in no-key form.
+ *
+ * Return: 0 on success; -errno on error. Note that the encryption key being
+ * unavailable is not considered an error. It is also not an error if
+ * the encryption policy is unsupported by this kernel; that is treated
+ * like the key being unavailable, so that files can still be deleted.
+ */
+static inline int fscrypt_prepare_readdir(struct inode *dir)
+{
+ if (IS_ENCRYPTED(dir))
+ return __fscrypt_prepare_readdir(dir);
+ return 0;
+}
+
+/**
* fscrypt_prepare_setattr() - prepare to change a possibly-encrypted inode's
* attributes
* @dentry: dentry through which the inode is being changed
@@ -783,8 +819,8 @@ static inline int fscrypt_prepare_lookup(struct inode *dir,
static inline int fscrypt_prepare_setattr(struct dentry *dentry,
struct iattr *attr)
{
- if (attr->ia_valid & ATTR_SIZE)
- return fscrypt_require_key(d_inode(dentry));
+ if (IS_ENCRYPTED(d_inode(dentry)))
+ return __fscrypt_prepare_setattr(dentry, attr);
return 0;
}
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 9506f8ec0974..fde4ad97564c 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -10,14 +10,32 @@
#define _LINUX_FWNODE_H_
#include <linux/types.h>
+#include <linux/list.h>
struct fwnode_operations;
struct device;
+/*
+ * fwnode link flags
+ *
+ * LINKS_ADDED: The fwnode has already been parsed to add fwnode links.
+ */
+#define FWNODE_FLAG_LINKS_ADDED BIT(0)
+
struct fwnode_handle {
struct fwnode_handle *secondary;
const struct fwnode_operations *ops;
struct device *dev;
+ struct list_head suppliers;
+ struct list_head consumers;
+ u8 flags;
+};
+
+struct fwnode_link {
+ struct fwnode_handle *supplier;
+ struct list_head s_hook;
+ struct fwnode_handle *consumer;
+ struct list_head c_hook;
};
/**
@@ -68,44 +86,8 @@ struct fwnode_reference_args {
* endpoint node.
* @graph_get_port_parent: Return the parent node of a port node.
* @graph_parse_endpoint: Parse endpoint for port and endpoint id.
- * @add_links: Called after the device corresponding to the fwnode is added
- * using device_add(). The function is expected to create device
- * links to all the suppliers of the device that are available at
- * the time this function is called. The function must NOT stop
- * at the first failed device link if other unlinked supplier
- * devices are present in the system. This is necessary for the
- * driver/bus sync_state() callbacks to work correctly.
- *
- * For example, say Device-C depends on suppliers Device-S1 and
- * Device-S2 and the dependency is listed in that order in the
- * firmware. Say, S1 gets populated from the firmware after
- * late_initcall_sync(). Say S2 is populated and probed way
- * before that in device_initcall(). When C is populated, if this
- * add_links() function doesn't continue past a "failed linking to
- * S1" and continue linking C to S2, then S2 will get a
- * sync_state() callback before C is probed. This is because from
- * the perspective of S2, C was never a consumer when its
- * sync_state() evaluation is done. To avoid this, the add_links()
- * function has to go through all available suppliers of the
- * device (that corresponds to this fwnode) and link to them
- * before returning.
- *
- * If some suppliers are not yet available (indicated by an error
- * return value), this function will be called again when other
- * devices are added to allow creating device links to any newly
- * available suppliers.
- *
- * Return 0 if device links have been successfully created to all
- * the known suppliers of this device or if the supplier
- * information is not known.
- *
- * Return -ENODEV if the suppliers needed for probing this device
- * have not been registered yet (because device links can only be
- * created to devices registered with the driver core).
- *
- * Return -EAGAIN if some of the suppliers of this device have not
- * been registered yet, but none of those suppliers are necessary
- * for probing the device.
+ * @add_links: Create fwnode links to all the suppliers of the fwnode. Return
+ * zero on success, a negative error code otherwise.
*/
struct fwnode_operations {
struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
@@ -145,8 +127,7 @@ struct fwnode_operations {
(*graph_get_port_parent)(struct fwnode_handle *fwnode);
int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
- int (*add_links)(const struct fwnode_handle *fwnode,
- struct device *dev);
+ int (*add_links)(struct fwnode_handle *fwnode);
};
#define fwnode_has_op(fwnode, op) \
@@ -170,8 +151,16 @@ struct fwnode_operations {
} while (false)
#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev)
+static inline void fwnode_init(struct fwnode_handle *fwnode,
+ const struct fwnode_operations *ops)
+{
+ fwnode->ops = ops;
+ INIT_LIST_HEAD(&fwnode->consumers);
+ INIT_LIST_HEAD(&fwnode->suppliers);
+}
+
extern u32 fw_devlink_get_flags(void);
-void fw_devlink_pause(void);
-void fw_devlink_resume(void);
+int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup);
+void fwnode_links_purge(struct fwnode_handle *fwnode);
#endif
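With the add_links() callback reduced to pure fwnode bookkeeping, firmware backends initialize handles via fwnode_init() (so the new supplier/consumer lists start empty) and record dependencies with fwnode_link_add(). A hedged sketch (ops structure and variable names hypothetical):

	static const struct fwnode_operations foo_fwnode_ops = { /* ... */ };
	static struct fwnode_handle node;

	fwnode_init(&node, &foo_fwnode_ops);
	/* "node" consumes a resource provided by "supplier" */
	fwnode_link_add(&node, supplier);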
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
index eeae59d3ceb7..35d21fddaf2d 100644
--- a/include/linux/genl_magic_struct.h
+++ b/include/linux/genl_magic_struct.h
@@ -89,7 +89,7 @@ static inline int nla_put_u64_0pad(struct sk_buff *skb, int attrtype, u64 value)
nla_get_u64, nla_put_u64_0pad, false)
#define __str_field(attr_nr, attr_flag, name, maxlen) \
__array(attr_nr, attr_flag, name, NLA_NUL_STRING, char, maxlen, \
- nla_strlcpy, nla_put, false)
+ nla_strscpy, nla_put, false)
#define __bin_field(attr_nr, attr_flag, name, maxlen) \
__array(attr_nr, attr_flag, name, NLA_BINARY, char, maxlen, \
nla_memcpy, nla_put, false)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index c603237e006c..6e479e9c48ce 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -580,8 +580,6 @@ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
-extern void free_unref_page(struct page *page);
-extern void free_unref_page_list(struct list_head *list);
struct page_frag_cache;
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
diff --git a/include/linux/goldfish.h b/include/linux/goldfish.h
index 265a099cd3b8..12be1601fd84 100644
--- a/include/linux/goldfish.h
+++ b/include/linux/goldfish.h
@@ -13,9 +13,9 @@ static inline void gf_write_ptr(const void *ptr, void __iomem *portl,
{
const unsigned long addr = (unsigned long)ptr;
- writel(lower_32_bits(addr), portl);
+ __raw_writel(lower_32_bits(addr), portl);
#ifdef CONFIG_64BIT
- writel(upper_32_bits(addr), porth);
+ __raw_writel(upper_32_bits(addr), porth);
#endif
}
@@ -23,9 +23,9 @@ static inline void gf_write_dma_addr(const dma_addr_t addr,
void __iomem *portl,
void __iomem *porth)
{
- writel(lower_32_bits(addr), portl);
+ __raw_writel(lower_32_bits(addr), portl);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- writel(upper_32_bits(addr), porth);
+ __raw_writel(upper_32_bits(addr), porth);
#endif
}
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
new file mode 100644
index 000000000000..1bbe96dc8be6
--- /dev/null
+++ b/include/linux/highmem-internal.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HIGHMEM_INTERNAL_H
+#define _LINUX_HIGHMEM_INTERNAL_H
+
+/*
+ * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
+ */
+#ifdef CONFIG_KMAP_LOCAL
+void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
+void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
+void kunmap_local_indexed(void *vaddr);
+void kmap_local_fork(struct task_struct *tsk);
+void __kmap_local_sched_out(void);
+void __kmap_local_sched_in(void);
+static inline void kmap_assert_nomap(void)
+{
+ DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
+}
+#else
+static inline void kmap_local_fork(struct task_struct *tsk) { }
+static inline void kmap_assert_nomap(void) { }
+#endif
+
+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+
+#ifndef ARCH_HAS_KMAP_FLUSH_TLB
+static inline void kmap_flush_tlb(unsigned long addr) { }
+#endif
+
+#ifndef kmap_prot
+#define kmap_prot PAGE_KERNEL
+#endif
+
+void *kmap_high(struct page *page);
+void kunmap_high(struct page *page);
+void __kmap_flush_unused(void);
+struct page *__kmap_to_page(void *addr);
+
+static inline void *kmap(struct page *page)
+{
+ void *addr;
+
+ might_sleep();
+ if (!PageHighMem(page))
+ addr = page_address(page);
+ else
+ addr = kmap_high(page);
+ kmap_flush_tlb((unsigned long)addr);
+ return addr;
+}
+
+static inline void kunmap(struct page *page)
+{
+ might_sleep();
+ if (!PageHighMem(page))
+ return;
+ kunmap_high(page);
+}
+
+static inline struct page *kmap_to_page(void *addr)
+{
+ return __kmap_to_page(addr);
+}
+
+static inline void kmap_flush_unused(void)
+{
+ __kmap_flush_unused();
+}
+
+static inline void *kmap_local_page(struct page *page)
+{
+ return __kmap_local_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+ return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+ return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_local(void *vaddr)
+{
+ kunmap_local_indexed(vaddr);
+}
+
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+ preempt_disable();
+ pagefault_disable();
+ return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_atomic(struct page *page)
+{
+ return kmap_atomic_prot(page, kmap_prot);
+}
+
+static inline void *kmap_atomic_pfn(unsigned long pfn)
+{
+ preempt_disable();
+ pagefault_disable();
+ return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_atomic(void *addr)
+{
+ kunmap_local_indexed(addr);
+ pagefault_enable();
+ preempt_enable();
+}
+
+unsigned int __nr_free_highpages(void);
+extern atomic_long_t _totalhigh_pages;
+
+static inline unsigned int nr_free_highpages(void)
+{
+ return __nr_free_highpages();
+}
+
+static inline unsigned long totalhigh_pages(void)
+{
+ return (unsigned long)atomic_long_read(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_inc(void)
+{
+ atomic_long_inc(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_add(long count)
+{
+ atomic_long_add(count, &_totalhigh_pages);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+static inline struct page *kmap_to_page(void *addr)
+{
+ return virt_to_page(addr);
+}
+
+static inline void *kmap(struct page *page)
+{
+ might_sleep();
+ return page_address(page);
+}
+
+static inline void kunmap_high(struct page *page) { }
+static inline void kmap_flush_unused(void) { }
+
+static inline void kunmap(struct page *page)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(page_address(page));
+#endif
+}
+
+static inline void *kmap_local_page(struct page *page)
+{
+ return page_address(page);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+ return kmap_local_page(page);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+ return kmap_local_page(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_local(void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(addr);
+#endif
+}
+
+static inline void *kmap_atomic(struct page *page)
+{
+ preempt_disable();
+ pagefault_disable();
+ return page_address(page);
+}
+
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+ return kmap_atomic(page);
+}
+
+static inline void *kmap_atomic_pfn(unsigned long pfn)
+{
+ return kmap_atomic(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_atomic(void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(addr);
+#endif
+ pagefault_enable();
+ preempt_enable();
+}
+
+static inline unsigned int nr_free_highpages(void) { return 0; }
+static inline unsigned long totalhigh_pages(void) { return 0UL; }
+
+#endif /* CONFIG_HIGHMEM */
+
+/*
+ * Prevent people from calling kunmap_atomic() as if it were kunmap();
+ * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
+ */
+#define kunmap_atomic(__addr) \
+do { \
+ BUILD_BUG_ON(__same_type((__addr), struct page *)); \
+ __kunmap_atomic(__addr); \
+} while (0)
+
+#define kunmap_local(__addr) \
+do { \
+ BUILD_BUG_ON(__same_type((__addr), struct page *)); \
+ __kunmap_local(__addr); \
+} while (0)
+
+#endif
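The BUILD_BUG_ON() in the two macros above enforces the rule the comment states: the unmap side takes the address returned by the map side, never the page. For illustration (long-standing usage, not new in this patch):

	void *addr = kmap_atomic(page);

	memset(addr, 0, PAGE_SIZE);
	kunmap_atomic(addr);	/* kunmap_atomic(page) would trip the BUILD_BUG_ON */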
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 14e6202ce47f..d2c70d3772a3 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -11,217 +11,137 @@
#include <asm/cacheflush.h>
-#ifndef ARCH_HAS_FLUSH_ANON_PAGE
-static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
-{
-}
-#endif
-
-#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
-}
-#endif
-
-#include <asm/kmap_types.h>
-
-#ifdef CONFIG_HIGHMEM
-extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
-extern void kunmap_atomic_high(void *kvaddr);
-#include <asm/highmem.h>
-
-#ifndef ARCH_HAS_KMAP_FLUSH_TLB
-static inline void kmap_flush_tlb(unsigned long addr) { }
-#endif
-
-#ifndef kmap_prot
-#define kmap_prot PAGE_KERNEL
-#endif
-
-void *kmap_high(struct page *page);
-static inline void *kmap(struct page *page)
-{
- void *addr;
-
- might_sleep();
- if (!PageHighMem(page))
- addr = page_address(page);
- else
- addr = kmap_high(page);
- kmap_flush_tlb((unsigned long)addr);
- return addr;
-}
-
-void kunmap_high(struct page *page);
-
-static inline void kunmap(struct page *page)
-{
- might_sleep();
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
+#include "highmem-internal.h"
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
+/**
+ * kmap - Map a page for long term usage
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Can only be invoked from preemptible task context because on 32bit
+ * systems with CONFIG_HIGHMEM enabled this function might sleep.
*
- * However when holding an atomic kmap it is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
+ * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
+ * this returns the virtual address of the direct kernel mapping.
*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
+ * The returned virtual address is globally visible and valid up to the
+ * point where it is unmapped via kunmap(). The pointer can be handed to
+ * other contexts.
+ *
+ * For highmem pages on 32bit systems this can be slow as the mapping space
+ * is limited and protected by a global lock. If no mapping slot is
+ * available, the function blocks until a slot is released via kunmap().
*/
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
- return kmap_atomic_high_prot(page, prot);
-}
-#define kmap_atomic(page) kmap_atomic_prot(page, kmap_prot)
+static inline void *kmap(struct page *page);
-/* declarations for linux/mm/highmem.c */
-unsigned int nr_free_highpages(void);
-extern atomic_long_t _totalhigh_pages;
-static inline unsigned long totalhigh_pages(void)
-{
- return (unsigned long)atomic_long_read(&_totalhigh_pages);
-}
-
-static inline void totalhigh_pages_inc(void)
-{
- atomic_long_inc(&_totalhigh_pages);
-}
-
-static inline void totalhigh_pages_dec(void)
-{
- atomic_long_dec(&_totalhigh_pages);
-}
-
-static inline void totalhigh_pages_add(long count)
-{
- atomic_long_add(count, &_totalhigh_pages);
-}
-
-static inline void totalhigh_pages_set(long val)
-{
- atomic_long_set(&_totalhigh_pages, val);
-}
-
-void kmap_flush_unused(void);
-
-struct page *kmap_to_page(void *addr);
-
-#else /* CONFIG_HIGHMEM */
+/**
+ * kunmap - Unmap the virtual address mapped by kmap()
+ * @addr: Virtual address to be unmapped
+ *
+ * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
+ * pages in the low memory area.
+ */
+static inline void kunmap(struct page *page);
-static inline unsigned int nr_free_highpages(void) { return 0; }
+/**
+ * kmap_to_page - Get the page for a kmap'ed address
+ * @addr: The address to look up
+ *
+ * Returns: The page which is mapped to @addr.
+ */
+static inline struct page *kmap_to_page(void *addr);
-static inline struct page *kmap_to_page(void *addr)
-{
- return virt_to_page(addr);
-}
+/**
+ * kmap_flush_unused - Flush all unused kmap mappings in order to
+ * remove stray mappings
+ */
+static inline void kmap_flush_unused(void);
-static inline unsigned long totalhigh_pages(void) { return 0UL; }
+/**
+ * kmap_local_page - Map a page for temporary usage
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Can be invoked from any context.
+ *
+ * Requires careful handling when nesting multiple mappings because the map
+ * management is stack based. The unmap has to be in the reverse order of
+ * the map operation:
+ *
+ * addr1 = kmap_local_page(page1);
+ * addr2 = kmap_local_page(page2);
+ * ...
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
+ *
+ * Unmapping addr1 before addr2 is invalid and causes malfunction.
+ *
+ * Contrary to kmap() mappings the mapping is only valid in the context of
+ * the caller and cannot be handed to other contexts.
+ *
+ * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
+ * virtual address of the direct mapping. Only real highmem pages are
+ * temporarily mapped.
+ *
+ * While it is significantly faster than kmap() for the highmem case, it
+ * comes with restrictions about the pointer validity. Only use when really
+ * necessary.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_page() can rely on this side effect.
+ */
+static inline void *kmap_local_page(struct page *page);
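A typical kmap_local_page() user maps two pages briefly and unmaps them in reverse order, matching the stack discipline described above; a sketch (not from this patch):

	static void copy_page_data(struct page *dst, struct page *src, size_t n)
	{
		void *d = kmap_local_page(dst);
		void *s = kmap_local_page(src);

		memcpy(d, s, n);
		kunmap_local(s);	/* reverse order of the mappings */
		kunmap_local(d);
	}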
-static inline void *kmap(struct page *page)
-{
- might_sleep();
- return page_address(page);
-}
+/**
+ * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Effectively a wrapper around kmap_local_page() which disables pagefaults
+ * and preemption.
+ *
+ * Do not use in new code. Use kmap_local_page() instead.
+ */
+static inline void *kmap_atomic(struct page *page);
-static inline void kunmap_high(struct page *page)
-{
-}
+/**
+ * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic()
+ * @addr: Virtual address to be unmapped
+ *
+ * Counterpart to kmap_atomic().
+ *
+ * Effectively a wrapper around kunmap_local() which additionally undoes
+ * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
+ * preemption.
+ */
-static inline void kunmap(struct page *page)
-{
-#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
- kunmap_flush_on_unmap(page_address(page));
-#endif
-}
+/* Highmem related interfaces for management code */
+static inline unsigned int nr_free_highpages(void);
+static inline unsigned long totalhigh_pages(void);
-static inline void *kmap_atomic(struct page *page)
+#ifndef ARCH_HAS_FLUSH_ANON_PAGE
+static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
- preempt_disable();
- pagefault_disable();
- return page_address(page);
}
-#define kmap_atomic_prot(page, prot) kmap_atomic(page)
-
-static inline void kunmap_atomic_high(void *addr)
-{
- /*
- * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
- * handles re-enabling faults + preemption
- */
-#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
- kunmap_flush_on_unmap(addr);
#endif
-}
-
-#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
-#define kmap_flush_unused() do {} while(0)
-
-#endif /* CONFIG_HIGHMEM */
-
-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
-
-DECLARE_PER_CPU(int, __kmap_atomic_idx);
-
-static inline int kmap_atomic_idx_push(void)
+#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+static inline void flush_kernel_dcache_page(struct page *page)
{
- int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
-
-#ifdef CONFIG_DEBUG_HIGHMEM
- WARN_ON_ONCE(in_irq() && !irqs_disabled());
- BUG_ON(idx >= KM_TYPE_NR);
-#endif
- return idx;
}
-
-static inline int kmap_atomic_idx(void)
+static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
- return __this_cpu_read(__kmap_atomic_idx) - 1;
}
-
-static inline void kmap_atomic_idx_pop(void)
+static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
-#ifdef CONFIG_DEBUG_HIGHMEM
- int idx = __this_cpu_dec_return(__kmap_atomic_idx);
-
- BUG_ON(idx < 0);
-#else
- __this_cpu_dec(__kmap_atomic_idx);
-#endif
}
-
#endif
-/*
- * Prevent people trying to call kunmap_atomic() as if it were kunmap()
- * kunmap_atomic() should get the return value of kmap_atomic, not the page.
- */
-#define kunmap_atomic(addr) \
-do { \
- BUILD_BUG_ON(__same_type((addr), struct page *)); \
- kunmap_atomic_high(addr); \
- pagefault_enable(); \
- preempt_enable(); \
-} while (0)
-
-
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
@@ -284,13 +204,22 @@ static inline void clear_highpage(struct page *page)
kunmap_atomic(kaddr);
}
+/*
+ * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
+ * If we pass in a head page, we can zero up to the size of the compound page.
+ */
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2);
+#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
static inline void zero_user_segments(struct page *page,
- unsigned start1, unsigned end1,
- unsigned start2, unsigned end2)
+ unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2)
{
void *kaddr = kmap_atomic(page);
+ unsigned int i;
- BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
+ BUG_ON(end1 > page_size(page) || end2 > page_size(page));
if (end1 > start1)
memset(kaddr + start1, 0, end1 - start1);
@@ -299,8 +228,10 @@ static inline void zero_user_segments(struct page *page,
memset(kaddr + start2, 0, end2 - start2);
kunmap_atomic(kaddr);
- flush_dcache_page(page);
+ for (i = 0; i < compound_nr(page); i++)
+ flush_dcache_page(page + i);
}
+#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
static inline void zero_user_segment(struct page *page,
unsigned start, unsigned end)
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 107cedd7019a..bb5e7b0a4274 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -447,6 +447,10 @@ static inline void hrtimer_restart(struct hrtimer *timer)
/* Query timers: */
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
+/**
+ * hrtimer_get_remaining - get remaining time for the timer
+ * @timer: the timer to read
+ */
static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
return __hrtimer_get_remaining(timer, false);
@@ -458,7 +462,7 @@ extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
extern bool hrtimer_active(const struct hrtimer *timer);
/**
- * hrtimer_is_queued = check, whether the timer is on one of the queues
+ * hrtimer_is_queued - check, whether the timer is on one of the queues
* @timer: Timer to check
*
* Returns: True if the timer is queued, false otherwise
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 0365aa97f8e7..6a19f35f836b 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -7,43 +7,37 @@
#include <linux/fs.h> /* only for vma_is_dax() */
-extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
-extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
- struct vm_area_struct *vma);
-extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
-extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
- struct vm_area_struct *vma);
+vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
+int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
+ struct vm_area_struct *vma);
+void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
+int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
+ struct vm_area_struct *vma);
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
+void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif
-extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
-extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
- unsigned long addr,
- pmd_t *pmd,
- unsigned int flags);
-extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
- struct vm_area_struct *vma,
- pmd_t *pmd, unsigned long addr, unsigned long next);
-extern int zap_huge_pmd(struct mmu_gather *tlb,
- struct vm_area_struct *vma,
- pmd_t *pmd, unsigned long addr);
-extern int zap_huge_pud(struct mmu_gather *tlb,
- struct vm_area_struct *vma,
- pud_t *pud, unsigned long addr);
-extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
- unsigned long new_addr,
- pmd_t *old_pmd, pmd_t *new_pmd);
-extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, pgprot_t newprot,
- unsigned long cp_flags);
+vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
+struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmd,
+ unsigned int flags);
+bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pmd_t *pmd, unsigned long addr, unsigned long next);
+int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long addr);
+int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
+ unsigned long addr);
+bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+ unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
+int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
+ pgprot_t newprot, unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
pgprot_t pgprot, bool write);
@@ -100,13 +94,13 @@ enum transparent_hugepage_flag {
struct kobject;
struct kobj_attribute;
-extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count,
- enum transparent_hugepage_flag flag);
-extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf,
- enum transparent_hugepage_flag flag);
+ssize_t single_hugepage_flag_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count,
+ enum transparent_hugepage_flag flag);
+ssize_t single_hugepage_flag_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf,
+ enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
@@ -179,12 +173,11 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
-extern unsigned long thp_get_unmapped_area(struct file *filp,
- unsigned long addr, unsigned long len, unsigned long pgoff,
- unsigned long flags);
+unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags);
-extern void prep_transhuge_page(struct page *page);
-extern void free_transhuge_page(struct page *page);
+void prep_transhuge_page(struct page *page);
+void free_transhuge_page(struct page *page);
bool is_transparent_hugepage(struct page *page);
bool can_split_huge_page(struct page *page, int *pextra_pins);
@@ -222,16 +215,12 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
__split_huge_pud(__vma, __pud, __address); \
} while (0)
-extern int hugepage_madvise(struct vm_area_struct *vma,
- unsigned long *vm_flags, int advice);
-extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end,
- long adjust_next);
-extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
- struct vm_area_struct *vma);
-extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
- struct vm_area_struct *vma);
+int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
+ int advice);
+void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, long adjust_next);
+spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
+spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);
static inline int is_swap_pmd(pmd_t pmd)
{
@@ -294,7 +283,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
pud_t *pud, int flags, struct dev_pagemap **pgmap);
-extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
+vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *huge_zero_page;
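A note on the mechanical change above: in C, a file-scope function declaration has external linkage by default, so dropping the extern keyword changes nothing semantically. A minimal illustration (the function name is made up):

/* Both declarations are identical to the compiler: */
extern int thp_probe(struct page *page);
int thp_probe(struct page *page);	/* same linkage, less noise */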
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 770408b2fdaf..72ff75fb1971 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1261,6 +1261,7 @@ struct ieee80211_mgmt {
#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
#define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126
#define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122
+#define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123
/* mgmt header + 1 byte category code */
#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
@@ -3417,6 +3418,8 @@ struct ieee80211_multiple_bssid_configuration {
#define WLAN_AKM_SUITE_FT_PSK_SHA384 SUITE(0x000FAC, 19)
#define WLAN_AKM_SUITE_PSK_SHA384 SUITE(0x000FAC, 20)
+#define WLAN_AKM_SUITE_WFA_DPP SUITE(WLAN_OUI_WFA, 2)
+
#define WLAN_MAX_KEY_LEN 32
#define WLAN_PMK_NAME_LEN 16
@@ -3427,6 +3430,7 @@ struct ieee80211_multiple_bssid_configuration {
#define WLAN_OUI_WFA 0x506f9a
#define WLAN_OUI_TYPE_WFA_P2P 9
+#define WLAN_OUI_TYPE_WFA_DPP 0x1A
#define WLAN_OUI_MICROSOFT 0x0050f2
#define WLAN_OUI_TYPE_MICROSOFT_WPA 1
#define WLAN_OUI_TYPE_MICROSOFT_WMM 2
@@ -3832,15 +3836,15 @@ static inline bool for_each_element_completed(const struct element *element,
#define WLAN_RSNX_CAPA_SAE_H2E BIT(5)
/*
- * reduced neighbor report, based on Draft P802.11ax_D5.0,
- * section 9.4.2.170
+ * reduced neighbor report, based on Draft P802.11ax_D6.1,
+ * section 9.4.2.170 and accepted contributions.
*/
#define IEEE80211_AP_INFO_TBTT_HDR_TYPE 0x03
#define IEEE80211_AP_INFO_TBTT_HDR_FILTERED 0x04
#define IEEE80211_AP_INFO_TBTT_HDR_COLOC 0x08
#define IEEE80211_AP_INFO_TBTT_HDR_COUNT 0xF0
-#define IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM 8
-#define IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM 12
+#define IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM 9
+#define IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM 13
#define IEEE80211_RNR_TBTT_PARAMS_OCT_RECOMMENDED 0x01
#define IEEE80211_RNR_TBTT_PARAMS_SAME_SSID 0x02
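The new AKM selector is composed with this header's SUITE() helper, which packs the 24-bit OUI into the upper bytes and the one-byte suite type into the lowest byte; under that assumption, the new define expands as sketched here:

/* Sketch, assuming SUITE(oui, id) == (((oui) << 8) | (id)): */
u32 dpp_akm = (WLAN_OUI_WFA << 8) | 2;	/* 0x506f9a02 == WLAN_AKM_SUITE_WFA_DPP */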
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 556caed00258..b979005ea39c 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -25,6 +25,7 @@ struct br_ip {
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr ip6;
#endif
+ unsigned char mac_addr[ETH_ALEN];
} dst;
__be16 proto;
__u16 vid;
diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h
deleted file mode 100644
index 52224de798aa..000000000000
--- a/include/linux/if_frad.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * DLCI/FRAD Definitions for Frame Relay Access Devices. DLCI devices are
- * created for each DLCI associated with a FRAD. The FRAD driver
- * is not truly a network device, but the lower level device
- * handler. This allows other FRAD manufacturers to use the DLCI
- * code, including its RFC1490 encapsulation alongside the current
- * implementation for the Sangoma cards.
- *
- * Version: @(#)if_ifrad.h 0.15 31 Mar 96
- *
- * Author: Mike McLagan <mike.mclagan@linux.org>
- *
- * Changes:
- * 0.15 Mike McLagan changed structure defs (packed)
- * re-arranged flags
- * added DLCI_RET vars
- */
-#ifndef _FRAD_H_
-#define _FRAD_H_
-
-#include <uapi/linux/if_frad.h>
-
-
-#if defined(CONFIG_DLCI) || defined(CONFIG_DLCI_MODULE)
-
-/* these are the fields of an RFC 1490 header */
-struct frhdr
-{
- unsigned char control;
-
- /* for IP packets, this can be the NLPID */
- unsigned char pad;
-
- unsigned char NLPID;
- unsigned char OUI[3];
- __be16 PID;
-
-#define IP_NLPID pad
-} __packed;
-
-/* see RFC 1490 for the definition of the following */
-#define FRAD_I_UI 0x03
-
-#define FRAD_P_PADDING 0x00
-#define FRAD_P_Q933 0x08
-#define FRAD_P_SNAP 0x80
-#define FRAD_P_CLNP 0x81
-#define FRAD_P_IP 0xCC
-
-struct dlci_local
-{
- struct net_device *master;
- struct net_device *slave;
- struct dlci_conf config;
- int configured;
- struct list_head list;
-
- /* callback function */
- void (*receive)(struct sk_buff *skb, struct net_device *);
-};
-
-struct frad_local
-{
- /* devices which this FRAD is slaved to */
- struct net_device *master[CONFIG_DLCI_MAX];
- short dlci[CONFIG_DLCI_MAX];
-
- struct frad_conf config;
- int configured; /* has this device been configured */
- int initialized; /* mem_start, port, irq set ? */
-
- /* callback functions */
- int (*activate)(struct net_device *, struct net_device *);
- int (*deactivate)(struct net_device *, struct net_device *);
- int (*assoc)(struct net_device *, struct net_device *);
- int (*deassoc)(struct net_device *, struct net_device *);
- int (*dlci_conf)(struct net_device *, struct net_device *, int get);
-
- /* fields that are used by the Sangoma SDLA cards */
- struct timer_list timer;
- struct net_device *dev;
- int type; /* adapter type */
- int state; /* state of the S502/8 control latch */
- int buffer; /* current buffer for S508 firmware */
-};
-
-#endif /* CONFIG_DLCI || CONFIG_DLCI_MODULE */
-
-extern void dlci_ioctl_set(int (*hook)(unsigned int, void __user *));
-
-#endif
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index a367ead4bf4b..96556c64c95d 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -30,6 +30,7 @@ struct macvlan_dev {
enum macvlan_mode mode;
u16 flags;
unsigned int macaddr_count;
+ u32 bc_queue_len_req;
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index a3a838dcf8e4..7199280d89ca 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -79,8 +79,12 @@ struct ad_sigma_delta {
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
+ * 'tx_buf' is up to 32 bits.
+ * 'rx_buf' is up to 32 bits per sample + 64 bit timestamp,
+ * rounded to 16 bytes to take into account padding.
*/
- uint8_t data[4] ____cacheline_aligned;
+ uint8_t tx_buf[4] ____cacheline_aligned;
+ uint8_t rx_buf[16] __aligned(8);
};
static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd,
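The replacement buffers are sized for what the IIO core expects when a timestamp is attached to a scan: up to 4 bytes of sample data, padded so the 8-byte timestamp lands naturally aligned. A sketch of the intended rx_buf layout (an assumption drawn from the comment in the hunk above):

/*
 * rx_buf layout for one buffered scan:
 *
 *   bytes 0..3    raw sample (up to 32 bits)
 *   bytes 4..7    padding so the s64 timestamp is 8-byte aligned
 *   bytes 8..15   s64 timestamp
 *
 * => 16 bytes total, __aligned(8), sitting behind the
 *    ____cacheline_aligned tx_buf so both stay DMA-safe.
 */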
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index fbba4093f06c..8febc23f5f26 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -11,9 +11,6 @@
struct iio_buffer;
-void iio_buffer_set_attrs(struct iio_buffer *buffer,
- const struct attribute **attrs);
-
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data);
/**
diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h
index f2e94196d31f..07c5a8e52ca8 100644
--- a/include/linux/iio/iio-opaque.h
+++ b/include/linux/iio/iio-opaque.h
@@ -11,6 +11,7 @@
* @channel_attr_list: keep track of automatically created channel
* attributes
* @chan_attr_group: group for all attrs in base directory
+ * @ioctl_handlers: ioctl handlers registered with the core handler
* @debugfs_dentry: device specific debugfs dentry
* @cached_reg_addr: cached register address for debugfs reads
* @read_buf: read buffer to be used for the initial reg read
@@ -22,6 +23,7 @@ struct iio_dev_opaque {
struct list_head buffer_list;
struct list_head channel_attr_list;
struct attribute_group chan_attr_group;
+ struct list_head ioctl_handlers;
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_dentry;
unsigned cached_reg_addr;
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index f015fa185253..e4a9822e6495 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -362,6 +362,8 @@ struct iio_trigger; /* forward declaration */
* and max. For lists, all possible values are enumerated.
* @write_raw: function to write a value to the device.
* Parameters are the same as for read_raw.
+ * @read_label: function to request label name for a specified label,
+ * for better channel identification.
* @write_raw_get_fmt: callback function to query the expected
* format/precision. If not set by the driver, write_raw
* returns IIO_VAL_INT_PLUS_MICRO.
@@ -420,6 +422,10 @@ struct iio_info {
int val2,
long mask);
+ int (*read_label)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ char *label);
+
int (*write_raw_get_fmt)(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
long mask);
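A hedged sketch of a driver wiring up the new callback (all my_* names are hypothetical); the handler writes the channel's label into the provided buffer and returns the number of bytes written, in the style of a sysfs show callback:

static int my_adc_read_label(struct iio_dev *indio_dev,
			     struct iio_chan_spec const *chan, char *label)
{
	/* name channels after the sensor input they are wired to */
	return sprintf(label, "input%d\n", chan->channel);
}

static const struct iio_info my_adc_info = {
	.read_raw   = my_adc_read_raw,		/* assumed defined elsewhere */
	.read_label = my_adc_read_label,
};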
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index cad8325903f9..055890b6ffcf 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -21,7 +21,7 @@ struct iio_trigger;
/**
* struct iio_trigger_ops - operations structure for an iio_trigger.
* @set_trigger_state: switch on/off the trigger on demand
- * @try_reenable: function to reenable the trigger when the
+ * @reenable: function to reenable the trigger when the
* use count is zero (may be NULL)
* @validate_device: function to validate the device when the
* current trigger gets changed.
@@ -31,7 +31,7 @@ struct iio_trigger;
**/
struct iio_trigger_ops {
int (*set_trigger_state)(struct iio_trigger *trig, bool state);
- int (*try_reenable)(struct iio_trigger *trig);
+ void (*reenable)(struct iio_trigger *trig);
int (*validate_device)(struct iio_trigger *trig,
struct iio_dev *indio_dev);
};
@@ -97,7 +97,7 @@ static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
}
/**
- * iio_device_set_drvdata() - Set trigger driver data
+ * iio_trigger_set_drvdata() - Set trigger driver data
* @trig: IIO trigger structure
* @data: Driver specific data
*
diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h
index e99c91799359..7f154d1f8739 100644
--- a/include/linux/iio/triggered_buffer.h
+++ b/include/linux/iio/triggered_buffer.h
@@ -4,19 +4,28 @@
#include <linux/interrupt.h>
+struct attribute;
struct iio_dev;
struct iio_buffer_setup_ops;
-int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
+int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev,
irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
- const struct iio_buffer_setup_ops *setup_ops);
+ const struct iio_buffer_setup_ops *setup_ops,
+ const struct attribute **buffer_attrs);
void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev);
-int devm_iio_triggered_buffer_setup(struct device *dev,
- struct iio_dev *indio_dev,
- irqreturn_t (*h)(int irq, void *p),
- irqreturn_t (*thread)(int irq, void *p),
- const struct iio_buffer_setup_ops *ops);
+#define iio_triggered_buffer_setup(indio_dev, h, thread, setup_ops) \
+ iio_triggered_buffer_setup_ext((indio_dev), (h), (thread), (setup_ops), NULL)
+
+int devm_iio_triggered_buffer_setup_ext(struct device *dev,
+ struct iio_dev *indio_dev,
+ irqreturn_t (*h)(int irq, void *p),
+ irqreturn_t (*thread)(int irq, void *p),
+ const struct iio_buffer_setup_ops *ops,
+ const struct attribute **buffer_attrs);
+
+#define devm_iio_triggered_buffer_setup(dev, indio_dev, h, thread, setup_ops) \
+ devm_iio_triggered_buffer_setup_ext((dev), (indio_dev), (h), (thread), (setup_ops), NULL)
#endif
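With iio_buffer_set_attrs() removed from buffer.h (see above), extra buffer attributes are now passed at setup time, while the old entry points survive as macros forwarding NULL. A hedged usage sketch, with a hypothetical attribute array:

static const struct attribute *my_buffer_attrs[] = {
	&my_hwfifo_attr.dev_attr.attr,	/* hypothetical IIO device attribute */
	NULL,
};

ret = devm_iio_triggered_buffer_setup_ext(dev, indio_dev,
					  iio_pollfunc_store_time,
					  my_trigger_handler,	/* hypothetical */
					  NULL, my_buffer_attrs);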
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 8fa7bcfb2da2..7233a2751754 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -29,6 +29,7 @@ extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
enum kernel_read_file_id id);
extern void ima_post_path_mknod(struct dentry *dentry);
extern int ima_file_hash(struct file *file, char *buf, size_t buf_size);
+extern int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size);
extern void ima_kexec_cmdline(int kernel_fd, const void *buf, int size);
#ifdef CONFIG_IMA_KEXEC
@@ -115,6 +116,11 @@ static inline int ima_file_hash(struct file *file, char *buf, size_t buf_size)
return -EOPNOTSUPP;
}
+static inline int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size)
+{
+ return -EOPNOTSUPP;
+}
+
static inline void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) {}
#endif /* CONFIG_IMA */
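ima_inode_hash() mirrors ima_file_hash() for callers that hold only an inode; the !CONFIG_IMA stub above returns -EOPNOTSUPP. A hedged calling sketch, assuming the success-path return convention matches ima_file_hash():

u8 digest[64];		/* sized for the largest common hash */
int ret;

ret = ima_inode_hash(inode, digest, sizeof(digest));
if (ret < 0)
	return ret;	/* -EOPNOTSUPP when IMA is not built in */
/* digest now holds the cached IMA measurement of the inode */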
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 3515ca64e638..53aa0343bf69 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -105,7 +105,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_LOG_MARTIANS(in_dev) IN_DEV_ORCONF((in_dev), LOG_MARTIANS)
#define IN_DEV_PROXY_ARP(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP)
-#define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_CONF_GET(in_dev, PROXY_ARP_PVLAN)
+#define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP_PVLAN)
#define IN_DEV_SHARED_MEDIA(in_dev) IN_DEV_ORCONF((in_dev), SHARED_MEDIA)
#define IN_DEV_TX_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), SEND_REDIRECTS)
#define IN_DEV_SEC_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), \
@@ -126,7 +126,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS)))
#define IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) \
- IN_DEV_CONF_GET((in_dev), IGNORE_ROUTES_WITH_LINKDOWN)
+ IN_DEV_ORCONF((in_dev), IGNORE_ROUTES_WITH_LINKDOWN)
#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER)
#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT)
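Both macros move from IN_DEV_CONF_GET(), which reads only the per-device value, to IN_DEV_ORCONF(), which also honours the "all" sysctl, matching the neighbouring options. A semantics sketch (an assumption based on the surrounding ORCONF users):

/*
 * effective = net.ipv4.conf.<dev>.proxy_arp_pvlan
 *          || net.ipv4.conf.all.proxy_arp_pvlan;
 *
 * previously only the per-device sysctl was consulted.
 */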
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index c75e4d3d8833..c093e81310a9 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -69,13 +69,32 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
BUG_ON(offset >= mapping->size);
phys_addr = mapping->base + offset;
- return iomap_atomic_prot_pfn(PHYS_PFN(phys_addr), mapping->prot);
+ preempt_disable();
+ pagefault_disable();
+ return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
}
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
- iounmap_atomic(vaddr);
+ kunmap_local_indexed((void __force *)vaddr);
+ pagefault_enable();
+ preempt_enable();
+}
+
+static inline void __iomem *
+io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
+{
+ resource_size_t phys_addr;
+
+ BUG_ON(offset >= mapping->size);
+ phys_addr = mapping->base + offset;
+ return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
+}
+
+static inline void io_mapping_unmap_local(void __iomem *vaddr)
+{
+ kunmap_local_indexed((void __force *)vaddr);
}
static inline void __iomem *
@@ -97,7 +116,7 @@ io_mapping_unmap(void __iomem *vaddr)
iounmap(vaddr);
}
-#else
+#else /* HAVE_ATOMIC_IOMAP */
#include <linux/uaccess.h>
@@ -162,7 +181,18 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
preempt_enable();
}
-#endif /* HAVE_ATOMIC_IOMAP */
+static inline void __iomem *
+io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
+{
+ return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
+}
+
+static inline void io_mapping_unmap_local(void __iomem *vaddr)
+{
+ io_mapping_unmap(vaddr);
+}
+
+#endif /* !HAVE_ATOMIC_IOMAP */
static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base,
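The new _local variants follow the kmap_local_page() model: unlike the atomic versions above, they disable neither preemption nor pagefaults, so the mapping is valid only on the local CPU and nested mappings must be released in reverse order. A short usage sketch:

void __iomem *vaddr;

vaddr = io_mapping_map_local_wc(mapping, offset);
memcpy_toio(vaddr, src, len);	/* no atomic context required */
io_mapping_unmap_local(vaddr);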
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 4cde111e425b..fb4d5a763e0c 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -86,6 +86,9 @@ struct io_pgtable_cfg {
*
* IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
* for use in the upper half of a split address space.
+ *
+ * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability
+ * attributes set in the TCR for a non-coherent page-table walker.
*/
#define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
#define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
@@ -93,6 +96,7 @@ struct io_pgtable_cfg {
#define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3)
#define IO_PGTABLE_QUIRK_NON_STRICT BIT(4)
#define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
+ #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6)
unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
@@ -208,6 +212,10 @@ struct io_pgtable {
#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)
+struct io_pgtable_domain_attr {
+ unsigned long quirks;
+};
+
static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
iop->cfg.tlb->tlb_flush_all(iop->cookie);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index b95a6f8db6ff..ffaa389ea128 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -118,6 +118,7 @@ enum iommu_attr {
DOMAIN_ATTR_FSL_PAMUV1,
DOMAIN_ATTR_NESTING, /* two stages of translation */
DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
+ DOMAIN_ATTR_IO_PGTABLE_CFG,
DOMAIN_ATTR_MAX,
};
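Paired with struct io_pgtable_domain_attr above, the new domain attribute lets an IOMMU master request page-table-walk quirks before attaching; a hedged sketch against this kernel's iommu_domain_set_attr() API:

struct io_pgtable_domain_attr pgtbl_cfg = {
	.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA,
};
int ret;

/* must happen before the domain is attached to a device */
ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);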
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index a06a78c67f19..05e22770af51 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -27,7 +27,6 @@ struct ipc_ids {
};
struct ipc_namespace {
- refcount_t count;
struct ipc_ids ids[3];
int sem_ctls[4];
@@ -128,7 +127,7 @@ extern struct ipc_namespace *copy_ipcs(unsigned long flags,
static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
{
if (ns)
- refcount_inc(&ns->count);
+ refcount_inc(&ns->ns.count);
return ns;
}
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 30823780c192..ec2a47a81e42 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -14,28 +14,37 @@
*/
struct irq_work {
- union {
- struct __call_single_node node;
- struct {
- struct llist_node llnode;
- atomic_t flags;
- };
- };
+ struct __call_single_node node;
void (*func)(struct irq_work *);
};
+#define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){ \
+ .node = { .u_flags = (_flags), }, \
+ .func = (_func), \
+}
+
+#define IRQ_WORK_INIT(_func) __IRQ_WORK_INIT(_func, 0)
+#define IRQ_WORK_INIT_LAZY(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_LAZY)
+#define IRQ_WORK_INIT_HARD(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_HARD_IRQ)
+
+#define DEFINE_IRQ_WORK(name, _f) \
+ struct irq_work name = IRQ_WORK_INIT(_f)
+
static inline
void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
{
- atomic_set(&work->flags, 0);
- work->func = func;
+ *work = IRQ_WORK_INIT(func);
}
-#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { \
- .flags = ATOMIC_INIT(0), \
- .func = (_f) \
+static inline bool irq_work_is_pending(struct irq_work *work)
+{
+ return atomic_read(&work->node.a_flags) & IRQ_WORK_PENDING;
}
+static inline bool irq_work_is_busy(struct irq_work *work)
+{
+ return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;
+}
bool irq_work_queue(struct irq_work *work);
bool irq_work_queue_on(struct irq_work *work, int cpu);
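With the flags folded into __call_single_node, initialization now goes through the new helpers, and the pending/busy bits are queried via the accessors instead of an open-coded atomic_read(). A brief sketch:

static void my_work_fn(struct irq_work *work)
{
	/* runs from the irq_work interrupt once the work is raised */
}

static DEFINE_IRQ_WORK(my_work, my_work_fn);
/* or, for work that must stay in hard irq context (e.g. on RT): */
static struct irq_work my_hard_work = IRQ_WORK_INIT_HARD(my_work_fn);

irq_work_queue() raises the work as before; irq_work_is_pending() and irq_work_is_busy() replace direct peeking at the old flags field.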
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 5701a8b01726..42d196805f58 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -388,11 +388,19 @@ extern void irq_domain_associate_many(struct irq_domain *domain,
unsigned int irq_base,
irq_hw_number_t hwirq_base, int count);
-extern unsigned int irq_create_mapping(struct irq_domain *host,
- irq_hw_number_t hwirq);
+extern unsigned int irq_create_mapping_affinity(struct irq_domain *host,
+ irq_hw_number_t hwirq,
+ const struct irq_affinity_desc *affinity);
extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec);
extern void irq_dispose_mapping(unsigned int virq);
+static inline unsigned int irq_create_mapping(struct irq_domain *host,
+ irq_hw_number_t hwirq)
+{
+ return irq_create_mapping_affinity(host, hwirq, NULL);
+}
+
+
/**
* irq_linear_revmap() - Find a linux irq from a hw irq number.
* @domain: domain owning this hardware interrupt
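irq_create_mapping() is kept as a trivial inline wrapper, so existing callers are untouched while callers with an affinity hint can use the extended form; the two calls below are equivalent:

virq = irq_create_mapping(domain, hwirq);
virq = irq_create_mapping_affinity(domain, hwirq, NULL);	/* identical */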
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 3ed4e8771b64..8de0e1373de7 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -107,14 +107,14 @@ do { \
current->irq_config = 0; \
} while (0)
-# define lockdep_irq_work_enter(__work) \
+# define lockdep_irq_work_enter(_flags) \
do { \
- if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
+ if (!((_flags) & IRQ_WORK_HARD_IRQ)) \
current->irq_config = 1; \
} while (0)
-# define lockdep_irq_work_exit(__work) \
+# define lockdep_irq_work_exit(_flags) \
do { \
- if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
+ if (!((_flags) & IRQ_WORK_HARD_IRQ)) \
current->irq_config = 0; \
} while (0)
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index bb2246c8ec13..82f29aa35062 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -9,9 +9,6 @@
extern struct tasklet_struct keyboard_tasklet;
extern char *func_table[MAX_NR_FUNC];
-extern char func_buf[];
-extern char *funcbufptr;
-extern int funcbufsize, funcbufleft;
/*
* kbd->xxx contains the VC-local things (flag settings etc..)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 2f05e9128201..dbf6018fc312 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -204,6 +204,7 @@ extern int _cond_resched(void);
extern void ___might_sleep(const char *file, int line, int preempt_offset);
extern void __might_sleep(const char *file, int line, int preempt_offset);
extern void __cant_sleep(const char *file, int line, int preempt_offset);
+extern void __cant_migrate(const char *file, int line);
/**
* might_sleep - annotation for functions that can sleep
@@ -227,6 +228,18 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
# define cant_sleep() \
do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
+
+/**
+ * cant_migrate - annotation for functions that cannot migrate
+ *
+ * Will print a stack trace if executed in code which is migratable
+ */
+# define cant_migrate() \
+ do { \
+ if (IS_ENABLED(CONFIG_SMP)) \
+ __cant_migrate(__FILE__, __LINE__); \
+ } while (0)
+
/**
* non_block_start - annotate the start of section where sleeping is prohibited
*
@@ -251,6 +264,7 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
# define cant_sleep() do { } while (0)
+# define cant_migrate() do { } while (0)
# define sched_annotate_sleep() do { } while (0)
# define non_block_start() do { } while (0)
# define non_block_end() do { } while (0)
@@ -258,13 +272,6 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
-#ifndef CONFIG_PREEMPT_RT
-# define cant_migrate() cant_sleep()
-#else
- /* Placeholder for now */
-# define cant_migrate() do { } while (0)
-#endif
-
/**
* abs - return absolute value of an argument
* @x: the value. If it is unsigned type, it is converted to signed type first.
@@ -536,6 +543,7 @@ extern int panic_on_warn;
extern unsigned long panic_on_taint;
extern bool panic_on_taint_nousertaint;
extern int sysctl_panic_on_rcu_stall;
+extern int sysctl_max_rcu_stall_to_panic;
extern int sysctl_panic_on_stackoverflow;
extern bool crash_kexec_post_notifiers;
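cant_migrate() becomes a real assertion backed by __cant_migrate() on SMP builds, rather than an alias for cant_sleep(). A hedged usage sketch for code that tolerates preemption but not migration (the per-CPU variable is hypothetical):

static void bump_local_counter(void)
{
	cant_migrate();		/* splat if the caller can migrate CPUs */
	__this_cpu_inc(my_counter);
}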
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 89f6a4214a70..9e8ca8743c26 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -116,7 +116,7 @@ struct kernfs_elem_attr {
* kernfs node is represented by single kernfs_node. Most fields are
* private to kernfs and shouldn't be accessed directly by kernfs users.
*
- * As long as s_count reference is held, the kernfs_node itself is
+ * As long as count reference is held, the kernfs_node itself is
* accessible. Dereferencing elem or any other outer entity requires
* active reference.
*/
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 2ab2d6d6aeab..7d985a1dfe4a 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -29,6 +29,7 @@ struct kernel_pkey_params;
* clear the contents.
*/
struct key_preparsed_payload {
+ const char *orig_description; /* Actual or proposed description (maybe NULL) */
char *description; /* Proposed key description (or NULL) */
union key_payload payload; /* Proposed payload */
const void *data; /* Raw data */
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 629abaf25681..a79404433812 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -27,6 +27,8 @@
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>
+#include <linux/refcount.h>
+#include <linux/freelist.h>
#include <asm/kprobes.h>
#ifdef CONFIG_KPROBES
@@ -144,6 +146,11 @@ static inline int kprobe_ftrace(struct kprobe *p)
* ignored, due to maxactive being too low.
*
*/
+struct kretprobe_holder {
+ struct kretprobe *rp;
+ refcount_t ref;
+};
+
struct kretprobe {
struct kprobe kp;
kretprobe_handler_t handler;
@@ -151,18 +158,18 @@ struct kretprobe {
int maxactive;
int nmissed;
size_t data_size;
- struct hlist_head free_instances;
- raw_spinlock_t lock;
+ struct freelist_head freelist;
+ struct kretprobe_holder *rph;
};
struct kretprobe_instance {
union {
- struct hlist_node hlist;
+ struct freelist_node freelist;
struct rcu_head rcu;
};
- struct kretprobe *rp;
+ struct llist_node llist;
+ struct kretprobe_holder *rph;
kprobe_opcode_t *ret_addr;
- struct task_struct *task;
void *fp;
char data[];
};
@@ -221,6 +228,14 @@ unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
return ret;
}
+static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
+ "Kretprobe is accessed from instance under preemptive context");
+
+ return READ_ONCE(ri->rph->rp);
+}
+
#else /* CONFIG_KRETPROBES */
static inline void arch_prepare_kretprobe(struct kretprobe *rp,
struct pt_regs *regs)
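Since instances now reference a shared, refcounted kretprobe_holder instead of the kretprobe directly, handlers resolve the owner through get_kretprobe() under RCU; a hedged handler sketch:

static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct kretprobe *rp = get_kretprobe(ri);

	if (rp)		/* NULL while the kretprobe is being unregistered */
		pr_debug("nmissed=%d\n", rp->nmissed);
	return 0;
}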
diff --git a/include/linux/list.h b/include/linux/list.h
index a18c87b63376..89bdc92e75c3 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -9,7 +9,7 @@
#include <linux/kernel.h>
/*
- * Simple doubly linked list implementation.
+ * Circular doubly linked list implementation.
*
* Some of the internal functions ("__xxx") are useful when
* manipulating whole lists rather than single entries, as
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 2e9c7215882b..24f207b0190b 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -197,6 +197,16 @@ static inline struct llist_node *llist_next(struct llist_node *node)
extern bool llist_add_batch(struct llist_node *new_first,
struct llist_node *new_last,
struct llist_head *head);
+
+static inline bool __llist_add_batch(struct llist_node *new_first,
+ struct llist_node *new_last,
+ struct llist_head *head)
+{
+ new_last->next = head->first;
+ head->first = new_first;
+ return new_last->next == NULL;
+}
+
/**
* llist_add - add a new entry
* @new: new entry to be added
@@ -209,6 +219,11 @@ static inline bool llist_add(struct llist_node *new, struct llist_head *head)
return llist_add_batch(new, new, head);
}
+static inline bool __llist_add(struct llist_node *new, struct llist_head *head)
+{
+ return __llist_add_batch(new, new, head);
+}
+
/**
* llist_del_all - delete all entries from lock-less list
* @head: the head of lock-less list to delete all entries
@@ -222,6 +237,14 @@ static inline struct llist_node *llist_del_all(struct llist_head *head)
return xchg(&head->first, NULL);
}
+static inline struct llist_node *__llist_del_all(struct llist_head *head)
+{
+ struct llist_node *first = head->first;
+
+ head->first = NULL;
+ return first;
+}
+
extern struct llist_node *llist_del_first(struct llist_head *head);
struct llist_node *llist_reverse_order(struct llist_node *head);
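The new double-underscore variants skip the atomic xchg()/cmpxchg() and are only safe when the list has an external serialization guarantee (e.g. per-CPU data with interrupts disabled); a sketch:

bool was_empty;
struct llist_node *all;

/* concurrent producers must use the atomic variant: */
llist_add(&item->node, &head);

/* with external serialization, the non-atomic variants avoid the xchg(): */
was_empty = __llist_add(&item->node, &head);
all = __llist_del_all(&head);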
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index f5594879175a..b9e9adec73e8 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -375,6 +375,12 @@ static inline void lockdep_unregister_key(struct lock_class_key *key)
#define lockdep_depth(tsk) (0)
+/*
+ * Dummy forward declarations, allow users to write less ifdef-y code
+ * and depend on dead code elimination.
+ */
+extern int lock_is_held(const void *);
+extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r) (1)
#define lockdep_assert_held(l) do { (void)(l); } while (0)
@@ -594,6 +600,16 @@ do { \
this_cpu_read(hardirqs_enabled))); \
} while (0)
+/*
+ * Acceptable for protecting per-CPU resources accessed from BH.
+ * Much like in_softirq() - semantics are ambiguous, use carefully.
+ */
+#define lockdep_assert_in_softirq() \
+do { \
+ WARN_ON_ONCE(__lockdep_enabled && \
+ (!in_softirq() || in_irq() || in_nmi())); \
+} while (0)
+
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
@@ -605,6 +621,7 @@ do { \
# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
+# define lockdep_assert_in_softirq() do { } while (0)
#endif
#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
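The dummy externs exist so !CONFIG_LOCKDEP code can mention lockdep_is_held() without an #ifdef: the surrounding condition folds to a compile-time constant, dead-code elimination drops the call, and the never-defined symbol is never referenced at link time. A sketch (my_lock is hypothetical):

/* compiles both with and without lockdep: */
p = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));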
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 28f23b341c1c..cd23355d2271 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -26,7 +26,7 @@
struct lsm_network_audit {
int netif;
- struct sock *sk;
+ const struct sock *sk;
u16 family;
__be16 dport;
__be16 sport;
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 32a940117e7a..acc0494cceba 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -301,7 +301,7 @@ LSM_HOOK(void, LSM_RET_VOID, sk_clone_security, const struct sock *sk,
struct sock *newsk)
LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, struct sock *sk, u32 *secid)
LSM_HOOK(void, LSM_RET_VOID, sock_graft, struct sock *sk, struct socket *parent)
-LSM_HOOK(int, 0, inet_conn_request, struct sock *sk, struct sk_buff *skb,
+LSM_HOOK(int, 0, inet_conn_request, const struct sock *sk, struct sk_buff *skb,
struct request_sock *req)
LSM_HOOK(void, LSM_RET_VOID, inet_csk_clone, struct sock *newsk,
const struct request_sock *req)
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index ff7b7607c8cf..52b1610eae68 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -25,6 +25,9 @@
#define MARVELL_PHY_ID_88X3310 0x002b09a0
#define MARVELL_PHY_ID_88E2110 0x002b09b0
+/* Marvel 88E1111 in Finisar SFP module with modified PHY ID */
+#define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0
+
/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do
* not have a model ID. So the switch driver traps reads to the ID2
* register and returns the switch family ID
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 52aa4821093a..959ad7d850b4 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -95,6 +95,12 @@ ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length);
ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
size_t length);
+ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ u8 vtag);
+ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ u8 *vtag);
+ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
+ size_t length, u8 *vtag);
int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb);
int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
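The _vtag variants thread a virtual tag through send and receive so several consumers can multiplex one MEI client connection; a hedged sketch (the tag value is only an example):

u8 vtag = 1;
ssize_t ret;

ret = mei_cldev_send_vtag(cldev, tx_buf, tx_len, vtag);
if (ret >= 0)
	ret = mei_cldev_recv_vtag(cldev, rx_buf, rx_len, &vtag);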
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 922a7f600465..08ed57e02b73 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -235,11 +235,6 @@ struct mem_cgroup {
struct vmpressure vmpressure;
/*
- * Should the accounting and control be hierarchical, per subtree?
- */
- bool use_hierarchy;
-
- /*
* Should the OOM killer kill all belonging tasks, had it kill one?
*/
bool oom_group;
@@ -296,7 +291,6 @@ struct mem_cgroup {
int tcpmem_pressure;
#ifdef CONFIG_MEMCG_KMEM
- /* Index in the kmem_cache->memcg_params.memcg_caches array */
int kmemcg_id;
enum memcg_kmem_state kmem_state;
struct obj_cgroup __rcu *objcg;
@@ -343,6 +337,175 @@ struct mem_cgroup {
extern struct mem_cgroup *root_mem_cgroup;
+enum page_memcg_data_flags {
+ /* page->memcg_data is a pointer to an objcgs vector */
+ MEMCG_DATA_OBJCGS = (1UL << 0),
+ /* page has been accounted as a non-slab kernel page */
+ MEMCG_DATA_KMEM = (1UL << 1),
+ /* the next bit after the last actual flag */
+ __NR_MEMCG_DATA_FLAGS = (1UL << 2),
+};
+
+#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)
+
+/*
+ * page_memcg - get the memory cgroup associated with a page
+ * @page: a pointer to the page struct
+ *
+ * Returns a pointer to the memory cgroup associated with the page,
+ * or NULL. This function assumes that the page is known to have a
+ * proper memory cgroup pointer. It's not safe to call this function
+ * against some types of pages, e.g. slab pages or ex-slab pages.
+ *
+ * Any of the following ensures page and memcg binding stability:
+ * - the page lock
+ * - LRU isolation
+ * - lock_page_memcg()
+ * - exclusive reference
+ */
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+ unsigned long memcg_data = page->memcg_data;
+
+ VM_BUG_ON_PAGE(PageSlab(page), page);
+ VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page);
+
+ return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+}
+
+/*
+ * page_memcg_rcu - locklessly get the memory cgroup associated with a page
+ * @page: a pointer to the page struct
+ *
+ * Returns a pointer to the memory cgroup associated with the page,
+ * or NULL. This function assumes that the page is known to have a
+ * proper memory cgroup pointer. It's not safe to call this function
+ * against some types of pages, e.g. slab pages or ex-slab pages.
+ */
+static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+{
+ VM_BUG_ON_PAGE(PageSlab(page), page);
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ return (struct mem_cgroup *)(READ_ONCE(page->memcg_data) &
+ ~MEMCG_DATA_FLAGS_MASK);
+}
+
+/*
+ * page_memcg_check - get the memory cgroup associated with a page
+ * @page: a pointer to the page struct
+ *
+ * Returns a pointer to the memory cgroup associated with the page,
+ * or NULL. This function, unlike page_memcg(), can take any page
+ * as an argument. It has to be used in cases when it's not known if a page
+ * has an associated memory cgroup pointer or an object cgroups vector.
+ *
+ * Any of the following ensures page and memcg binding stability:
+ * - the page lock
+ * - LRU isolation
+ * - lock_page_memcg()
+ * - exclusive reference
+ */
+static inline struct mem_cgroup *page_memcg_check(struct page *page)
+{
+ /*
+ * Because page->memcg_data might be changed asynchronously
+ * for slab pages, READ_ONCE() should be used here.
+ */
+ unsigned long memcg_data = READ_ONCE(page->memcg_data);
+
+ if (memcg_data & MEMCG_DATA_OBJCGS)
+ return NULL;
+
+ return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+}
+
+/*
+ * PageMemcgKmem - check if the page has MemcgKmem flag set
+ * @page: a pointer to the page struct
+ *
+ * Checks if the page has MemcgKmem flag set. The caller must ensure that
+ * the page has an associated memory cgroup. It's not safe to call this function
+ * against some types of pages, e.g. slab pages.
+ */
+static inline bool PageMemcgKmem(struct page *page)
+{
+ VM_BUG_ON_PAGE(page->memcg_data & MEMCG_DATA_OBJCGS, page);
+ return page->memcg_data & MEMCG_DATA_KMEM;
+}
+
+#ifdef CONFIG_MEMCG_KMEM
+/*
+ * page_objcgs - get the object cgroups vector associated with a page
+ * @page: a pointer to the page struct
+ *
+ * Returns a pointer to the object cgroups vector associated with the page,
+ * or NULL. This function assumes that the page is known to have an
+ * associated object cgroups vector. It's not safe to call this function
+ * against pages that might have an associated memory cgroup: e.g.
+ * kernel stack pages.
+ */
+static inline struct obj_cgroup **page_objcgs(struct page *page)
+{
+ unsigned long memcg_data = READ_ONCE(page->memcg_data);
+
+ VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS), page);
+ VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);
+
+ return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+}
+
+/*
+ * page_objcgs_check - get the object cgroups vector associated with a page
+ * @page: a pointer to the page struct
+ *
+ * Returns a pointer to the object cgroups vector associated with the page,
+ * or NULL. This function is safe to use if the page can be directly associated
+ * with a memory cgroup.
+ */
+static inline struct obj_cgroup **page_objcgs_check(struct page *page)
+{
+ unsigned long memcg_data = READ_ONCE(page->memcg_data);
+
+ if (!memcg_data || !(memcg_data & MEMCG_DATA_OBJCGS))
+ return NULL;
+
+ VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);
+
+ return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+}
+
+/*
+ * set_page_objcgs - associate a page with an object cgroups vector
+ * @page: a pointer to the page struct
+ * @objcgs: a pointer to the object cgroups vector
+ *
+ * Atomically associates a page with a vector of object cgroups.
+ */
+static inline bool set_page_objcgs(struct page *page,
+ struct obj_cgroup **objcgs)
+{
+ return !cmpxchg(&page->memcg_data, 0, (unsigned long)objcgs |
+ MEMCG_DATA_OBJCGS);
+}
+#else
+static inline struct obj_cgroup **page_objcgs(struct page *page)
+{
+ return NULL;
+}
+
+static inline struct obj_cgroup **page_objcgs_check(struct page *page)
+{
+ return NULL;
+}
+
+static inline bool set_page_objcgs(struct page *page,
+ struct obj_cgroup **objcgs)
+{
+ return true;
+}
+#endif
+
static __always_inline bool memcg_stat_item_in_bytes(int idx)
{
if (idx == MEMCG_PERCPU_B)
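page->memcg_data is a tagged pointer: the low bits carry the flags defined above, and the remainder is either a mem_cgroup pointer or, for slab pages, an obj_cgroup vector. The accessors added here all reduce to the decoding sketched below:

unsigned long memcg_data = READ_ONCE(page->memcg_data);

if (memcg_data & MEMCG_DATA_OBJCGS)
	/* slab page: vector of object cgroups */
	objcgs = (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
else
	/* regular page; MEMCG_DATA_KMEM may also be set in the low bits */
	memcg = (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);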
@@ -491,12 +654,41 @@ out:
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
+static inline bool lruvec_holds_page_lru_lock(struct page *page,
+ struct lruvec *lruvec)
+{
+ pg_data_t *pgdat = page_pgdat(page);
+ const struct mem_cgroup *memcg;
+ struct mem_cgroup_per_node *mz;
+
+ if (mem_cgroup_disabled())
+ return lruvec == &pgdat->__lruvec;
+
+ mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+ memcg = page_memcg(page) ? : root_mem_cgroup;
+
+ return lruvec->pgdat == pgdat && mz->memcg == memcg;
+}
+
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);
+struct lruvec *lock_page_lruvec(struct page *page);
+struct lruvec *lock_page_lruvec_irq(struct page *page);
+struct lruvec *lock_page_lruvec_irqsave(struct page *page,
+ unsigned long *flags);
+
+#ifdef CONFIG_DEBUG_VM
+void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page);
+#else
+static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
+{
+}
+#endif
+
static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
return css ? container_of(css, struct mem_cgroup, css) : NULL;
@@ -589,8 +781,6 @@ static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
{
if (root == memcg)
return true;
- if (!root->use_hierarchy)
- return false;
return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}
@@ -743,15 +933,19 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg,
static inline void __mod_memcg_page_state(struct page *page,
int idx, int val)
{
- if (page->mem_cgroup)
- __mod_memcg_state(page->mem_cgroup, idx, val);
+ struct mem_cgroup *memcg = page_memcg(page);
+
+ if (memcg)
+ __mod_memcg_state(memcg, idx, val);
}
static inline void mod_memcg_page_state(struct page *page,
int idx, int val)
{
- if (page->mem_cgroup)
- mod_memcg_state(page->mem_cgroup, idx, val);
+ struct mem_cgroup *memcg = page_memcg(page);
+
+ if (memcg)
+ mod_memcg_state(memcg, idx, val);
}
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
@@ -794,19 +988,15 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
int val);
-void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
- int val);
-void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
-
-void mod_memcg_obj_state(void *p, int idx, int val);
+void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
-static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
int val)
{
unsigned long flags;
local_irq_save(flags);
- __mod_lruvec_slab_state(p, idx, val);
+ __mod_lruvec_kmem_state(p, idx, val);
local_irq_restore(flags);
}
@@ -820,43 +1010,6 @@ static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
local_irq_restore(flags);
}
-static inline void mod_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __mod_lruvec_state(lruvec, idx, val);
- local_irq_restore(flags);
-}
-
-static inline void __mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
-{
- struct page *head = compound_head(page); /* rmap on tail pages */
- pg_data_t *pgdat = page_pgdat(page);
- struct lruvec *lruvec;
-
- /* Untracked pages have no memcg, no lruvec. Update only the node */
- if (!head->mem_cgroup) {
- __mod_node_page_state(pgdat, idx, val);
- return;
- }
-
- lruvec = mem_cgroup_lruvec(head->mem_cgroup, pgdat);
- __mod_lruvec_state(lruvec, idx, val);
-}
-
-static inline void mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __mod_lruvec_page_state(page, idx, val);
- local_irq_restore(flags);
-}
-
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
unsigned long *total_scanned);
@@ -878,8 +1031,10 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
static inline void count_memcg_page_event(struct page *page,
enum vm_event_item idx)
{
- if (page->mem_cgroup)
- count_memcg_events(page->mem_cgroup, idx, 1);
+ struct mem_cgroup *memcg = page_memcg(page);
+
+ if (memcg)
+ count_memcg_events(memcg, idx, 1);
}
static inline void count_memcg_event_mm(struct mm_struct *mm,
@@ -948,6 +1103,27 @@ void mem_cgroup_split_huge_fixup(struct page *head);
struct mem_cgroup;
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+ return NULL;
+}
+
+static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+{
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ return NULL;
+}
+
+static inline struct mem_cgroup *page_memcg_check(struct page *page)
+{
+ return NULL;
+}
+
+static inline bool PageMemcgKmem(struct page *page)
+{
+ return false;
+}
+
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
return true;
@@ -1020,6 +1196,14 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
return &pgdat->__lruvec;
}
+static inline bool lruvec_holds_page_lru_lock(struct page *page,
+ struct lruvec *lruvec)
+{
+ pg_data_t *pgdat = page_pgdat(page);
+
+ return lruvec == &pgdat->__lruvec;
+}
+
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
return NULL;
@@ -1045,6 +1229,31 @@ static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}
+static inline struct lruvec *lock_page_lruvec(struct page *page)
+{
+ struct pglist_data *pgdat = page_pgdat(page);
+
+ spin_lock(&pgdat->__lruvec.lru_lock);
+ return &pgdat->__lruvec;
+}
+
+static inline struct lruvec *lock_page_lruvec_irq(struct page *page)
+{
+ struct pglist_data *pgdat = page_pgdat(page);
+
+ spin_lock_irq(&pgdat->__lruvec.lru_lock);
+ return &pgdat->__lruvec;
+}
+
+static inline struct lruvec *lock_page_lruvec_irqsave(struct page *page,
+ unsigned long *flagsp)
+{
+ struct pglist_data *pgdat = page_pgdat(page);
+
+ spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
+ return &pgdat->__lruvec;
+}
+
static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
struct mem_cgroup *prev,
@@ -1215,31 +1424,7 @@ static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
{
}
-static inline void __mod_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
-{
- __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
-}
-
-static inline void mod_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
-{
- mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
-}
-
-static inline void __mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
-{
- __mod_node_page_state(page_pgdat(page), idx, val);
-}
-
-static inline void mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
-{
- mod_node_page_state(page_pgdat(page), idx, val);
-}
-
-static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
int val)
{
struct page *page = virt_to_head_page(p);
@@ -1247,7 +1432,7 @@ static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
__mod_node_page_state(page_pgdat(page), idx, val);
}
-static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
int val)
{
struct page *page = virt_to_head_page(p);
@@ -1255,10 +1440,6 @@ static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
mod_node_page_state(page_pgdat(page), idx, val);
}
-static inline void mod_memcg_obj_state(void *p, int idx, int val)
-{
-}
-
static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
@@ -1292,6 +1473,10 @@ static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
+
+static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
+{
+}
#endif /* CONFIG_MEMCG */
/* idx can be of type enum memcg_stat_item or node_stat_item */
@@ -1322,38 +1507,14 @@ static inline void __dec_memcg_page_state(struct page *page,
__mod_memcg_page_state(page, idx, -1);
}
-static inline void __inc_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
-{
- __mod_lruvec_state(lruvec, idx, 1);
-}
-
-static inline void __dec_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
-{
- __mod_lruvec_state(lruvec, idx, -1);
-}
-
-static inline void __inc_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
-{
- __mod_lruvec_page_state(page, idx, 1);
-}
-
-static inline void __dec_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
-{
- __mod_lruvec_page_state(page, idx, -1);
-}
-
-static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
+static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
- __mod_lruvec_slab_state(p, idx, 1);
+ __mod_lruvec_kmem_state(p, idx, 1);
}
-static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
+static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
- __mod_lruvec_slab_state(p, idx, -1);
+ __mod_lruvec_kmem_state(p, idx, -1);
}
/* idx can be of type enum memcg_stat_item or node_stat_item */
@@ -1384,41 +1545,61 @@ static inline void dec_memcg_page_state(struct page *page,
mod_memcg_page_state(page, idx, -1);
}
-static inline void inc_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
+static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
- mod_lruvec_state(lruvec, idx, 1);
+ struct mem_cgroup *memcg;
+
+ memcg = lruvec_memcg(lruvec);
+ if (!memcg)
+ return NULL;
+ memcg = parent_mem_cgroup(memcg);
+ if (!memcg)
+ return NULL;
+ return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}
-static inline void dec_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
+static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
- mod_lruvec_state(lruvec, idx, -1);
+ spin_unlock(&lruvec->lru_lock);
}
-static inline void inc_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
+static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
- mod_lruvec_page_state(page, idx, 1);
+ spin_unlock_irq(&lruvec->lru_lock);
}
-static inline void dec_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
+static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
+ unsigned long flags)
{
- mod_lruvec_page_state(page, idx, -1);
+ spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}
-static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
+/* Don't lock again if the page's lruvec is already locked */
+static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
+ struct lruvec *locked_lruvec)
{
- struct mem_cgroup *memcg;
+ if (locked_lruvec) {
+ if (lruvec_holds_page_lru_lock(page, locked_lruvec))
+ return locked_lruvec;
- memcg = lruvec_memcg(lruvec);
- if (!memcg)
- return NULL;
- memcg = parent_mem_cgroup(memcg);
- if (!memcg)
- return NULL;
- return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
+ unlock_page_lruvec_irq(locked_lruvec);
+ }
+
+ return lock_page_lruvec_irq(page);
+}
+
+/* Don't lock again if the page's lruvec is already locked */
+static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
+ struct lruvec *locked_lruvec, unsigned long *flags)
+{
+ if (locked_lruvec) {
+ if (lruvec_holds_page_lru_lock(page, locked_lruvec))
+ return locked_lruvec;
+
+ unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
+ }
+
+ return lock_page_lruvec_irqsave(page, flags);
}
#ifdef CONFIG_CGROUP_WRITEBACK
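The relock helpers are built for batched walks over pages that may live in different lruvecs: the lock is only dropped and retaken when the lruvec actually changes. A hedged loop sketch (the page list is hypothetical):

struct lruvec *lruvec = NULL;
unsigned long flags;
struct page *page;

list_for_each_entry(page, &my_pages, lru) {
	lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
	/* ... operate on page under lruvec->lru_lock ... */
}
if (lruvec)
	unlock_page_lruvec_irqrestore(lruvec, flags);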
@@ -1437,7 +1618,7 @@ static inline void mem_cgroup_track_foreign_dirty(struct page *page,
if (mem_cgroup_disabled())
return;
- if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
+ if (unlikely(&page_memcg(page)->css != wb->memcg_css))
mem_cgroup_track_foreign_dirty_slowpath(page, wb);
}
@@ -1568,9 +1749,8 @@ static inline void memcg_kmem_uncharge(struct mem_cgroup *memcg,
}
/*
- * helper for accessing a memcg's index. It will be used as an index in the
- * child cache array in kmem_cache, and also to derive its name. This function
- * will return -1 when this is not a kmem-limited memcg.
+ * A helper for accessing memcg's kmem_id, used for getting
+ * corresponding LRU lists.
*/
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index d4841e5a5f45..562862ff819c 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -214,7 +214,6 @@ enum mhi_db_brst_mode {
* @offload_channel: The client manages the channel completely
* @doorbell_mode_switch: Channel switches to doorbell mode on M0 transition
* @auto_queue: Framework will automatically queue buffers for DL traffic
- * @auto_start: Automatically start (open) this channel
* @wake-capable: Channel capable of waking up the system
*/
struct mhi_channel_config {
@@ -232,7 +231,6 @@ struct mhi_channel_config {
bool offload_channel;
bool doorbell_mode_switch;
bool auto_queue;
- bool auto_start;
bool wake_capable;
};
@@ -337,6 +335,7 @@ struct mhi_controller_config {
* @wlock: Lock for protecting device wakeup
* @mhi_link_info: Device bandwidth info
* @st_worker: State transition worker
+ * @hiprio_wq: High priority workqueue for MHI work such as state transitions
* @state_event: State change event
* @status_cb: CB function to notify power states of the device (required)
* @wake_get: CB function to assert device wake (optional)
@@ -349,6 +348,7 @@ struct mhi_controller_config {
* @read_reg: Read a MHI register via the physical link (required)
* @write_reg: Write a MHI register via the physical link (required)
* @buffer_len: Bounce buffer length
+ * @index: Index of the MHI controller instance
* @bounce_buf: Use of bounce buffer
* @fbc_download: MHI host needs to do complete image transfer (optional)
* @pre_init: MHI host needs to do pre-initialization before power up
@@ -419,6 +419,7 @@ struct mhi_controller {
spinlock_t wlock;
struct mhi_link_info mhi_link_info;
struct work_struct st_worker;
+ struct workqueue_struct *hiprio_wq;
wait_queue_head_t state_event;
void (*status_cb)(struct mhi_controller *mhi_cntrl,
@@ -438,6 +439,7 @@ struct mhi_controller {
u32 val);
size_t buffer_len;
+ int index;
bool bounce_buf;
bool fbc_download;
bool pre_init;
@@ -645,12 +647,12 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
/**
- * mhi_download_rddm_img - Download ramdump image from device for
- * debugging purpose.
+ * mhi_download_rddm_image - Download ramdump image from device for
+ * debugging purposes.
* @mhi_cntrl: MHI controller
* @in_panic: Download rddm image during kernel panic
*/
-int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);
+int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic);
/**
* mhi_force_rddm_mode - Force device into rddm mode
@@ -659,6 +661,12 @@ int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);
/**
+ * mhi_get_exec_env - Get BHI execution environment of the device
+ * @mhi_cntrl: MHI controller
+ */
+enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
+
+/**
* mhi_get_mhi_state - Get MHI state of the device
* @mhi_cntrl: MHI controller
*/
@@ -737,4 +745,11 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct sk_buff *skb, size_t len, enum mhi_flags mflags);
+/**
+ * mhi_queue_is_full - Determine whether queueing new elements is possible
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ */
+bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir);
+
#endif /* _MHI_H_ */
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 0f8d1583fa8e..4594838a0f7c 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -45,8 +45,8 @@ extern struct page *alloc_migration_target(struct page *page, unsigned long priv
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);
-extern int migrate_prep(void);
-extern int migrate_prep_local(void);
+extern void migrate_prep(void);
+extern void migrate_prep_local(void);
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 06e066e04a4b..236a7d04f891 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -46,7 +46,6 @@
#define DEFAULT_UAR_PAGE_SHIFT 12
-#define MAX_MSIX_P_PORT 17
#define MAX_MSIX 128
#define MIN_MSIX_P_PORT 5
#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index cf824366a7d1..f1de49d64a98 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -346,6 +346,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
+ MLX5_EVENT_TYPE_VHCA_STATE_CHANGE = 0xf,
MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d,
@@ -717,6 +718,11 @@ struct mlx5_eqe_sync_fw_update {
u8 sync_rst_state;
};
+struct mlx5_eqe_vhca_state {
+ __be16 ec_function;
+ __be16 function_id;
+} __packed;
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -736,6 +742,7 @@ union ev_data {
struct mlx5_eqe_temp_warning temp_warning;
struct mlx5_eqe_xrq_err xrq_err;
struct mlx5_eqe_sync_fw_update sync_fw_update;
+ struct mlx5_eqe_vhca_state vhca_state;
} __packed;
struct mlx5_eqe {
@@ -1076,6 +1083,7 @@ enum {
MLX5_MATCH_INNER_HEADERS = 1 << 2,
MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3,
MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4,
+ MLX5_MATCH_MISC_PARAMETERS_4 = 1 << 5,
};
enum {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 0f23e1ed5e71..f93bfe7473aa 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -48,6 +48,7 @@
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
+#include <linux/auxiliary_bus.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
@@ -56,6 +57,8 @@
#include <linux/ptp_clock_kernel.h>
#include <net/devlink.h>
+#define MLX5_ADEV_NAME "mlx5_core"
+
enum {
MLX5_BOARD_ID_LEN = 64,
};
@@ -534,6 +537,17 @@ struct mlx5_core_roce {
struct mlx5_flow_handle *allow_rule;
};
+enum {
+ MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
+ MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
+};
+
+struct mlx5_adev {
+ struct auxiliary_device adev;
+ struct mlx5_core_dev *mdev;
+ int idx;
+};
+
struct mlx5_priv {
/* IRQ table valid only for real pci devices PF or VF */
struct mlx5_irq_table *irq_table;
@@ -547,7 +561,7 @@ struct mlx5_priv {
atomic_t reg_pages;
struct list_head free_list;
int vfs_pages;
- int peer_pf_pages;
+ int host_pf_pages;
struct mlx5_core_health health;
@@ -571,6 +585,8 @@ struct mlx5_priv {
struct list_head dev_list;
struct list_head ctx_list;
spinlock_t ctx_lock;
+ struct mlx5_adev **adev;
+ int adev_idx;
struct mlx5_events *events;
struct mlx5_flow_steering *steering;
@@ -578,6 +594,7 @@ struct mlx5_priv {
struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov;
struct mlx5_lag *lag;
+ u32 flags;
struct mlx5_devcom *devcom;
struct mlx5_fw_reset *fw_reset;
struct mlx5_core_roce roce;
@@ -888,10 +905,6 @@ enum {
CMD_ALLOWED_OPCODE_ALL,
};
-int mlx5_cmd_init(struct mlx5_core_dev *dev);
-void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
-void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
- enum mlx5_cmdif_state cmdif_state);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
@@ -1059,23 +1072,6 @@ enum {
MAX_MR_CACHE_ENTRIES
};
-enum {
- MLX5_INTERFACE_PROTOCOL_IB = 0,
- MLX5_INTERFACE_PROTOCOL_ETH = 1,
- MLX5_INTERFACE_PROTOCOL_VDPA = 2,
-};
-
-struct mlx5_interface {
- void * (*add)(struct mlx5_core_dev *dev);
- void (*remove)(struct mlx5_core_dev *dev, void *context);
- int (*attach)(struct mlx5_core_dev *dev, void *context);
- void (*detach)(struct mlx5_core_dev *dev, void *context);
- int protocol;
- struct list_head list;
-};
-
-int mlx5_register_interface(struct mlx5_interface *intf);
-void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
@@ -1137,7 +1133,7 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
return dev->coredev_type == MLX5_COREDEV_VF;
}
-static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
+static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
{
return dev->caps.embedded_cpu;
}
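With mlx5_register_interface() and struct mlx5_interface gone, protocol drivers are expected to bind to the auxiliary devices the core now exposes. A hedged sketch of such a driver, assuming the core names a sub-device MLX5_ADEV_NAME ".eth"; all my_* identifiers are illustrative:

static int my_probe(struct auxiliary_device *adev,
		    const struct auxiliary_device_id *id)
{
	struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev);

	dev_set_drvdata(&adev->dev, madev->mdev); /* protocol state hangs off mdev */
	return 0;
}

static void my_remove(struct auxiliary_device *adev)
{
	/* tear down protocol state */
}

static const struct auxiliary_device_id my_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".eth" },	/* assumed sub-device name */
	{}
};
MODULE_DEVICE_TABLE(auxiliary, my_id_table);

static struct auxiliary_driver my_driver = {
	.name = "eth",
	.probe = my_probe,
	.remove = my_remove,
	.id_table = my_id_table,
};
module_auxiliary_driver(my_driver);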
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index b0ae8020f13e..29fd832950e0 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -96,10 +96,10 @@ static inline u32 mlx5_eswitch_get_vport_metadata_mask(void)
u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
u16 vport_num);
-u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw);
+u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
-static inline u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
+static inline u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev)
{
return MLX5_ESWITCH_NONE;
}
@@ -136,4 +136,8 @@ mlx5_eswitch_get_vport_metadata_mask(void)
}
#endif /* CONFIG_MLX5_ESWITCH */
+static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev)
+{
+ return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS;
+}
#endif
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 846d94ad04bc..1f51f4c3b1af 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -50,6 +50,7 @@ enum {
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
MLX5_FLOW_TABLE_TERMINATION = BIT(2),
MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
+ MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
};
#define LEFTOVERS_RULE_NUM 2
@@ -132,6 +133,7 @@ struct mlx5_flow_destination {
struct mlx5_pkt_reformat *pkt_reformat;
u8 flags;
} vport;
+ u32 sampler_id;
};
};
@@ -173,9 +175,7 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- int num_flow_table_entries,
- u32 level, u16 vport);
+ struct mlx5_flow_table_attr *ft_attr, u16 vport);
struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
struct mlx5_flow_namespace *ns,
int prio, u32 level);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index a092346c7b2d..0d6e287d614f 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -299,6 +299,8 @@ enum {
MLX5_CMD_OP_CREATE_UMEM = 0xa08,
MLX5_CMD_OP_DESTROY_UMEM = 0xa0a,
MLX5_CMD_OP_SYNC_STEERING = 0xb00,
+ MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d,
+ MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e,
MLX5_CMD_OP_MAX
};
@@ -623,6 +625,26 @@ struct mlx5_ifc_fte_match_set_misc3_bits {
u8 reserved_at_140[0xc0];
};
+struct mlx5_ifc_fte_match_set_misc4_bits {
+ u8 prog_sample_field_value_0[0x20];
+
+ u8 prog_sample_field_id_0[0x20];
+
+ u8 prog_sample_field_value_1[0x20];
+
+ u8 prog_sample_field_id_1[0x20];
+
+ u8 prog_sample_field_value_2[0x20];
+
+ u8 prog_sample_field_id_2[0x20];
+
+ u8 prog_sample_field_value_3[0x20];
+
+ u8 prog_sample_field_id_3[0x20];
+
+ u8 reserved_at_100[0x100];
+};
+
struct mlx5_ifc_cmd_pas_bits {
u8 pa_h[0x20];
@@ -891,7 +913,10 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 tunnel_stateless_ipv4_over_vxlan[0x1];
u8 tunnel_stateless_ip_over_ip[0x1];
u8 insert_trailer[0x1];
- u8 reserved_at_2b[0x5];
+ u8 reserved_at_2b[0x1];
+ u8 tunnel_stateless_ip_over_ip_rx[0x1];
+ u8 tunnel_stateless_ip_over_ip_tx[0x1];
+ u8 reserved_at_2e[0x2];
u8 max_vxlan_udp_ports[0x8];
u8 reserved_at_38[0x6];
u8 max_geneve_opt_len[0x1];
@@ -1223,8 +1248,22 @@ enum mlx5_fc_bulk_alloc_bitmask {
#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
+enum {
+ MLX5_STEERING_FORMAT_CONNECTX_5 = 0,
+ MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
+};
+
struct mlx5_ifc_cmd_hca_cap_bits {
- u8 reserved_at_0[0x30];
+ u8 reserved_at_0[0x1f];
+ u8 vhca_resource_manager[0x1];
+
+ u8 reserved_at_20[0x3];
+ u8 event_on_vhca_state_teardown_request[0x1];
+ u8 event_on_vhca_state_in_use[0x1];
+ u8 event_on_vhca_state_active[0x1];
+ u8 event_on_vhca_state_allocated[0x1];
+ u8 event_on_vhca_state_invalid[0x1];
+ u8 reserved_at_28[0x8];
u8 vhca_id[0x10];
u8 reserved_at_40[0x40];
@@ -1241,7 +1280,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 ece_support[0x1];
u8 reserved_at_a4[0x7];
u8 log_max_srq[0x5];
- u8 reserved_at_b0[0x10];
+ u8 reserved_at_b0[0x2];
+ u8 ts_cqe_to_dest_cqn[0x1];
+ u8 reserved_at_b3[0xd];
u8 max_sgl_for_optimized_performance[0x8];
u8 log_max_cq_sz[0x8];
@@ -1512,7 +1553,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 disable_local_lb_uc[0x1];
u8 disable_local_lb_mc[0x1];
u8 log_min_hairpin_wq_data_sz[0x5];
- u8 reserved_at_3e8[0x3];
+ u8 reserved_at_3e8[0x2];
+ u8 vhca_state[0x1];
u8 log_max_vlan_list[0x5];
u8 reserved_at_3f0[0x3];
u8 log_max_current_mc_list[0x5];
@@ -1521,7 +1563,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 general_obj_types[0x40];
- u8 reserved_at_440[0x20];
+ u8 reserved_at_440[0x4];
+ u8 steering_format_version[0x4];
+ u8 create_qp_start_hint[0x18];
u8 reserved_at_460[0x3];
u8 log_max_uctx[0x5];
@@ -1580,7 +1624,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 max_num_of_monitor_counters[0x10];
u8 num_ppcnt_monitor_counters[0x10];
- u8 reserved_at_640[0x10];
+ u8 max_num_sf[0x10];
u8 num_q_monitor_counters[0x10];
u8 reserved_at_660[0x20];
@@ -1616,6 +1660,7 @@ enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99,
MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100,
@@ -1668,7 +1713,9 @@ struct mlx5_ifc_fte_match_param_bits {
struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;
- u8 reserved_at_a00[0x600];
+ struct mlx5_ifc_fte_match_set_misc4_bits misc_parameters_4;
+
+ u8 reserved_at_c00[0x400];
};
enum {
@@ -3289,8 +3336,12 @@ struct mlx5_ifc_sqc_bits {
u8 reserved_at_80[0x10];
u8 hairpin_peer_vhca[0x10];
- u8 reserved_at_a0[0x50];
+ u8 reserved_at_a0[0x20];
+
+ u8 reserved_at_c0[0x8];
+ u8 ts_cqe_to_dest_cqn[0x18];
+ u8 reserved_at_e0[0x10];
u8 packet_pacing_rate_limit_index[0x10];
u8 tis_lst_sz[0x10];
u8 reserved_at_110[0x10];
@@ -4204,7 +4255,11 @@ struct mlx5_ifc_set_hca_cap_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_function[0x1];
+ u8 reserved_at_41[0xf];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
union mlx5_ifc_hca_cap_union_bits capability;
};
@@ -5461,6 +5516,7 @@ enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_3 = 0x4,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_4 = 0x5,
};
struct mlx5_ifc_query_flow_group_out_bits {
@@ -10657,11 +10713,13 @@ struct mlx5_ifc_affiliated_event_header_bits {
enum {
MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT(0xc),
MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT(0x13),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER = BIT(0x20),
};
enum {
MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc,
MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13,
+ MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20,
};
enum {
@@ -10736,6 +10794,33 @@ struct mlx5_ifc_create_encryption_key_in_bits {
struct mlx5_ifc_encryption_key_obj_bits encryption_key_object;
};
+struct mlx5_ifc_sampler_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 table_type[0x8];
+ u8 level[0x8];
+ u8 reserved_at_50[0xf];
+ u8 ignore_flow_level[0x1];
+
+ u8 sample_ratio[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 sample_table_id[0x18];
+
+ u8 reserved_at_a0[0x8];
+ u8 default_table_id[0x18];
+
+ u8 sw_steering_icm_address_rx[0x40];
+ u8 sw_steering_icm_address_tx[0x40];
+
+ u8 reserved_at_140[0xa0];
+};
+
+struct mlx5_ifc_create_sampler_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_sampler_obj_bits sampler_object;
+};
+
enum {
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1,
diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h
new file mode 100644
index 000000000000..98b56b75c625
--- /dev/null
+++ b/include/linux/mlx5/mlx5_ifc_vdpa.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#ifndef __MLX5_IFC_VDPA_H_
+#define __MLX5_IFC_VDPA_H_
+
+enum {
+ MLX5_VIRTIO_Q_EVENT_MODE_NO_MSIX_MODE = 0x0,
+ MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE = 0x1,
+ MLX5_VIRTIO_Q_EVENT_MODE_MSIX_MODE = 0x2,
+};
+
+enum {
+	MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT = 0x1,
+ MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED = 0x2,
+};
+
+enum {
+ MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0,
+ MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1,
+};
+
+struct mlx5_ifc_virtio_q_bits {
+ u8 virtio_q_type[0x8];
+ u8 reserved_at_8[0x5];
+ u8 event_mode[0x3];
+ u8 queue_index[0x10];
+
+ u8 full_emulation[0x1];
+ u8 virtio_version_1_0[0x1];
+ u8 reserved_at_22[0x2];
+ u8 offload_type[0x4];
+ u8 event_qpn_or_msix[0x18];
+
+ u8 doorbell_stride_index[0x10];
+ u8 queue_size[0x10];
+
+ u8 device_emulation_id[0x20];
+
+ u8 desc_addr[0x40];
+
+ u8 used_addr[0x40];
+
+ u8 available_addr[0x40];
+
+ u8 virtio_q_mkey[0x20];
+
+ u8 max_tunnel_desc[0x10];
+ u8 reserved_at_170[0x8];
+ u8 error_type[0x8];
+
+ u8 umem_1_id[0x20];
+
+ u8 umem_1_size[0x20];
+
+ u8 umem_1_offset[0x40];
+
+ u8 umem_2_id[0x20];
+
+ u8 umem_2_size[0x20];
+
+ u8 umem_2_offset[0x40];
+
+ u8 umem_3_id[0x20];
+
+ u8 umem_3_size[0x20];
+
+ u8 umem_3_offset[0x40];
+
+ u8 counter_set_id[0x20];
+
+ u8 reserved_at_320[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_at_340[0xc0];
+};
+
+struct mlx5_ifc_virtio_net_q_object_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x20];
+
+ u8 vhca_id[0x10];
+ u8 reserved_at_70[0x10];
+
+ u8 queue_feature_bit_mask_12_3[0xa];
+ u8 dirty_bitmap_dump_enable[0x1];
+ u8 vhost_log_page[0x5];
+ u8 reserved_at_90[0xc];
+ u8 state[0x4];
+
+ u8 reserved_at_a0[0x5];
+ u8 queue_feature_bit_mask_2_0[0x3];
+ u8 tisn_or_qpn[0x18];
+
+ u8 dirty_bitmap_mkey[0x20];
+
+ u8 dirty_bitmap_size[0x20];
+
+ u8 dirty_bitmap_addr[0x40];
+
+ u8 hw_available_index[0x10];
+ u8 hw_used_index[0x10];
+
+ u8 reserved_at_160[0xa0];
+
+ struct mlx5_ifc_virtio_q_bits virtio_q_context;
+};
+
+struct mlx5_ifc_create_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+struct mlx5_ifc_create_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_net_q_in_bits {
+	struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_query_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+};
+
+struct mlx5_ifc_query_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+enum {
+ MLX5_VIRTQ_MODIFY_MASK_STATE = (u64)1 << 0,
+ MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS = (u64)1 << 3,
+ MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4,
+};
+
+enum {
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT = 0x0,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY = 0x1,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND = 0x2,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR = 0x3,
+};
+
+enum {
+ MLX5_RQTC_LIST_Q_TYPE_RQ = 0x0,
+ MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q = 0x1,
+};
+
+struct mlx5_ifc_modify_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+struct mlx5_ifc_modify_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+#endif /* __MLX5_IFC_VDPA_H_ */
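As elsewhere in mlx5_ifc, these u8 arrays only record field widths in bits; the layouts are meant to be driven through the MLX5_SET()/MLX5_SET64()/MLX5_GET() accessors from mlx5/device.h, never dereferenced directly. A hedged sketch of moving a queue to the RDY state (command-header setup and the mlx5_cmd_exec() call omitted):

u32 in[MLX5_ST_SZ_DW(modify_virtio_net_q_in)] = {};
void *obj = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);

MLX5_SET64(virtio_net_q_object, obj, modify_field_select,
	   MLX5_VIRTQ_MODIFY_MASK_STATE);
MLX5_SET(virtio_net_q_object, obj, state,
	 MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);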
diff --git a/include/linux/mm.h b/include/linux/mm.h
index db6ae4d3fb4e..abc7b3154298 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -557,8 +557,16 @@ enum page_entry_size {
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
- int (*split)(struct vm_area_struct * area, unsigned long addr);
- int (*mremap)(struct vm_area_struct * area);
+ /* Called any time before splitting to check if it's allowed */
+ int (*may_split)(struct vm_area_struct *area, unsigned long addr);
+ int (*mremap)(struct vm_area_struct *area, unsigned long flags);
+ /*
+ * Called by mprotect() to make driver-specific permission
+ * checks before mprotect() is finalised. The VMA must not
+	 * be modified. Returns 0 if mprotect() can proceed.
+ */
+ int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, unsigned long newflags);
vm_fault_t (*fault)(struct vm_fault *vmf);
vm_fault_t (*huge_fault)(struct vm_fault *vmf,
enum page_entry_size pe_size);
@@ -1484,28 +1492,6 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
#endif
}
-#ifdef CONFIG_MEMCG
-static inline struct mem_cgroup *page_memcg(struct page *page)
-{
- return page->mem_cgroup;
-}
-static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
-{
- WARN_ON_ONCE(!rcu_read_lock_held());
- return READ_ONCE(page->mem_cgroup);
-}
-#else
-static inline struct mem_cgroup *page_memcg(struct page *page)
-{
- return NULL;
-}
-static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
-{
- WARN_ON_ONCE(!rcu_read_lock_held());
- return NULL;
-}
-#endif
-
/*
* Some inline functions in vmstat.h depend on page_zone()
*/
@@ -1716,8 +1702,8 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
-extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long addr, void *buf, int len, unsigned int gup_flags);
+extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
+ void *buf, int len, unsigned int gup_flags);
long get_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
@@ -2203,7 +2189,7 @@ static inline bool pgtable_pte_page_ctor(struct page *page)
if (!ptlock_init(page))
return false;
__SetPageTable(page);
- inc_zone_page_state(page, NR_PAGETABLE);
+ inc_lruvec_page_state(page, NR_PAGETABLE);
return true;
}
@@ -2211,7 +2197,7 @@ static inline void pgtable_pte_page_dtor(struct page *page)
{
ptlock_free(page);
__ClearPageTable(page);
- dec_zone_page_state(page, NR_PAGETABLE);
+ dec_lruvec_page_state(page, NR_PAGETABLE);
}
#define pte_offset_map_lock(mm, pmd, address, ptlp) \
@@ -2298,7 +2284,7 @@ static inline bool pgtable_pmd_page_ctor(struct page *page)
if (!pmd_ptlock_init(page))
return false;
__SetPageTable(page);
- inc_zone_page_state(page, NR_PAGETABLE);
+ inc_lruvec_page_state(page, NR_PAGETABLE);
return true;
}
@@ -2306,7 +2292,7 @@ static inline void pgtable_pmd_page_dtor(struct page *page)
{
pmd_ptlock_free(page);
__ClearPageTable(page);
- dec_zone_page_state(page, NR_PAGETABLE);
+ dec_lruvec_page_state(page, NR_PAGETABLE);
}
/*
@@ -2433,9 +2419,6 @@ static inline int early_pfn_to_nid(unsigned long pfn)
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
-/* there is a per-arch backend function. */
-extern int __meminit __early_pfn_to_nid(unsigned long pfn,
- struct mminit_pfnnid_cache *state);
#endif
extern void set_dma_reserve(unsigned long new_dma_reserve);
@@ -2874,44 +2857,56 @@ extern int apply_to_existing_page_range(struct mm_struct *mm,
unsigned long address, unsigned long size,
pte_fn_t fn, void *data);
+extern void init_mem_debugging_and_hardening(void);
#ifdef CONFIG_PAGE_POISONING
-extern bool page_poisoning_enabled(void);
-extern void kernel_poison_pages(struct page *page, int numpages, int enable);
+extern void __kernel_poison_pages(struct page *page, int numpages);
+extern void __kernel_unpoison_pages(struct page *page, int numpages);
+extern bool _page_poisoning_enabled_early;
+DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
+static inline bool page_poisoning_enabled(void)
+{
+ return _page_poisoning_enabled_early;
+}
+/*
+ * For use in fast paths after init_mem_debugging_and_hardening() has
+ * run, or when a false negative result is not harmful when called
+ * too early.
+ */
+static inline bool page_poisoning_enabled_static(void)
+{
+ return static_branch_unlikely(&_page_poisoning_enabled);
+}
+static inline void kernel_poison_pages(struct page *page, int numpages)
+{
+ if (page_poisoning_enabled_static())
+ __kernel_poison_pages(page, numpages);
+}
+static inline void kernel_unpoison_pages(struct page *page, int numpages)
+{
+ if (page_poisoning_enabled_static())
+ __kernel_unpoison_pages(page, numpages);
+}
#else
static inline bool page_poisoning_enabled(void) { return false; }
-static inline void kernel_poison_pages(struct page *page, int numpages,
- int enable) { }
+static inline bool page_poisoning_enabled_static(void) { return false; }
+static inline void __kernel_poison_pages(struct page *page, int numpages) { }
+static inline void kernel_poison_pages(struct page *page, int numpages) { }
+static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
#endif
-#ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON
-DECLARE_STATIC_KEY_TRUE(init_on_alloc);
-#else
DECLARE_STATIC_KEY_FALSE(init_on_alloc);
-#endif
static inline bool want_init_on_alloc(gfp_t flags)
{
- if (static_branch_unlikely(&init_on_alloc) &&
- !page_poisoning_enabled())
+ if (static_branch_unlikely(&init_on_alloc))
return true;
return flags & __GFP_ZERO;
}
-#ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON
-DECLARE_STATIC_KEY_TRUE(init_on_free);
-#else
DECLARE_STATIC_KEY_FALSE(init_on_free);
-#endif
static inline bool want_init_on_free(void)
{
- return static_branch_unlikely(&init_on_free) &&
- !page_poisoning_enabled();
+ return static_branch_unlikely(&init_on_free);
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
-extern void init_debug_pagealloc(void);
-#else
-static inline void init_debug_pagealloc(void) {}
-#endif
extern bool _debug_pagealloc_enabled_early;
DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
@@ -2933,28 +2928,28 @@ static inline bool debug_pagealloc_enabled_static(void)
return static_branch_unlikely(&_debug_pagealloc_enabled);
}
-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
-extern void __kernel_map_pages(struct page *page, int numpages, int enable);
-
+#ifdef CONFIG_DEBUG_PAGEALLOC
/*
- * When called in DEBUG_PAGEALLOC context, the call should most likely be
- * guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static()
+ * To support DEBUG_PAGEALLOC, the architecture must ensure that
+ * __kernel_map_pages() never fails.
*/
-static inline void
-kernel_map_pages(struct page *page, int numpages, int enable)
-{
- __kernel_map_pages(page, numpages, enable);
-}
-#ifdef CONFIG_HIBERNATION
-extern bool kernel_page_present(struct page *page);
-#endif /* CONFIG_HIBERNATION */
-#else /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
-static inline void
-kernel_map_pages(struct page *page, int numpages, int enable) {}
-#ifdef CONFIG_HIBERNATION
-static inline bool kernel_page_present(struct page *page) { return true; }
-#endif /* CONFIG_HIBERNATION */
-#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
+extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+
+static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
+{
+ if (debug_pagealloc_enabled_static())
+ __kernel_map_pages(page, numpages, 1);
+}
+
+static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
+{
+ if (debug_pagealloc_enabled_static())
+ __kernel_map_pages(page, numpages, 0);
+}
+#else /* CONFIG_DEBUG_PAGEALLOC */
+static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
+static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
+#endif /* CONFIG_DEBUG_PAGEALLOC */
#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
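The net effect of the poisoning and DEBUG_PAGEALLOC rework is that callers need neither #ifdefs nor explicit enablement checks: the wrappers collapse to a patched-out static branch when the feature is off. A hedged sketch of the resulting call pattern in a free path (the function name is illustrative):

static void my_free_pages_prepare(struct page *page, int numpages)
{
	kernel_poison_pages(page, numpages);		/* no-op unless enabled */
	debug_pagealloc_unmap_pages(page, numpages);	/* drop from direct map */
}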
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5a9238f6caad..07d9acb5b19c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -14,6 +14,7 @@
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
+#include <linux/seqlock.h>
#include <asm/mmu.h>
@@ -78,7 +79,7 @@ struct page {
struct { /* Page cache and anonymous pages */
/**
* @lru: Pageout list, eg. active_list protected by
- * pgdat->lru_lock. Sometimes used as a generic list
+ * lruvec->lru_lock. Sometimes used as a generic list
* by the page owner.
*/
struct list_head lru;
@@ -199,10 +200,7 @@ struct page {
atomic_t _refcount;
#ifdef CONFIG_MEMCG
- union {
- struct mem_cgroup *mem_cgroup;
- struct obj_cgroup **obj_cgroups;
- };
+ unsigned long memcg_data;
#endif
/*
@@ -446,6 +444,13 @@ struct mm_struct {
*/
atomic_t has_pinned;
+ /**
+ * @write_protect_seq: Locked when any thread is write
+ * protecting pages mapped by this mm to enforce a later COW,
+ * for instance during page table copying for fork().
+ */
+ seqcount_t write_protect_seq;
+
#ifdef CONFIG_MMU
atomic_long_t pgtables_bytes; /* PTE page table pages */
#endif
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 18e7eae9b5ba..0540f0156f58 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -1,11 +1,65 @@
#ifndef _LINUX_MMAP_LOCK_H
#define _LINUX_MMAP_LOCK_H
+#include <linux/lockdep.h>
+#include <linux/mm_types.h>
#include <linux/mmdebug.h>
+#include <linux/rwsem.h>
+#include <linux/tracepoint-defs.h>
+#include <linux/types.h>
#define MMAP_LOCK_INITIALIZER(name) \
.mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
+DECLARE_TRACEPOINT(mmap_lock_start_locking);
+DECLARE_TRACEPOINT(mmap_lock_acquire_returned);
+DECLARE_TRACEPOINT(mmap_lock_released);
+
+#ifdef CONFIG_TRACING
+
+void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
+void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
+ bool success);
+void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);
+
+static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
+ bool write)
+{
+ if (tracepoint_enabled(mmap_lock_start_locking))
+ __mmap_lock_do_trace_start_locking(mm, write);
+}
+
+static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
+ bool write, bool success)
+{
+ if (tracepoint_enabled(mmap_lock_acquire_returned))
+ __mmap_lock_do_trace_acquire_returned(mm, write, success);
+}
+
+static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
+{
+ if (tracepoint_enabled(mmap_lock_released))
+ __mmap_lock_do_trace_released(mm, write);
+}
+
+#else /* !CONFIG_TRACING */
+
+static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
+ bool write)
+{
+}
+
+static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
+ bool write, bool success)
+{
+}
+
+static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
+{
+}
+
+#endif /* CONFIG_TRACING */
+
static inline void mmap_init_lock(struct mm_struct *mm)
{
init_rwsem(&mm->mmap_lock);
@@ -13,57 +67,86 @@ static inline void mmap_init_lock(struct mm_struct *mm)
static inline void mmap_write_lock(struct mm_struct *mm)
{
+ __mmap_lock_trace_start_locking(mm, true);
down_write(&mm->mmap_lock);
+ __mmap_lock_trace_acquire_returned(mm, true, true);
}
static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
+ __mmap_lock_trace_start_locking(mm, true);
down_write_nested(&mm->mmap_lock, subclass);
+ __mmap_lock_trace_acquire_returned(mm, true, true);
}
static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
- return down_write_killable(&mm->mmap_lock);
+ int ret;
+
+ __mmap_lock_trace_start_locking(mm, true);
+ ret = down_write_killable(&mm->mmap_lock);
+ __mmap_lock_trace_acquire_returned(mm, true, ret == 0);
+ return ret;
}
static inline bool mmap_write_trylock(struct mm_struct *mm)
{
- return down_write_trylock(&mm->mmap_lock) != 0;
+ bool ret;
+
+ __mmap_lock_trace_start_locking(mm, true);
+ ret = down_write_trylock(&mm->mmap_lock) != 0;
+ __mmap_lock_trace_acquire_returned(mm, true, ret);
+ return ret;
}
static inline void mmap_write_unlock(struct mm_struct *mm)
{
up_write(&mm->mmap_lock);
+ __mmap_lock_trace_released(mm, true);
}
static inline void mmap_write_downgrade(struct mm_struct *mm)
{
downgrade_write(&mm->mmap_lock);
+ __mmap_lock_trace_acquire_returned(mm, false, true);
}
static inline void mmap_read_lock(struct mm_struct *mm)
{
+ __mmap_lock_trace_start_locking(mm, false);
down_read(&mm->mmap_lock);
+ __mmap_lock_trace_acquire_returned(mm, false, true);
}
static inline int mmap_read_lock_killable(struct mm_struct *mm)
{
- return down_read_killable(&mm->mmap_lock);
+ int ret;
+
+ __mmap_lock_trace_start_locking(mm, false);
+ ret = down_read_killable(&mm->mmap_lock);
+ __mmap_lock_trace_acquire_returned(mm, false, ret == 0);
+ return ret;
}
static inline bool mmap_read_trylock(struct mm_struct *mm)
{
- return down_read_trylock(&mm->mmap_lock) != 0;
+ bool ret;
+
+ __mmap_lock_trace_start_locking(mm, false);
+ ret = down_read_trylock(&mm->mmap_lock) != 0;
+ __mmap_lock_trace_acquire_returned(mm, false, ret);
+ return ret;
}
static inline void mmap_read_unlock(struct mm_struct *mm)
{
up_read(&mm->mmap_lock);
+ __mmap_lock_trace_released(mm, false);
}
static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)
{
- if (down_read_trylock(&mm->mmap_lock)) {
+ if (mmap_read_trylock(mm)) {
rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
return true;
}
@@ -73,6 +156,7 @@ static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)
static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
up_read_non_owner(&mm->mmap_lock);
+ __mmap_lock_trace_released(mm, false);
}
static inline void mmap_assert_locked(struct mm_struct *mm)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index fb3bf696c05e..b593316bff3d 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -113,8 +113,7 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
struct pglist_data;
/*
- * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
- * So add a wild amount of padding here to ensure that they fall into separate
+ * Add a wild amount of padding here to ensure the data falls into separate
* cachelines. There are very few zone structures in the machine, so space
* consumption is not a concern here.
*/
@@ -152,7 +151,6 @@ enum zone_stat_item {
NR_ZONE_UNEVICTABLE,
NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */
NR_MLOCK, /* mlock()ed pages found and moved off LRU */
- NR_PAGETABLE, /* used for pagetables */
/* Second 128 byte cacheline */
NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
@@ -207,6 +205,7 @@ enum node_stat_item {
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
NR_KERNEL_SCS_KB, /* measured in KiB */
#endif
+ NR_PAGETABLE, /* used for pagetables */
NR_VM_NODE_STAT_ITEMS
};
@@ -276,6 +275,8 @@ enum lruvec_flags {
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
+ /* per lruvec lru_lock for memcg */
+ spinlock_t lru_lock;
/*
* These track the cost of reclaiming one LRU - file or anon -
* over the other. As the observed cost of reclaiming one LRU
@@ -354,26 +355,6 @@ enum zone_type {
* DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
* platforms may need both zones as they support peripherals with
* different DMA addressing limitations.
- *
- * Some examples:
- *
- * - i386 and x86_64 have a fixed 16M ZONE_DMA and ZONE_DMA32 for the
- * rest of the lower 4G.
- *
- * - arm only uses ZONE_DMA, the size, up to 4G, may vary depending on
- * the specific device.
- *
- * - arm64 has a fixed 1G ZONE_DMA and ZONE_DMA32 for the rest of the
- * lower 4G.
- *
- * - powerpc only uses ZONE_DMA, the size, up to 2G, may vary
- * depending on the specific device.
- *
- * - s390 uses ZONE_DMA fixed to the lower 2G.
- *
- * - ia64 and riscv only use ZONE_DMA32.
- *
- * - parisc uses neither.
*/
#ifdef CONFIG_ZONE_DMA
ZONE_DMA,
@@ -470,6 +451,12 @@ struct zone {
#endif
struct pglist_data *zone_pgdat;
struct per_cpu_pageset __percpu *pageset;
+ /*
+ * the high and batch values are copied to individual pagesets for
+ * faster access
+ */
+ int pageset_high;
+ int pageset_batch;
#ifndef CONFIG_SPARSEMEM
/*
@@ -796,7 +783,6 @@ typedef struct pglist_data {
/* Write-intensive fields used by page reclaim */
ZONE_PADDING(_pad1_)
- spinlock_t lru_lock;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
@@ -1429,17 +1415,6 @@ void sparse_init(void);
#endif /* CONFIG_SPARSEMEM */
/*
- * During memory init memblocks map pfns to nids. The search is expensive and
- * this caches recent lookups. The implementation of __early_pfn_to_nid
- * may treat start/end as pfns or sections.
- */
-struct mminit_pfnnid_cache {
- unsigned long last_start;
- unsigned long last_end;
- int last_nid;
-};
-
-/*
* If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
* need to check pfn validity within that MAX_ORDER_NR_PAGES block.
* pfn_valid_within() should be used in this case; we optimise this away
@@ -1451,37 +1426,6 @@ struct mminit_pfnnid_cache {
#define pfn_valid_within(pfn) (1)
#endif
-#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
-/*
- * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
- * associated with it or not. This means that a struct page exists for this
- * pfn. The caller cannot assume the page is fully initialized in general.
- * Hotplugable pages might not have been onlined yet. pfn_to_online_page()
- * will ensure the struct page is fully online and initialized. Special pages
- * (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly.
- *
- * In FLATMEM, it is expected that holes always have valid memmap as long as
- * there is valid PFNs either side of the hole. In SPARSEMEM, it is assumed
- * that a valid section has a memmap for the entire section.
- *
- * However, an ARM, and maybe other embedded architectures in the future
- * free memmap backing holes to save memory on the assumption the memmap is
- * never used. The page_zone linkages are then broken even though pfn_valid()
- * returns true. A walker of the full memmap must then do this additional
- * check to ensure the memmap they are looking at is sane by making sure
- * the zone and PFN linkages are still valid. This is expensive, but walkers
- * of the full memmap are extremely rare.
- */
-bool memmap_valid_within(unsigned long pfn,
- struct page *page, struct zone *zone);
-#else
-static inline bool memmap_valid_within(unsigned long pfn,
- struct page *page, struct zone *zone)
-{
- return true;
-}
-#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
-
#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */
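With lru_lock moved into the lruvec, LRU manipulation now locks the per-memcg structure rather than the node. A hedged sketch, assuming the mem_cgroup_page_lruvec() lookup from the accompanying memcontrol.h changes:

struct lruvec *lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));

spin_lock_irq(&lruvec->lru_lock);
list_move(&page->lru, &lruvec->lists[LRU_INACTIVE_FILE]);
spin_unlock_irq(&lruvec->lru_lock);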
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 5b08a473cdba..c425290b21e2 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -838,4 +838,12 @@ struct mhi_device_id {
kernel_ulong_t driver_data;
};
+#define AUXILIARY_NAME_SIZE 32
+#define AUXILIARY_MODULE_PREFIX "auxiliary:"
+
+struct auxiliary_device_id {
+ char name[AUXILIARY_NAME_SIZE];
+ kernel_ulong_t driver_data;
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index 6264617bab4d..c4e7a887f469 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -475,6 +475,10 @@ struct module {
unsigned int num_bpf_raw_events;
struct bpf_raw_event_map *bpf_raw_events;
#endif
+#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+ unsigned int btf_data_size;
+ void *btf_data;
+#endif
#ifdef CONFIG_JUMP_LABEL
struct jump_entry *jump_entries;
unsigned int num_jump_entries;
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 6b584cc4757c..360a0a7e7341 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -4,11 +4,50 @@
#include <linux/kobject.h>
#include <linux/list.h>
+#include <asm/msi.h>
+
+/* Dummy shadow structures if an architecture does not define them */
+#ifndef arch_msi_msg_addr_lo
+typedef struct arch_msi_msg_addr_lo {
+ u32 address_lo;
+} __attribute__ ((packed)) arch_msi_msg_addr_lo_t;
+#endif
+
+#ifndef arch_msi_msg_addr_hi
+typedef struct arch_msi_msg_addr_hi {
+ u32 address_hi;
+} __attribute__ ((packed)) arch_msi_msg_addr_hi_t;
+#endif
+
+#ifndef arch_msi_msg_data
+typedef struct arch_msi_msg_data {
+ u32 data;
+} __attribute__ ((packed)) arch_msi_msg_data_t;
+#endif
+/**
+ * msi_msg - Representation of an MSI message
+ * @address_lo: Low 32 bits of msi message address
+ * @arch_addrlo: Architecture specific shadow of @address_lo
+ * @address_hi: High 32 bits of msi message address
+ * (only used when device supports it)
+ * @arch_addrhi: Architecture specific shadow of @address_hi
+ * @data: MSI message data (usually 16 bits)
+ * @arch_data: Architecture specific shadow of @data
+ */
struct msi_msg {
- u32 address_lo; /* low 32 bits of msi message address */
- u32 address_hi; /* high 32 bits of msi message address */
- u32 data; /* 16 bits of msi message data */
+ union {
+ u32 address_lo;
+ arch_msi_msg_addr_lo_t arch_addr_lo;
+ };
+ union {
+ u32 address_hi;
+ arch_msi_msg_addr_hi_t arch_addr_hi;
+ };
+ union {
+ u32 data;
+ arch_msi_msg_data_t arch_data;
+ };
};
extern int pci_msi_ignore_mask;
@@ -243,7 +282,6 @@ struct msi_controller {
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
#include <linux/irqhandler.h>
-#include <asm/msi.h>
struct irq_domain;
struct irq_domain_ops;
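An architecture opts into the shadow view by supplying a packed typedef plus a matching #define in <asm/msi.h> before this header is included; the unions then let generic code keep using address_lo/address_hi/data while arch code names individual bits of the same storage. A hedged sketch with made-up field names:

/* <asm/msi.h> on a hypothetical architecture */
typedef struct arch_msi_msg_data {
	u32	vector		:  8,
		delivery_mode	:  3,
		reserved	: 21;
} __attribute__ ((packed)) arch_msi_msg_data_t;
#define arch_msi_msg_data	arch_msi_msg_data

/* arch code may then write msg.arch_data.vector while generic MSI
 * code reads the very same word through msg.data. */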
diff --git a/include/linux/net.h b/include/linux/net.h
index 0dcd51feef02..9e2324efc26a 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -240,7 +240,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg);
int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags);
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
struct socket *sockfd_lookup(int fd, int *err);
-struct socket *sock_from_file(struct file *file, int *err);
+struct socket *sock_from_file(struct file *file);
#define sockfd_put(sock) fput(sock->file)
int net_ratelimit(void);
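Callers of sock_from_file() now choose their own errno rather than receiving one through the out parameter; a typical conversion looks roughly like:

struct socket *sock = sock_from_file(file);

if (!sock)
	return -ENOTSOCK;	/* previously reported via *err */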
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 0b17c4322b09..934de56644e7 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -207,8 +207,8 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
NETIF_F_FSO)
/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | \
- NETIF_F_GSO_SCTP)
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_GSO_SCTP | \
+ NETIF_F_GSO_UDP_L4 | NETIF_F_GSO_FRAGLIST)
/*
* If one device supports one of these features, then enable them
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index fa275a054f46..7bf167993c05 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -34,7 +34,6 @@
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>
-#include <linux/ethtool.h>
#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
@@ -51,6 +50,7 @@
struct netpoll_info;
struct device;
+struct ethtool_ops;
struct phy_device;
struct dsa_port;
struct ip_tunnel_parm;
@@ -350,23 +350,25 @@ struct napi_struct {
};
enum {
- NAPI_STATE_SCHED, /* Poll is scheduled */
- NAPI_STATE_MISSED, /* reschedule a napi */
- NAPI_STATE_DISABLE, /* Disable pending */
- NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
- NAPI_STATE_LISTED, /* NAPI added to system lists */
- NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
- NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
+ NAPI_STATE_SCHED, /* Poll is scheduled */
+ NAPI_STATE_MISSED, /* reschedule a napi */
+ NAPI_STATE_DISABLE, /* Disable pending */
+ NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
+ NAPI_STATE_LISTED, /* NAPI added to system lists */
+ NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */
+ NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */
+	NAPI_STATE_PREFER_BUSY_POLL,	/* prefer busy-polling over softirq processing */
};
enum {
- NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED),
- NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
- NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
- NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
- NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED),
- NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
- NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
+ NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED),
+ NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
+ NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
+ NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
+ NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED),
+ NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
+ NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
+ NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL),
};
enum gro_result {
@@ -437,6 +439,11 @@ static inline bool napi_disable_pending(struct napi_struct *n)
return test_bit(NAPI_STATE_DISABLE, &n->state);
}
+static inline bool napi_prefer_busy_poll(struct napi_struct *n)
+{
+ return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
+}
+
bool napi_schedule_prep(struct napi_struct *n);
/**
@@ -1490,7 +1497,7 @@ struct net_device_ops {
};
/**
- * enum net_device_priv_flags - &struct net_device priv_flags
+ * enum netdev_priv_flags - &struct net_device priv_flags
*
* These are the &struct net_device priv_flags; they are only set internally
* by drivers and used in the kernel. These flags are invisible to
@@ -2557,6 +2564,18 @@ static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int l
u64_stats_update_end(&tstats->syncp);
}
+static inline void dev_sw_netstats_tx_add(struct net_device *dev,
+ unsigned int packets,
+ unsigned int len)
+{
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->tx_bytes += len;
+ tstats->tx_packets += packets;
+ u64_stats_update_end(&tstats->syncp);
+}
+
static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
{
struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
@@ -2584,6 +2603,20 @@ static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
#define netdev_alloc_pcpu_stats(type) \
__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
+#define devm_netdev_alloc_pcpu_stats(dev, type) \
+({ \
+ typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\
+ if (pcpu_stats) { \
+ int __cpu; \
+ for_each_possible_cpu(__cpu) { \
+ typeof(type) *stat; \
+ stat = per_cpu_ptr(pcpu_stats, __cpu); \
+ u64_stats_init(&stat->syncp); \
+ } \
+ } \
+ pcpu_stats; \
+})
+
enum netdev_lag_tx_type {
NETDEV_LAG_TX_TYPE_UNKNOWN,
NETDEV_LAG_TX_TYPE_RANDOM,
@@ -2789,7 +2822,6 @@ unsigned long netdev_boot_base(const char *prefix, int unit);
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
const char *hwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
-struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
void dev_add_pack(struct packet_type *pt);
void dev_remove_pack(struct packet_type *pt);
void __dev_remove_pack(struct packet_type *pt);
@@ -2813,9 +2845,21 @@ u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
+
int dev_queue_xmit(struct sk_buff *skb);
int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
-int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
+int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
+
+static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
+{
+ int ret;
+
+ ret = __dev_direct_xmit(skb, queue_id);
+ if (!dev_xmit_complete(ret))
+ kfree_skb(skb);
+ return ret;
+}
+
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
@@ -3581,7 +3625,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
}
/**
- * netif_subqueue_stopped - test status of subqueue
+ * __netif_subqueue_stopped - test status of subqueue
* @dev: network device
* @queue_index: sub queue index
*
@@ -3595,6 +3639,13 @@ static inline bool __netif_subqueue_stopped(const struct net_device *dev,
return netif_tx_queue_stopped(txq);
}
+/**
+ * netif_subqueue_stopped - test status of subqueue
+ * @dev: network device
+ * @skb: sub queue buffer pointer
+ *
+ * Check individual transmit queue of a device with multiple transmit queues.
+ */
static inline bool netif_subqueue_stopped(const struct net_device *dev,
struct sk_buff *skb)
{
@@ -4506,6 +4557,7 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
const struct net_device_stats *netdev_stats);
void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
const struct pcpu_sw_netstats __percpu *netstats);
+void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
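The new devm allocator pairs naturally with the tstats helpers: probe allocates the per-cpu counters with device-managed lifetime, the TX path accounts through dev_sw_netstats_tx_add(), and dev_get_tstats64() can back ndo_get_stats64. A hedged sketch with error handling trimmed:

/* in probe, given the parent struct device *parent */
dev->tstats = devm_netdev_alloc_pcpu_stats(parent, struct pcpu_sw_netstats);
if (!dev->tstats)
	return -ENOMEM;

/* in the TX path, once a frame has been handed to the hardware */
dev_sw_netstats_tx_add(dev, 1, skb->len);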
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index ab192720e2d6..46d9a0c26c67 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -198,6 +198,9 @@ struct ip_set_region {
u32 elements; /* Number of elements vs timeout */
};
+/* The max revision number supported by any set type + 1 */
+#define IPSET_REVISION_MAX 9
+
/* The core set type structure */
struct ip_set_type {
struct list_head list;
@@ -215,6 +218,8 @@ struct ip_set_type {
u8 family;
/* Type revisions */
u8 revision_min, revision_max;
+ /* Revision-specific supported (create) flags */
+ u8 create_flags[IPSET_REVISION_MAX+1];
/* Set features to control swapping */
u16 features;
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 5deb099d156d..8ebb64193757 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -227,7 +227,7 @@ struct xt_table {
unsigned int valid_hooks;
/* Man behind the curtain... */
- struct xt_table_info *private;
+ struct xt_table_info __rcu *private;
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
@@ -448,6 +448,9 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
+struct xt_table_info
+*xt_table_get_private_protected(const struct xt_table *table);
+
#ifdef CONFIG_COMPAT
#include <net/compat.h>
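Readers of the now-__rcu table info dereference it inside an RCU read-side section, while the control plane goes through the new helper, presumably an rcu_dereference_protected() against the table mutex. A hedged sketch of both sides:

/* packet path */
const struct xt_table_info *private;

rcu_read_lock();
private = rcu_dereference(table->private);
/* walk private->entries here */
rcu_read_unlock();

/* control plane, with the table lock held */
private = xt_table_get_private_protected(table);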
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index c32c15216da3..f0373a6cb5fb 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -55,6 +55,7 @@ struct nfs_page {
unsigned short wb_nio; /* Number of I/O attempts */
};
+struct nfs_pgio_mirror;
struct nfs_pageio_descriptor;
struct nfs_pageio_ops {
void (*pg_init)(struct nfs_pageio_descriptor *, struct nfs_page *);
@@ -64,6 +65,9 @@ struct nfs_pageio_ops {
unsigned int (*pg_get_mirror_count)(struct nfs_pageio_descriptor *,
struct nfs_page *);
void (*pg_cleanup)(struct nfs_pageio_descriptor *);
+ struct nfs_pgio_mirror *
+ (*pg_get_mirror)(struct nfs_pageio_descriptor *, u32);
+ u32 (*pg_set_mirror)(struct nfs_pageio_descriptor *, u32);
};
struct nfs_rw_ops {
diff --git a/include/linux/ns_common.h b/include/linux/ns_common.h
index 5fbc4000358f..0f1d024bd958 100644
--- a/include/linux/ns_common.h
+++ b/include/linux/ns_common.h
@@ -2,12 +2,15 @@
#ifndef _LINUX_NS_COMMON_H
#define _LINUX_NS_COMMON_H
+#include <linux/refcount.h>
+
struct proc_ns_operations;
struct ns_common {
atomic_long_t stashed;
const struct proc_ns_operations *ops;
unsigned int inum;
+ refcount_t count;
};
#endif
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 06409a6c40bc..e162b757b6d5 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -31,6 +31,19 @@ enum nvmem_type {
#define NVMEM_DEVID_AUTO (-2)
/**
+ * struct nvmem_keepout - NVMEM register keepout range.
+ *
+ * @start: The first byte offset to avoid.
+ * @end: One beyond the last byte offset to avoid.
+ * @value: The byte to fill reads with for this region.
+ */
+struct nvmem_keepout {
+ unsigned int start;
+ unsigned int end;
+ unsigned char value;
+};
+
+/**
* struct nvmem_config - NVMEM device configuration
*
* @dev: Parent device.
@@ -39,6 +52,8 @@ enum nvmem_type {
* @owner: Pointer to exporter module. Used for refcounting.
* @cells: Optional array of pre-defined NVMEM cells.
* @ncells: Number of elements in cells.
+ * @keepout: Optional array of keepout ranges (sorted ascending by start).
+ * @nkeepout: Number of elements in the keepout array.
* @type: Type of the nvmem storage
* @read_only: Device is read-only.
* @root_only: Device is accessible to root only.
@@ -66,6 +81,8 @@ struct nvmem_config {
struct gpio_desc *wp_gpio;
const struct nvmem_cell_info *cells;
int ncells;
+ const struct nvmem_keepout *keepout;
+ unsigned int nkeepout;
enum nvmem_type type;
bool read_only;
bool root_only;
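A hedged example of wiring up a keepout range, assuming an eFuse with a protected window at bytes 0x20-0x2f that should read back as 0xff:

static const struct nvmem_keepout my_keepouts[] = {
	{ .start = 0x20, .end = 0x30, .value = 0xff },
};

static struct nvmem_config my_config = {
	.name		= "my-efuse",
	.keepout	= my_keepouts,
	.nkeepout	= ARRAY_SIZE(my_keepouts),
};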
diff --git a/include/linux/of.h b/include/linux/of.h
index 5d51891cbf1a..4b27c9a27df3 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -108,7 +108,7 @@ static inline void of_node_init(struct device_node *node)
#if defined(CONFIG_OF_KOBJ)
kobject_init(&node->kobj, &of_node_ktype);
#endif
- node->fwnode.ops = &of_fwnode_ops;
+ fwnode_init(&node->fwnode, &of_fwnode_ops);
}
#if defined(CONFIG_OF_KOBJ)
@@ -558,6 +558,8 @@ int of_map_id(struct device_node *np, u32 id,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out);
+phys_addr_t of_dma_get_max_cpu_address(struct device_node *np);
+
#else /* CONFIG_OF */
static inline void of_core_init(void)
@@ -995,6 +997,11 @@ static inline int of_map_id(struct device_node *np, u32 id,
return -EINVAL;
}
+static inline phys_addr_t of_dma_get_max_cpu_address(struct device_node *np)
+{
+ return PHYS_ADDR_MAX;
+}
+
#define of_match_ptr(_ptr) NULL
#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
@@ -1300,6 +1307,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
static const struct of_device_id __of_table_##name \
__used __section("__" #table "_of_table") \
+ __aligned(__alignof__(struct of_device_id)) \
= { .compatible = compat, \
.data = (fn == (fn_type)NULL) ? fn : fn }
#else
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 4f6ba9379112..ec5d0290e0ee 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -86,8 +86,7 @@
*/
/*
- * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break
- * locked- and dirty-page accounting.
+ * Don't use the pageflags directly. Use the PageFoo macros.
*
* The page flags field is split into two parts, the main flags area
* which extends from the low bits upwards, and the fields area which
@@ -335,6 +334,7 @@ PAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
+ TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
@@ -363,8 +363,7 @@ PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
* for its own purposes.
* - PG_private and PG_private_2 cause releasepage() and co to be invoked
*/
-PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
- __CLEARPAGEFLAG(Private, private, PF_ANY)
+PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
@@ -715,9 +714,8 @@ PAGEFLAG_FALSE(DoubleMap)
#define PAGE_MAPCOUNT_RESERVE -128
#define PG_buddy 0x00000080
#define PG_offline 0x00000100
-#define PG_kmemcg 0x00000200
-#define PG_table 0x00000400
-#define PG_guard 0x00000800
+#define PG_table 0x00000200
+#define PG_guard 0x00000400
#define PageType(page, flag) \
((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
@@ -769,12 +767,6 @@ PAGE_TYPE_OPS(Buddy, buddy)
PAGE_TYPE_OPS(Offline, offline)
/*
- * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
- * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
- */
-PAGE_TYPE_OPS(Kmemcg, kmemcg)
-
-/*
* Marks pages in use as page tables.
*/
PAGE_TYPE_OPS(Table, table)
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index cfce186f0c4e..aff81ba31bd8 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -44,8 +44,12 @@ static inline void page_ext_init_flatmem(void)
{
}
extern void page_ext_init(void);
+static inline void page_ext_init_flatmem_late(void)
+{
+}
#else
extern void page_ext_init_flatmem(void);
+extern void page_ext_init_flatmem_late(void);
static inline void page_ext_init(void)
{
}
@@ -76,6 +80,10 @@ static inline void page_ext_init(void)
{
}
+static inline void page_ext_init_flatmem_late(void)
+{
+}
+
static inline void page_ext_init_flatmem(void)
{
}
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 081d934eda64..ad4ddc17d403 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -43,9 +43,6 @@ static inline unsigned pagevec_lookup(struct pagevec *pvec,
unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
struct address_space *mapping, pgoff_t *index, pgoff_t end,
xa_mark_t tag);
-unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
- struct address_space *mapping, pgoff_t *index, pgoff_t end,
- xa_mark_t tag, unsigned max_pages);
static inline unsigned pagevec_lookup_tag(struct pagevec *pvec,
struct address_space *mapping, pgoff_t *index, xa_mark_t tag)
{
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 505480217cf1..bf7966776c55 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -163,6 +163,8 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
#endif
+bool arm_pmu_irq_is_nmi(void);
+
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
struct arm_pmu *armpmu_alloc_atomic(void);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 96450f6fb1de..9a38f579bc76 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1028,6 +1028,8 @@ struct perf_sample_data {
u64 phys_addr;
u64 cgroup;
+ u64 data_page_size;
+ u64 code_page_size;
} ____cacheline_aligned;
/* default value for data source */
@@ -1585,4 +1587,8 @@ extern void __weak arch_perf_update_userpage(struct perf_event *event,
struct perf_event_mmap_page *userpg,
u64 now);
+#ifdef CONFIG_MMU
+extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr);
+#endif
+
#endif /* _LINUX_PERF_EVENT_H */
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index e237004d498d..8fcdfa52eb4b 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -258,6 +258,61 @@ static inline pte_t ptep_get(pte_t *ptep)
}
#endif
+#ifdef CONFIG_GUP_GET_PTE_LOW_HIGH
+/*
+ * WARNING: only to be used in the get_user_pages_fast() implementation.
+ *
+ * With get_user_pages_fast(), we walk down the pagetables without taking any
+ * locks. For this we would like to load the pointers atomically, but sometimes
+ * that is not possible (e.g. without expensive cmpxchg8b on x86_32 PAE). What
+ * we do have is the guarantee that a PTE will only either go from not present
+ * to present, or present to not present or both -- it will not switch to a
+ * completely different present page without a TLB flush in between; something
+ * that we are blocking by holding interrupts off.
+ *
+ * Setting ptes from not present to present goes:
+ *
+ * ptep->pte_high = h;
+ * smp_wmb();
+ * ptep->pte_low = l;
+ *
+ * And present to not present goes:
+ *
+ * ptep->pte_low = 0;
+ * smp_wmb();
+ * ptep->pte_high = 0;
+ *
+ * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
+ * We load pte_high *after* loading pte_low, which ensures we don't see an older
+ * value of pte_high. *Then* we recheck pte_low, which ensures that we haven't
+ * picked up a changed pte high. We might have gotten rubbish values from
+ * pte_low and pte_high, but we are guaranteed that pte_low will not have the
+ * present bit set *unless* it is 'l'. Because get_user_pages_fast() only
+ * operates on present ptes we're safe.
+ */
+static inline pte_t ptep_get_lockless(pte_t *ptep)
+{
+ pte_t pte;
+
+ do {
+ pte.pte_low = ptep->pte_low;
+ smp_rmb();
+ pte.pte_high = ptep->pte_high;
+ smp_rmb();
+ } while (unlikely(pte.pte_low != ptep->pte_low));
+
+ return pte;
+}
+#else /* CONFIG_GUP_GET_PTE_LOW_HIGH */
+/*
+ * We require that the PTE can be read atomically.
+ */
+static inline pte_t ptep_get_lockless(pte_t *ptep)
+{
+ return ptep_get(ptep);
+}
+#endif /* CONFIG_GUP_GET_PTE_LOW_HIGH */
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
@@ -1494,4 +1549,20 @@ typedef unsigned int pgtbl_mod_mask;
#define pmd_leaf(x) 0
#endif
+#ifndef pgd_leaf_size
+#define pgd_leaf_size(x) (1ULL << PGDIR_SHIFT)
+#endif
+#ifndef p4d_leaf_size
+#define p4d_leaf_size(x) P4D_SIZE
+#endif
+#ifndef pud_leaf_size
+#define pud_leaf_size(x) PUD_SIZE
+#endif
+#ifndef pmd_leaf_size
+#define pmd_leaf_size(x) PMD_SIZE
+#endif
+#ifndef pte_leaf_size
+#define pte_leaf_size(x) PAGE_SIZE
+#endif
+
#endif /* _LINUX_PGTABLE_H */
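A minimal sketch (not part of the patch) of how a gup-fast style walker consumes ptep_get_lockless(); disabling interrupts is what holds off the TLB flush that the comment above relies on. walk_one_pte() is an invented name, the real user is gup_pte_range() in mm/gup.c:

        /* Illustrative only. */
        static int walk_one_pte(pmd_t *pmdp, unsigned long addr)
        {
                pte_t *ptep = pte_offset_map(pmdp, addr);
                pte_t pte;
                int present;

                local_irq_disable();            /* blocks the TLB flush IPI */
                pte = ptep_get_lockless(ptep);  /* coherent low/high snapshot */
                present = pte_present(pte);
                local_irq_enable();

                pte_unmap(ptep);
                return present;
        }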
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 56563e5e0dc7..381a95732b6a 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -743,18 +743,11 @@ struct phy_driver {
/** @read_status: Determines the negotiated speed and duplex */
int (*read_status)(struct phy_device *phydev);
- /** @ack_interrupt: Clears any pending interrupts */
- int (*ack_interrupt)(struct phy_device *phydev);
-
- /** @config_intr: Enables or disables interrupts */
- int (*config_intr)(struct phy_device *phydev);
-
- /**
- * @did_interrupt: Checks if the PHY generated an interrupt.
- * For multi-PHY devices with shared PHY interrupt pin
- * Set interrupt bits have to be cleared.
+ /** @config_intr: Enables or disables interrupts.
+ * It should also clear any pending interrupts prior to enabling the
+ * IRQs and after disabling them.
*/
- int (*did_interrupt)(struct phy_device *phydev);
+ int (*config_intr)(struct phy_device *phydev);
/** @handle_interrupt: Override default interrupt handling */
irqreturn_t (*handle_interrupt)(struct phy_device *phydev);
@@ -1480,16 +1473,13 @@ int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
int genphy_loopback(struct phy_device *phydev, bool enable);
int genphy_soft_reset(struct phy_device *phydev);
+irqreturn_t genphy_handle_interrupt_no_ack(struct phy_device *phydev);
static inline int genphy_config_aneg(struct phy_device *phydev)
{
return __genphy_config_aneg(phydev, false);
}
-static inline int genphy_no_ack_interrupt(struct phy_device *phydev)
-{
- return 0;
-}
static inline int genphy_no_config_intr(struct phy_device *phydev)
{
return 0;
@@ -1540,8 +1530,10 @@ void phy_drivers_unregister(struct phy_driver *drv, int n);
int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner);
+void phy_error(struct phy_device *phydev);
void phy_state_machine(struct work_struct *work);
void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies);
+void phy_trigger_machine(struct phy_device *phydev);
void phy_mac_interrupt(struct phy_device *phydev);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
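With ack_interrupt and did_interrupt gone, the ack is folded into config_intr as the new kernel-doc above describes. A hedged driver-side sketch; MY_PHY_INTR_ENABLE and MY_PHY_INTR_STATUS are invented register offsets, not a real device:

        #define MY_PHY_INTR_ENABLE 0x12 /* hypothetical vendor registers */
        #define MY_PHY_INTR_STATUS 0x13

        static int my_phy_config_intr(struct phy_device *phydev)
        {
                int err;

                if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
                        /* clear stale events before unmasking */
                        err = phy_read(phydev, MY_PHY_INTR_STATUS);
                        if (err < 0)
                                return err;
                        return phy_write(phydev, MY_PHY_INTR_ENABLE, 0xffff);
                }

                /* mask first, then clear anything that fired meanwhile */
                err = phy_write(phydev, MY_PHY_INTR_ENABLE, 0);
                if (err < 0)
                        return err;
                err = phy_read(phydev, MY_PHY_INTR_STATUS);
                return err < 0 ? err : 0;
        }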
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 5a5cb45ac57e..7c7e627503d2 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -8,7 +8,6 @@
#include <linux/workqueue.h>
#include <linux/threads.h>
#include <linux/nsproxy.h>
-#include <linux/kref.h>
#include <linux/ns_common.h>
#include <linux/idr.h>
@@ -18,7 +17,6 @@
struct fs_pin;
struct pid_namespace {
- struct kref kref;
struct idr idr;
struct rcu_head rcu;
unsigned int pid_allocated;
@@ -43,7 +41,7 @@ extern struct pid_namespace init_pid_ns;
static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
{
if (ns != &init_pid_ns)
- kref_get(&ns->kref);
+ refcount_inc(&ns->ns.count);
return ns;
}
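The kref to refcount_t conversion implies a matching put side; a sketch of the pattern under that assumption (free_the_namespace() is a hypothetical stand-in for the real teardown done by put_pid_ns()):

        static void put_pid_ns_sketch(struct pid_namespace *ns)
        {
                if (ns == &init_pid_ns)
                        return;
                if (refcount_dec_and_test(&ns->ns.count))
                        free_the_namespace(ns); /* hypothetical teardown helper */
        }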
diff --git a/include/linux/platform_data/ad7298.h b/include/linux/platform_data/ad7298.h
deleted file mode 100644
index 3e0ffe2d5d3d..000000000000
--- a/include/linux/platform_data/ad7298.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * AD7298 SPI ADC driver
- *
- * Copyright 2011 Analog Devices Inc.
- */
-
-#ifndef __LINUX_PLATFORM_DATA_AD7298_H__
-#define __LINUX_PLATFORM_DATA_AD7298_H__
-
-/**
- * struct ad7298_platform_data - Platform data for the ad7298 ADC driver
- * @ext_ref: Whether to use an external reference voltage.
- **/
-struct ad7298_platform_data {
- bool ext_ref;
-};
-
-#endif /* IIO_ADC_AD7298_H_ */
diff --git a/include/linux/platform_data/ad7303.h b/include/linux/platform_data/ad7303.h
deleted file mode 100644
index c2bd0a13bea1..000000000000
--- a/include/linux/platform_data/ad7303.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Analog Devices AD7303 DAC driver
- *
- * Copyright 2013 Analog Devices Inc.
- */
-
-#ifndef __IIO_ADC_AD7303_H__
-#define __IIO_ADC_AD7303_H__
-
-/**
- * struct ad7303_platform_data - AD7303 platform data
- * @use_external_ref: If set to true use an external voltage reference connected
- * to the REF pin, otherwise use the internal reference derived from Vdd.
- */
-struct ad7303_platform_data {
- bool use_external_ref;
-};
-
-#endif
diff --git a/include/linux/platform_data/ad7887.h b/include/linux/platform_data/ad7887.h
index 732af46b2d16..9b4dca6ae70b 100644
--- a/include/linux/platform_data/ad7887.h
+++ b/include/linux/platform_data/ad7887.h
@@ -13,13 +13,9 @@
* second input channel, and Vref is internally connected to Vdd. If set to
* false the device is used in single channel mode and AIN1/Vref is used as
* VREF input.
- * @use_onchip_ref: Whether to use the onchip reference. If set to true the
- * internal 2.5V reference is used. If set to false a external reference is
- * used.
*/
struct ad7887_platform_data {
bool en_dual;
- bool use_onchip_ref;
};
#endif /* IIO_ADC_AD7887_H_ */
diff --git a/include/linux/platform_data/adau1977.h b/include/linux/platform_data/adau1977.h
deleted file mode 100644
index 86667235077a..000000000000
--- a/include/linux/platform_data/adau1977.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * ADAU1977/ADAU1978/ADAU1979 driver
- *
- * Copyright 2014 Analog Devices Inc.
- * Author: Lars-Peter Clausen <lars@metafoo.de>
- */
-
-#ifndef __LINUX_PLATFORM_DATA_ADAU1977_H__
-#define __LINUX_PLATFORM_DATA_ADAU1977_H__
-
-/**
- * enum adau1977_micbias - ADAU1977 MICBIAS pin voltage setting
- * @ADAU1977_MICBIAS_5V0: MICBIAS is set to 5.0 V
- * @ADAU1977_MICBIAS_5V5: MICBIAS is set to 5.5 V
- * @ADAU1977_MICBIAS_6V0: MICBIAS is set to 6.0 V
- * @ADAU1977_MICBIAS_6V5: MICBIAS is set to 6.5 V
- * @ADAU1977_MICBIAS_7V0: MICBIAS is set to 7.0 V
- * @ADAU1977_MICBIAS_7V5: MICBIAS is set to 7.5 V
- * @ADAU1977_MICBIAS_8V0: MICBIAS is set to 8.0 V
- * @ADAU1977_MICBIAS_8V5: MICBIAS is set to 8.5 V
- * @ADAU1977_MICBIAS_9V0: MICBIAS is set to 9.0 V
- */
-enum adau1977_micbias {
- ADAU1977_MICBIAS_5V0 = 0x0,
- ADAU1977_MICBIAS_5V5 = 0x1,
- ADAU1977_MICBIAS_6V0 = 0x2,
- ADAU1977_MICBIAS_6V5 = 0x3,
- ADAU1977_MICBIAS_7V0 = 0x4,
- ADAU1977_MICBIAS_7V5 = 0x5,
- ADAU1977_MICBIAS_8V0 = 0x6,
- ADAU1977_MICBIAS_8V5 = 0x7,
- ADAU1977_MICBIAS_9V0 = 0x8,
-};
-
-/**
- * struct adau1977_platform_data - Platform configuration data for the ADAU1977
- * @micbias: Specifies the voltage for the MICBIAS pin
- */
-struct adau1977_platform_data {
- enum adau1977_micbias micbias;
-};
-
-#endif
diff --git a/include/linux/platform_data/at91_adc.h b/include/linux/platform_data/at91_adc.h
deleted file mode 100644
index f20eaeb827ce..000000000000
--- a/include/linux/platform_data/at91_adc.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2011 Free Electrons
- */
-
-#ifndef _AT91_ADC_H_
-#define _AT91_ADC_H_
-
-enum atmel_adc_ts_type {
- ATMEL_ADC_TOUCHSCREEN_NONE = 0,
- ATMEL_ADC_TOUCHSCREEN_4WIRE = 4,
- ATMEL_ADC_TOUCHSCREEN_5WIRE = 5,
-};
-
-/**
- * struct at91_adc_trigger - description of triggers
- * @name: name of the trigger advertised to the user
- * @value: value to set in the ADC's trigger setup register
- to enable the trigger
- * @is_external: Does the trigger rely on an external pin?
- */
-struct at91_adc_trigger {
- const char *name;
- u8 value;
- bool is_external;
-};
-
-/**
- * struct at91_adc_data - platform data for ADC driver
- * @channels_used: channels in use on the board as a bitmask
- * @startup_time: startup time of the ADC in microseconds
- * @trigger_list: Triggers available in the ADC
- * @trigger_number: Number of triggers available in the ADC
- * @use_external_triggers: does the board has external triggers availables
- * @vref: Reference voltage for the ADC in millivolts
- * @touchscreen_type: If a touchscreen is connected, its type (4 or 5 wires)
- */
-struct at91_adc_data {
- unsigned long channels_used;
- u8 startup_time;
- struct at91_adc_trigger *trigger_list;
- u8 trigger_number;
- bool use_external_triggers;
- u16 vref;
- enum atmel_adc_ts_type touchscreen_type;
-};
-
-extern void __init at91_add_device_adc(struct at91_adc_data *data);
-#endif
diff --git a/include/linux/platform_data/hirschmann-hellcreek.h b/include/linux/platform_data/hirschmann-hellcreek.h
new file mode 100644
index 000000000000..388846766bb2
--- /dev/null
+++ b/include/linux/platform_data/hirschmann-hellcreek.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+/*
+ * Hirschmann Hellcreek TSN switch platform data.
+ *
+ * Copyright (C) 2020 Linutronix GmbH
+ * Author Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#ifndef _HIRSCHMANN_HELLCREEK_H_
+#define _HIRSCHMANN_HELLCREEK_H_
+
+#include <linux/types.h>
+
+struct hellcreek_platform_data {
+ int num_ports; /* Number of switch ports */
+ int is_100_mbits; /* Is it configured for 100 or 1000 Mbit/s? */
+ int qbv_support; /* Qbv support on front TSN ports */
+ int qbv_on_cpu_port; /* Qbv support on the CPU port */
+ int qbu_support; /* Qbu support on front TSN ports */
+ u16 module_id; /* Module identification */
+};
+
+#endif /* _HIRSCHMANN_HELLCREEK_H_ */
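Since this is classic platform data, a board file would instantiate it as static data; a minimal sketch using only the fields declared above (all values invented):

        static struct hellcreek_platform_data my_board_hellcreek_pdata = {
                .num_ports       = 4,   /* three front TSN ports + CPU port */
                .is_100_mbits    = 1,
                .qbv_support     = 1,
                .qbv_on_cpu_port = 1,
                .qbu_support     = 0,
                .module_id       = 0x4c30,
        };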
diff --git a/include/linux/platform_data/media/coda.h b/include/linux/platform_data/media/coda.h
deleted file mode 100644
index 293b61b60c9d..000000000000
--- a/include/linux/platform_data/media/coda.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2013 Philipp Zabel, Pengutronix
- */
-#ifndef PLATFORM_CODA_H
-#define PLATFORM_CODA_H
-
-struct device;
-
-struct coda_platform_data {
- struct device *iram_dev;
-};
-
-#endif
diff --git a/include/linux/platform_data/serial-imx.h b/include/linux/platform_data/serial-imx.h
deleted file mode 100644
index 0844b21372c7..000000000000
--- a/include/linux/platform_data/serial-imx.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2008 by Sascha Hauer <kernel@pengutronix.de>
- */
-
-#ifndef ASMARM_ARCH_UART_H
-#define ASMARM_ARCH_UART_H
-
-#define IMXUART_HAVE_RTSCTS (1<<0)
-
-struct imxuart_platform_data {
- unsigned int flags;
-};
-
-#endif
diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h
index fe815d7d9f58..d661399b217d 100644
--- a/include/linux/platform_data/shmob_drm.h
+++ b/include/linux/platform_data/shmob_drm.h
@@ -10,8 +10,6 @@
#ifndef __SHMOB_DRM_H__
#define __SHMOB_DRM_H__
-#include <linux/kernel.h>
-
#include <drm/drm_mode.h>
enum shmob_drm_clk_source {
diff --git a/include/linux/platform_data/usb-ehci-mxc.h b/include/linux/platform_data/usb-ehci-mxc.h
deleted file mode 100644
index ad9794d09bc8..000000000000
--- a/include/linux/platform_data/usb-ehci-mxc.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __INCLUDE_ASM_ARCH_MXC_EHCI_H
-#define __INCLUDE_ASM_ARCH_MXC_EHCI_H
-
-struct mxc_usbh_platform_data {
- int (*init)(struct platform_device *pdev);
- int (*exit)(struct platform_device *pdev);
-
- unsigned int portsc;
- struct usb_phy *otg;
-};
-
-#endif /* __INCLUDE_ASM_ARCH_MXC_EHCI_H */
-
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 4d75633e6735..3f23f6e430bf 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -53,6 +53,9 @@ extern struct device platform_bus;
extern struct resource *platform_get_resource(struct platform_device *,
unsigned int, unsigned int);
+extern struct resource *platform_get_mem_or_io(struct platform_device *,
+ unsigned int);
+
extern struct device *
platform_find_device_by_driver(struct device *start,
const struct device_driver *drv);
diff --git a/include/linux/poison.h b/include/linux/poison.h
index dc8ae5d8db03..aff1c9250c82 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -27,11 +27,7 @@
#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA)
/********** mm/page_poison.c **********/
-#ifdef CONFIG_PAGE_POISONING_ZERO
-#define PAGE_POISON 0x00
-#else
#define PAGE_POISON 0xaa
-#endif
/********** mm/page_alloc.c ************/
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 7547857516d5..69cc8b64aa3a 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -324,34 +324,71 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
#endif
-/**
- * migrate_disable - Prevent migration of the current task
+#ifdef CONFIG_SMP
+
+/*
+ * Migrate-Disable and why it is undesired.
*
- * Maps to preempt_disable() which also disables preemption. Use
- * migrate_disable() to annotate that the intent is to prevent migration,
- * but not necessarily preemption.
+ * When a preempted task becomes eligible to run under the ideal model (IOW it
+ * becomes one of the M highest priority tasks), it might still have to wait
+ * for the preemptee's migrate_disable() section to complete, thereby suffering
+ * a reduction in bandwidth for the exact duration of the migrate_disable()
+ * section.
*
- * Can be invoked nested like preempt_disable() and needs the corresponding
- * number of migrate_enable() invocations.
- */
-static __always_inline void migrate_disable(void)
-{
- preempt_disable();
-}
-
-/**
- * migrate_enable - Allow migration of the current task
+ * Per this argument, the change from preempt_disable() to migrate_disable()
+ * gets us:
+ *
+ * - a higher priority task gains reduced wake-up latency; with preempt_disable()
+ * it would have had to wait for the lower priority task.
+ *
+ * - a lower priority task, which under preempt_disable() could've instantly
+ * migrated away when another CPU becomes available, is now constrained
+ * by the ability to push the higher priority task away, which might itself be
+ * in a migrate_disable() section, reducing its available bandwidth.
+ *
+ * IOW it trades latency / moves the interference term, but it stays in the
+ * system, and as long as it remains unbounded, the system is not fully
+ * deterministic.
+ *
+ *
+ * The reason we have it anyway.
*
- * Counterpart to migrate_disable().
+ * PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
+ * number of primitives into becoming preemptible, they would also allow
+ * migration. This turns out to break a bunch of per-cpu usage. To this end,
+ * all these primitives employ migrate_disable() to restore this implicit
+ * assumption.
*
- * As migrate_disable() can be invoked nested, only the outermost invocation
- * reenables migration.
+ * This is a 'temporary' work-around at best. The correct solution is getting
+ * rid of the above assumptions and reworking the code to employ explicit
+ * per-cpu locking or short preempt-disable regions.
+ *
+ * The end goal must be to get rid of migrate_disable(), alternatively we need
+ * a schedulability theory that does not depend on arbitrary migration.
+ *
+ *
+ * Notes on the implementation.
+ *
+ * The implementation is particularly tricky since existing code patterns
+ * dictate neither migrate_disable() nor migrate_enable() is allowed to block.
+ * This means that it cannot use cpus_read_lock() to serialize against hotplug,
+ * nor can it easily migrate itself into a pending affinity mask change on
+ * migrate_enable().
+ *
+ *
+ * Note: even non-work-conserving schedulers like semi-partitioned depend on
+ * migration, so migrate_disable() is not only a problem for
+ * work-conserving schedulers.
*
- * Currently mapped to preempt_enable().
*/
-static __always_inline void migrate_enable(void)
-{
- preempt_enable();
-}
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+
+#else
+
+static inline void migrate_disable(void) { }
+static inline void migrate_enable(void) { }
+
+#endif /* CONFIG_SMP */
#endif /* __LINUX_PREEMPT_H */
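As a usage sketch (not from the patch): the primitive exists so a preemptible section can still rely on staying on one CPU, e.g. around multi-step per-CPU accesses. my_counter is an invented per-CPU variable:

        static DEFINE_PER_CPU(int, my_counter);

        static void bump_my_counter(void)
        {
                migrate_disable();      /* may be preempted, but not moved off-CPU */
                this_cpu_inc(my_counter);
                /* ... further work that must see the same CPU's data ... */
                migrate_enable();
        }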
diff --git a/include/linux/property.h b/include/linux/property.h
index 2d4542629d80..0a9001fe7aea 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -85,9 +85,12 @@ const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_get_next_parent(
struct fwnode_handle *fwnode);
+struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode);
unsigned int fwnode_count_parents(const struct fwnode_handle *fwn);
struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwn,
unsigned int depth);
+bool fwnode_is_ancestor_of(struct fwnode_handle *test_ancestor,
+ struct fwnode_handle *test_child);
struct fwnode_handle *fwnode_get_next_child_node(
const struct fwnode_handle *fwnode, struct fwnode_handle *child);
struct fwnode_handle *fwnode_get_next_available_child_node(
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
index c6487b7ab026..ae04968a3a47 100644
--- a/include/linux/ptp_classify.h
+++ b/include/linux/ptp_classify.h
@@ -31,6 +31,11 @@
#define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN)
#define PTP_CLASS_L4 (PTP_CLASS_IPV4 | PTP_CLASS_IPV6)
+#define PTP_MSGTYPE_SYNC 0x0
+#define PTP_MSGTYPE_DELAY_REQ 0x1
+#define PTP_MSGTYPE_PDELAY_REQ 0x2
+#define PTP_MSGTYPE_PDELAY_RESP 0x3
+
#define PTP_EV_PORT 319
#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
@@ -140,7 +145,7 @@ static inline u8 ptp_get_msgtype(const struct ptp_header *hdr,
/* The return is meaningless. The stub function would not be
 * executed since ptp_parse_header() found no header to parse.
 */
- return 0;
+ return PTP_MSGTYPE_SYNC;
}
#endif
#endif /* _PTP_CLASSIFY_H_ */
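A sketch of the new constants in use with the existing ptp_parse_header()/ptp_get_msgtype() helpers from this header:

        static bool skb_is_pdelay_req(struct sk_buff *skb, unsigned int type)
        {
                struct ptp_header *hdr = ptp_parse_header(skb, type);

                if (!hdr)
                        return false;
                return ptp_get_msgtype(hdr, type) == PTP_MSGTYPE_PDELAY_REQ;
        }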
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index d3e8ba5c7125..0d47fd33b228 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -12,6 +12,19 @@
#include <linux/pps_kernel.h>
#include <linux/ptp_clock.h>
+/**
+ * struct ptp_clock_request - request PTP clock event
+ *
+ * @type: The type of the request.
+ * EXTTS: Configure external trigger timestamping
+ * PEROUT: Configure periodic output signal (e.g. PPS)
+ * PPS: trigger internal PPS event for input
+ * into kernel PPS subsystem
+ * @extts: describes configuration for external trigger timestamping.
+ * This is only valid when event == PTP_CLK_REQ_EXTTS.
+ * @perout: describes configuration for periodic output.
+ * This is only valid when event == PTP_CLK_REQ_PEROUT.
+ */
struct ptp_clock_request {
enum {
diff --git a/include/linux/purgatory.h b/include/linux/purgatory.h
index b950e961cfa8..d7dc1559427f 100644
--- a/include/linux/purgatory.h
+++ b/include/linux/purgatory.h
@@ -3,7 +3,7 @@
#define _LINUX_PURGATORY_H
#include <linux/types.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <uapi/linux/kexec.h>
struct kexec_sha_region {
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 57fb295ea41a..68d17a4fbf20 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -7,6 +7,7 @@
#ifndef _QED_IF_H
#define _QED_IF_H
+#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 6cdd0152c253..de0826411311 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -241,6 +241,11 @@ bool rcu_lockdep_current_cpu_online(void);
static inline bool rcu_lockdep_current_cpu_online(void) { return true; }
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+extern struct lockdep_map rcu_lock_map;
+extern struct lockdep_map rcu_bh_lock_map;
+extern struct lockdep_map rcu_sched_lock_map;
+extern struct lockdep_map rcu_callback_map;
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void rcu_lock_acquire(struct lockdep_map *map)
@@ -253,10 +258,6 @@ static inline void rcu_lock_release(struct lockdep_map *map)
lock_release(map, _THIS_IP_);
}
-extern struct lockdep_map rcu_lock_map;
-extern struct lockdep_map rcu_bh_lock_map;
-extern struct lockdep_map rcu_sched_lock_map;
-extern struct lockdep_map rcu_callback_map;
int debug_lockdep_rcu_enabled(void);
int rcu_read_lock_held(void);
int rcu_read_lock_bh_held(void);
@@ -327,7 +328,7 @@ static inline void rcu_preempt_sleep_check(void) { }
#else /* #ifdef CONFIG_PROVE_RCU */
-#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
+#define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c))
#define rcu_sleep_check() do { } while (0)
#endif /* #else #ifdef CONFIG_PROVE_RCU */
diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
index 3e7919fc5f34..86c8f6c98412 100644
--- a/include/linux/rcupdate_trace.h
+++ b/include/linux/rcupdate_trace.h
@@ -11,10 +11,10 @@
#include <linux/sched.h>
#include <linux/rcupdate.h>
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-
extern struct lockdep_map rcu_trace_lock_map;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
static inline int rcu_read_lock_trace_held(void)
{
return lock_is_held(&rcu_trace_lock_map);
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 7c1ecdb356d8..2a97334eb786 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -89,6 +89,8 @@ static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
static inline void rcu_irq_exit_preempt(void) { }
static inline void rcu_irq_exit_check_preempt(void) { }
+#define rcu_is_idle_cpu(cpu) \
+ (is_idle_task(current) && !in_nmi() && !in_irq() && !in_serving_softirq())
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 59eb5cd567d7..df578b73960f 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -50,6 +50,7 @@ void rcu_irq_exit(void);
void rcu_irq_exit_preempt(void);
void rcu_irq_enter_irqson(void);
void rcu_irq_exit_irqson(void);
+bool rcu_is_idle_cpu(int cpu);
#ifdef CONFIG_PROVE_RCU
void rcu_irq_exit_check_preempt(void);
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 497990c69b0b..b8a6e387f8f9 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -101,7 +101,7 @@
struct mutex;
/**
- * struct refcount_t - variant of atomic_t specialized for reference counts
+ * typedef refcount_t - variant of atomic_t specialized for reference counts
* @refs: atomic_t counter field
*
* The counter saturates at REFCOUNT_SATURATED and will not move once
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index e7834d98207f..a652d1474d6a 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -570,6 +570,10 @@ struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__regmap_init_spi_avmm(struct spi_device *spi,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -619,6 +623,10 @@ struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -818,6 +826,19 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
sdw, config)
/**
+ * regmap_init_sdw_mbq() - Initialise register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_sdw_mbq(sdw, config) \
+ __regmap_lockdep_wrapper(__regmap_init_sdw_mbq, #config, \
+ sdw, config)
+
+/**
* regmap_init_spi_avmm() - Initialize register map for Intel SPI Slave
* to AVMM Bus Bridge
*
@@ -990,6 +1011,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
sdw, config)
/**
+ * devm_regmap_init_sdw_mbq() - Initialise managed register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sdw_mbq(sdw, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sdw_mbq, #config, \
+ sdw, config)
+
+/**
* devm_regmap_init_slimbus() - Initialise managed register map
*
* @slimbus: Device that will be interacted with
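A hedged sketch of the managed MBQ variant in a SoundWire codec probe; the regmap_config contents are invented here, the real geometry comes from the device datasheet:

        static const struct regmap_config my_mbq_regmap_config = {
                .reg_bits = 32, /* invented geometry */
                .val_bits = 16,
        };

        static int my_codec_probe(struct sdw_slave *slave,
                                  const struct sdw_device_id *id)
        {
                struct regmap *regmap;

                regmap = devm_regmap_init_sdw_mbq(slave, &my_mbq_regmap_config);
                if (IS_ERR(regmap))
                        return PTR_ERR(regmap);
                /* ... rest of probe ... */
                return 0;
        }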
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 8ad2487a86d5..231e06b74b50 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -138,6 +138,17 @@ void rfkill_unregister(struct rfkill *rfkill);
void rfkill_destroy(struct rfkill *rfkill);
/**
+ * rfkill_set_hw_state_reason - Set the internal rfkill hardware block state
+ * with a reason
+ * @rfkill: pointer to the rfkill class to modify.
+ * @blocked: the current hardware block state to set
+ * @reason: one of &enum rfkill_hard_block_reasons
+ *
+ * Prefer to use rfkill_set_hw_state if you don't need any special reason.
+ */
+bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
+ bool blocked, unsigned long reason);
+/**
* rfkill_set_hw_state - Set the internal rfkill hardware block state
* @rfkill: pointer to the rfkill class to modify.
* @blocked: the current hardware block state to set
@@ -156,7 +167,11 @@ void rfkill_destroy(struct rfkill *rfkill);
* should be blocked) so that drivers need not keep track of the soft
* block state -- which they might not be able to.
*/
-bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);
+static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
+{
+ return rfkill_set_hw_state_reason(rfkill, blocked,
+ RFKILL_HARD_BLOCK_SIGNAL);
+}
/**
* rfkill_set_sw_state - Set the internal rfkill software block state
@@ -256,6 +271,13 @@ static inline void rfkill_destroy(struct rfkill *rfkill)
{
}
+static inline bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
+ bool blocked,
+ unsigned long reason)
+{
+ return blocked;
+}
+
static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
return blocked;
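Drivers with a more specific cause call the _reason variant directly; a sketch, assuming RFKILL_HARD_BLOCK_NOT_OWNER is among the reasons added in enum rfkill_hard_block_reasons:

        /* Sketch: radio was claimed by another entity (e.g. firmware). */
        static void my_radio_taken_over(struct rfkill *rfkill)
        {
                rfkill_set_hw_state_reason(rfkill, true,
                                           RFKILL_HARD_BLOCK_NOT_OWNER);
        }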
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 3a6adfa70fb0..70085ca1a3fc 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -91,7 +91,6 @@ enum ttu_flags {
TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
- TTU_IGNORE_ACCESS = 0x10, /* don't age */
TTU_IGNORE_HWPOISON = 0x20, /* corrupted page is recoverable */
TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible
* and caller guarantees they will
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 22d1575e4991..b829382de6c3 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -110,13 +110,36 @@ struct rtc_device {
/* Some hardware can't support UIE mode */
int uie_unsupported;
- /* Number of nsec it takes to set the RTC clock. This influences when
- * the set ops are called. An offset:
- * - of 0.5 s will call RTC set for wall clock time 10.0 s at 9.5 s
- * - of 1.5 s will call RTC set for wall clock time 10.0 s at 8.5 s
- * - of -0.5 s will call RTC set for wall clock time 10.0 s at 10.5 s
+ /*
+ * This offset specifies the update timing of the RTC.
+ *
+ * tsched t1 write(t2.tv_sec - 1sec) t2 RTC increments seconds
+ *
+ * The offset defines how tsched is computed so that the write to
+ * the RTC (t2.tv_sec - 1sec) is correct versus the time required
+ * for the transport of the write and the time which the RTC needs
+ * to increment seconds the first time after the write (t2).
+ *
+ * For directly accessible RTCs tsched ~= t1 because the write time
+ * is negligible. For RTCs behind slow buses the transport time is
+ * significant and has to be taken into account.
+ *
+ * The time between the write (t1) and the first increment after
+ * the write (t2) is RTC specific. For an MC146818 RTC it's 500 ms,
+ * for many others it's exactly 1 second. Consult the datasheet.
+ *
+ * The value of this offset is also used to calculate the value to
+ * be written (t2.tv_sec - 1sec) at tsched.
+ *
+ * The default value for this is NSEC_PER_SEC plus a 10 msec default
+ * transport time. The offset can be adjusted by drivers so the
+ * calculation of the value to be written at tsched becomes correct:
+ *
+ * newval = tsched + set_offset_nsec - NSEC_PER_SEC
+ * and (tsched + set_offset_nsec) % NSEC_PER_SEC == 0
*/
- long set_offset_nsec;
+ unsigned long set_offset_nsec;
bool registered;
@@ -165,7 +188,6 @@ int __rtc_register_device(struct module *owner, struct rtc_device *rtc);
extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
-extern int rtc_set_ntp_time(struct timespec64 now, unsigned long *target_nsec);
int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm);
extern int rtc_read_alarm(struct rtc_device *rtc,
struct rtc_wkalrm *alrm);
@@ -205,39 +227,6 @@ static inline bool is_leap_year(unsigned int year)
return (!(year % 4) && (year % 100)) || !(year % 400);
}
-/* Determine if we can call to driver to set the time. Drivers can only be
- * called to set a second aligned time value, and the field set_offset_nsec
- * specifies how far away from the second aligned time to call the driver.
- *
- * This also computes 'to_set' which is the time we are trying to set, and has
- * a zero in tv_nsecs, such that:
- * to_set - set_delay_nsec == now +/- FUZZ
- *
- */
-static inline bool rtc_tv_nsec_ok(s64 set_offset_nsec,
- struct timespec64 *to_set,
- const struct timespec64 *now)
-{
- /* Allowed error in tv_nsec, arbitarily set to 5 jiffies in ns. */
- const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5;
- struct timespec64 delay = {.tv_sec = 0,
- .tv_nsec = set_offset_nsec};
-
- *to_set = timespec64_add(*now, delay);
-
- if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) {
- to_set->tv_nsec = 0;
- return true;
- }
-
- if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) {
- to_set->tv_sec++;
- to_set->tv_nsec = 0;
- return true;
- }
- return false;
-}
-
#define rtc_register_device(device) \
__rtc_register_device(THIS_MODULE, device)
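To make the new comment concrete, a worked instance (numbers only, not from the patch): with the default set_offset_nsec = NSEC_PER_SEC + 10 ms and a target of t2 = 10.0 s, the core schedules the write at tsched = 10.0 s - 1.010 s = 8.990 s and writes newval = tsched + set_offset_nsec - NSEC_PER_SEC = 9 s. The 10 ms of transport land the write at t1 = 9.0 s, and an RTC that increments exactly one second after a write then shows 10 at t2 = 10.0 s; the constraint (tsched + set_offset_nsec) % NSEC_PER_SEC == 0 holds since 8.990 s + 1.010 s = 10.000 s.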
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index 745f5e73f99a..f895ccabbe29 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -1174,6 +1174,7 @@ struct rtsx_pcr {
struct delayed_work carddet_work;
struct delayed_work idle_work;
+ struct delayed_work rtd3_work;
spinlock_t lock;
struct mutex pcr_mutex;
@@ -1183,6 +1184,7 @@ struct rtsx_pcr {
unsigned int cur_clock;
bool remove_pci;
bool msi_en;
+ bool is_runtime_suspended;
#define EXTRA_CAPS_SD_SDR50 (1 << 0)
#define EXTRA_CAPS_SD_SDR104 (1 << 1)
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 25e3fde85617..4c715be48717 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -123,6 +123,7 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
* lock for reading
*/
extern void down_read(struct rw_semaphore *sem);
+extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
extern int __must_check down_read_killable(struct rw_semaphore *sem);
/*
@@ -171,6 +172,7 @@ extern void downgrade_write(struct rw_semaphore *sem);
* See Documentation/locking/lockdep-design.rst for more details.)
*/
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
+extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
@@ -191,6 +193,7 @@ extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
#else
# define down_read_nested(sem, subclass) down_read(sem)
+# define down_read_killable_nested(sem, subclass) down_read_killable(sem)
# define down_write_nest_lock(sem, nest_lock) down_write(sem)
# define down_write_nested(sem, subclass) down_write(sem)
# define down_write_killable_nested(sem, subclass) down_write_killable(sem)
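The interruptible variant follows the killable one's convention (0 on success, a negative error if the sleep was interrupted); a usage sketch:

        static int read_shared_state(struct rw_semaphore *sem)
        {
                int err = down_read_interruptible(sem);

                if (err)
                        return err;     /* e.g. -EINTR: signal while waiting */
                /* ... read the protected state ... */
                up_read(sem);
                return 0;
        }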
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 36c47e7e66a2..6f70572b2938 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -19,12 +19,6 @@ struct scatterlist {
};
/*
- * Since the above length field is an unsigned int, below we define the maximum
- * length in bytes that can be stored in one scatterlist entry.
- */
-#define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK)
-
-/*
* These macros should be used after a dma_map_sg call has been done
* to get bus addresses of each of the SG entries and their lengths.
* You should only work with the number of sg entries dma_map_sg
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 76cd21fa5501..51d535b69bd6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -28,12 +28,14 @@
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
+#include <linux/syscall_user_dispatch.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>
#include <linux/seqlock.h>
#include <linux/kcsan.h>
+#include <asm/kmap_size.h>
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
@@ -637,6 +639,13 @@ struct wake_q_node {
struct wake_q_node *next;
};
+struct kmap_ctrl {
+#ifdef CONFIG_KMAP_LOCAL
+ int idx;
+ pte_t pteval[KM_MAX_IDX];
+#endif
+};
+
struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
@@ -722,6 +731,11 @@ struct task_struct {
int nr_cpus_allowed;
const cpumask_t *cpus_ptr;
cpumask_t cpus_mask;
+ void *migration_pending;
+#ifdef CONFIG_SMP
+ unsigned short migration_disabled;
+#endif
+ unsigned short migration_flags;
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
@@ -987,6 +1001,7 @@ struct task_struct {
unsigned int sessionid;
#endif
struct seccomp seccomp;
+ struct syscall_user_dispatch syscall_dispatch;
/* Thread group tracking: */
u64 parent_exec_id;
@@ -1311,6 +1326,7 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
+ struct kmap_ctrl kmap_ctrl;
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
@@ -1348,6 +1364,10 @@ struct task_struct {
struct callback_head mce_kill_me;
#endif
+#ifdef CONFIG_KRETPROBES
+ struct llist_head kretprobe_instances;
+#endif
+
/*
* New fields for task_struct should be added above here, so that
* they are included in the randomized portion of task_struct.
diff --git a/include/linux/sched/hotplug.h b/include/linux/sched/hotplug.h
index 9a62ffdd296f..412cdaba33eb 100644
--- a/include/linux/sched/hotplug.h
+++ b/include/linux/sched/hotplug.h
@@ -11,8 +11,10 @@ extern int sched_cpu_activate(unsigned int cpu);
extern int sched_cpu_deactivate(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
+extern int sched_cpu_wait_empty(unsigned int cpu);
extern int sched_cpu_dying(unsigned int cpu);
#else
+# define sched_cpu_wait_empty NULL
# define sched_cpu_dying NULL
#endif
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index d5ece7a9a403..1ae08b8462a4 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -181,6 +181,22 @@ static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif
/**
+ * might_alloc - Mark possible allocation sites
+ * @gfp_mask: gfp_t flags that would be used to allocate
+ *
+ * Similar to might_sleep() and other annotations, this can be used in functions
+ * that might allocate, but often don't. Compiles to nothing without
+ * CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp allows blocking.
+ */
+static inline void might_alloc(gfp_t gfp_mask)
+{
+ fs_reclaim_acquire(gfp_mask);
+ fs_reclaim_release(gfp_mask);
+
+ might_sleep_if(gfpflags_allow_blocking(gfp_mask));
+}
+
+/**
* memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
*
* This functions marks the beginning of the GFP_NOIO allocation scope.
@@ -347,6 +363,8 @@ static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
extern void membarrier_exec_mmap(struct mm_struct *mm);
+extern void membarrier_update_current_mm(struct mm_struct *next_mm);
+
#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
@@ -361,6 +379,9 @@ static inline void membarrier_exec_mmap(struct mm_struct *mm)
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
+static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
+{
+}
#endif
#endif /* _LINUX_SCHED_MM_H */
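A sketch of might_alloc() in use: a helper whose fast path never allocates still gets the lockdep coverage on every call. struct my_cache is invented for illustration:

        static void *get_buf(struct my_cache *c, gfp_t gfp)
        {
                might_alloc(gfp);       /* checked even when the fast path hits */

                if (c->cached)          /* common case: no allocation */
                        return c->cached;
                return kmalloc(c->obj_size, gfp);
        }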
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 1bad18a1d8ba..bd5afa076189 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -353,11 +353,25 @@ static inline int restart_syscall(void)
return -ERESTARTNOINTR;
}
-static inline int signal_pending(struct task_struct *p)
+static inline int task_sigpending(struct task_struct *p)
{
return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}
+static inline int signal_pending(struct task_struct *p)
+{
+#if defined(TIF_NOTIFY_SIGNAL)
+ /*
+ * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same
+ * behavior in terms of ensuring that we break out of wait loops
+ * so that notify signal callbacks can be processed.
+ */
+ if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL)))
+ return 1;
+#endif
+ return task_sigpending(p);
+}
+
static inline int __fatal_signal_pending(struct task_struct *p)
{
return unlikely(sigismember(&p->pending.signal, SIGKILL));
@@ -365,7 +379,7 @@ static inline int __fatal_signal_pending(struct task_struct *p)
static inline int fatal_signal_pending(struct task_struct *p)
{
- return signal_pending(p) && __fatal_signal_pending(p);
+ return task_sigpending(p) && __fatal_signal_pending(p);
}
static inline int signal_pending_state(long state, struct task_struct *p)
@@ -502,7 +516,7 @@ extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);
static inline void restore_saved_sigmask_unless(bool interrupted)
{
if (interrupted)
- WARN_ON(!test_thread_flag(TIF_SIGPENDING));
+ WARN_ON(!signal_pending(current));
else
restore_saved_sigmask();
}
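The practical effect of the split, as a sketch: interruptible wait loops keep using signal_pending() and now also break on TIF_NOTIFY_SIGNAL, while fatal_signal_pending() looks only at genuinely pending signals via task_sigpending():

        static int wait_for_done(struct completion *done)
        {
                while (!try_wait_for_completion(done)) {
                        /* breaks on real signals and on TIF_NOTIFY_SIGNAL */
                        if (signal_pending(current))
                                return -ERESTARTSYS;
                        schedule_timeout_interruptible(HZ / 10);
                }
                return 0;
        }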
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 85fb2f34c59b..c0f71f2e7160 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -47,9 +47,7 @@ extern spinlock_t mmlist_lock;
extern union thread_union init_thread_union;
extern struct task_struct init_task;
-#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
-#endif /* #ifdef CONFIG_PROVE_RCU */
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 9ef7bf686a9f..8f0f778b7c91 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -225,6 +225,14 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
#endif /* !CONFIG_SMP */
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+extern void rebuild_sched_domains_energy(void);
+#else
+static inline void rebuild_sched_domains_energy(void)
+{
+}
+#endif
+
#ifndef arch_scale_cpu_capacity
/**
* arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
diff --git a/include/linux/scs.h b/include/linux/scs.h
index 6dec390cf154..18122d9e17ff 100644
--- a/include/linux/scs.h
+++ b/include/linux/scs.h
@@ -15,24 +15,18 @@
#ifdef CONFIG_SHADOW_CALL_STACK
-/*
- * In testing, 1 KiB shadow stack size (i.e. 128 stack frames on a 64-bit
- * architecture) provided ~40% safety margin on stack usage while keeping
- * memory allocation overhead reasonable.
- */
-#define SCS_SIZE SZ_1K
+#define SCS_ORDER 0
+#define SCS_SIZE (PAGE_SIZE << SCS_ORDER)
#define GFP_SCS (GFP_KERNEL | __GFP_ZERO)
/* An illegal pointer value to mark the end of the shadow stack. */
#define SCS_END_MAGIC (0x5f6UL + POISON_POINTER_DELTA)
-/* Allocate a static per-CPU shadow stack */
-#define DEFINE_SCS(name) \
- DEFINE_PER_CPU(unsigned long [SCS_SIZE/sizeof(long)], name) \
-
#define task_scs(tsk) (task_thread_info(tsk)->scs_base)
#define task_scs_sp(tsk) (task_thread_info(tsk)->scs_sp)
+void *scs_alloc(int node);
+void scs_free(void *s);
void scs_init(void);
int scs_prepare(struct task_struct *tsk, int node);
void scs_release(struct task_struct *tsk);
@@ -61,6 +55,8 @@ static inline bool task_scs_end_corrupted(struct task_struct *tsk)
#else /* CONFIG_SHADOW_CALL_STACK */
+static inline void *scs_alloc(int node) { return NULL; }
+static inline void scs_free(void *s) {}
static inline void scs_init(void) {}
static inline void scs_task_reset(struct task_struct *tsk) {}
static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; }
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index 76731230bbc5..bb1926589693 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -482,11 +482,13 @@ enum sctp_error {
* 11 Restart of an association with new addresses
* 12 User Initiated Abort
* 13 Protocol Violation
+ * 14 Restart of an Association with New Encapsulation Port
*/
SCTP_ERROR_RESTART = cpu_to_be16(0x0b),
SCTP_ERROR_USER_ABORT = cpu_to_be16(0x0c),
SCTP_ERROR_PROTO_VIOLATION = cpu_to_be16(0x0d),
+ SCTP_ERROR_NEW_ENCAP_PORT = cpu_to_be16(0x0e),
/* ADDIP Section 3.3 New Error Causes
*
@@ -793,4 +795,22 @@ enum {
SCTP_FLOWLABEL_VAL_MASK = 0xfffff
};
+/* UDP Encapsulation
+ * draft-tuexen-tsvwg-sctp-udp-encaps-cons-03.html#section-4-4
+ *
+ * The error cause indicating an "Restart of an Association with
+ * New Encapsulation Port"
+ *
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Cause Code = 14 | Cause Length = 8 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Current Encapsulation Port | New Encapsulation Port |
+ * +-------------------------------+-------------------------------+
+ */
+struct sctp_new_encap_port_hdr {
+ __be16 cur_port;
+ __be16 new_port;
+};
+
#endif /* __LINUX_SCTP_H__ */
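Both ports in the new cause are big endian on the wire, so filling it is a pair of cpu_to_be16() conversions; a sketch:

        static void fill_new_encap_port(struct sctp_new_encap_port_hdr *hdr,
                                        u16 cur_port, u16 new_port)
        {
                hdr->cur_port = cpu_to_be16(cur_port);  /* e.g. 9899, the UDP encap port */
                hdr->new_port = cpu_to_be16(new_port);
        }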
diff --git a/include/linux/sdla.h b/include/linux/sdla.h
deleted file mode 100644
index 00e8b3b614f0..000000000000
--- a/include/linux/sdla.h
+++ /dev/null
@@ -1,240 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. INET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * Global definitions for the Frame relay interface.
- *
- * Version: @(#)if_ifrad.h 0.20 13 Apr 96
- *
- * Author: Mike McLagan <mike.mclagan@linux.org>
- *
- * Changes:
- * 0.15 Mike McLagan Structure packing
- *
- * 0.20 Mike McLagan New flags for S508 buffer handling
- */
-#ifndef SDLA_H
-#define SDLA_H
-
-#include <uapi/linux/sdla.h>
-
-
-/* important Z80 window addresses */
-#define SDLA_CONTROL_WND 0xE000
-
-#define SDLA_502_CMD_BUF 0xEF60
-#define SDLA_502_RCV_BUF 0xA900
-#define SDLA_502_TXN_AVAIL 0xFFF1
-#define SDLA_502_RCV_AVAIL 0xFFF2
-#define SDLA_502_EVENT_FLAGS 0xFFF3
-#define SDLA_502_MDM_STATUS 0xFFF4
-#define SDLA_502_IRQ_INTERFACE 0xFFFD
-#define SDLA_502_IRQ_PERMISSION 0xFFFE
-#define SDLA_502_DATA_OFS 0x0010
-
-#define SDLA_508_CMD_BUF 0xE000
-#define SDLA_508_TXBUF_INFO 0xF100
-#define SDLA_508_RXBUF_INFO 0xF120
-#define SDLA_508_EVENT_FLAGS 0xF003
-#define SDLA_508_MDM_STATUS 0xF004
-#define SDLA_508_IRQ_INTERFACE 0xF010
-#define SDLA_508_IRQ_PERMISSION 0xF011
-#define SDLA_508_TSE_OFFSET 0xF012
-
-/* Event flags */
-#define SDLA_EVENT_STATUS 0x01
-#define SDLA_EVENT_DLCI_STATUS 0x02
-#define SDLA_EVENT_BAD_DLCI 0x04
-#define SDLA_EVENT_LINK_DOWN 0x40
-
-/* IRQ Trigger flags */
-#define SDLA_INTR_RX 0x01
-#define SDLA_INTR_TX 0x02
-#define SDLA_INTR_MODEM 0x04
-#define SDLA_INTR_COMPLETE 0x08
-#define SDLA_INTR_STATUS 0x10
-#define SDLA_INTR_TIMER 0x20
-
-/* DLCI status bits */
-#define SDLA_DLCI_DELETED 0x01
-#define SDLA_DLCI_ACTIVE 0x02
-#define SDLA_DLCI_WAITING 0x04
-#define SDLA_DLCI_NEW 0x08
-#define SDLA_DLCI_INCLUDED 0x40
-
-/* valid command codes */
-#define SDLA_INFORMATION_WRITE 0x01
-#define SDLA_INFORMATION_READ 0x02
-#define SDLA_ISSUE_IN_CHANNEL_SIGNAL 0x03
-#define SDLA_SET_DLCI_CONFIGURATION 0x10
-#define SDLA_READ_DLCI_CONFIGURATION 0x11
-#define SDLA_DISABLE_COMMUNICATIONS 0x12
-#define SDLA_ENABLE_COMMUNICATIONS 0x13
-#define SDLA_READ_DLC_STATUS 0x14
-#define SDLA_READ_DLC_STATISTICS 0x15
-#define SDLA_FLUSH_DLC_STATISTICS 0x16
-#define SDLA_LIST_ACTIVE_DLCI 0x17
-#define SDLA_FLUSH_INFORMATION_BUFFERS 0x18
-#define SDLA_ADD_DLCI 0x20
-#define SDLA_DELETE_DLCI 0x21
-#define SDLA_ACTIVATE_DLCI 0x22
-#define SDLA_DEACTIVATE_DLCI 0x23
-#define SDLA_READ_MODEM_STATUS 0x30
-#define SDLA_SET_MODEM_STATUS 0x31
-#define SDLA_READ_COMMS_ERR_STATS 0x32
-#define SDLA_FLUSH_COMMS_ERR_STATS 0x33
-#define SDLA_READ_CODE_VERSION 0x40
-#define SDLA_SET_IRQ_TRIGGER 0x50
-#define SDLA_GET_IRQ_TRIGGER 0x51
-
-/* In channel signal types */
-#define SDLA_ICS_LINK_VERIFY 0x02
-#define SDLA_ICS_STATUS_ENQ 0x03
-
-/* modem status flags */
-#define SDLA_MODEM_DTR_HIGH 0x01
-#define SDLA_MODEM_RTS_HIGH 0x02
-#define SDLA_MODEM_DCD_HIGH 0x08
-#define SDLA_MODEM_CTS_HIGH 0x20
-
-/* used for RET_MODEM interpretation */
-#define SDLA_MODEM_DCD_LOW 0x01
-#define SDLA_MODEM_CTS_LOW 0x02
-
-/* return codes */
-#define SDLA_RET_OK 0x00
-#define SDLA_RET_COMMUNICATIONS 0x01
-#define SDLA_RET_CHANNEL_INACTIVE 0x02
-#define SDLA_RET_DLCI_INACTIVE 0x03
-#define SDLA_RET_DLCI_CONFIG 0x04
-#define SDLA_RET_BUF_TOO_BIG 0x05
-#define SDLA_RET_NO_DATA 0x05
-#define SDLA_RET_BUF_OVERSIZE 0x06
-#define SDLA_RET_CIR_OVERFLOW 0x07
-#define SDLA_RET_NO_BUFS 0x08
-#define SDLA_RET_TIMEOUT 0x0A
-#define SDLA_RET_MODEM 0x10
-#define SDLA_RET_CHANNEL_OFF 0x11
-#define SDLA_RET_CHANNEL_ON 0x12
-#define SDLA_RET_DLCI_STATUS 0x13
-#define SDLA_RET_DLCI_UNKNOWN 0x14
-#define SDLA_RET_COMMAND_INVALID 0x1F
-
-/* Configuration flags */
-#define SDLA_DIRECT_RECV 0x0080
-#define SDLA_TX_NO_EXCEPT 0x0020
-#define SDLA_NO_ICF_MSGS 0x1000
-#define SDLA_TX50_RX50 0x0000
-#define SDLA_TX70_RX30 0x2000
-#define SDLA_TX30_RX70 0x4000
-
-/* IRQ selection flags */
-#define SDLA_IRQ_RECEIVE 0x01
-#define SDLA_IRQ_TRANSMIT 0x02
-#define SDLA_IRQ_MODEM_STAT 0x04
-#define SDLA_IRQ_COMMAND 0x08
-#define SDLA_IRQ_CHANNEL 0x10
-#define SDLA_IRQ_TIMER 0x20
-
-/* definitions for PC memory mapping */
-#define SDLA_8K_WINDOW 0x01
-#define SDLA_S502_SEG_A 0x10
-#define SDLA_S502_SEG_C 0x20
-#define SDLA_S502_SEG_D 0x00
-#define SDLA_S502_SEG_E 0x30
-#define SDLA_S507_SEG_A 0x00
-#define SDLA_S507_SEG_B 0x40
-#define SDLA_S507_SEG_C 0x80
-#define SDLA_S507_SEG_E 0xC0
-#define SDLA_S508_SEG_A 0x00
-#define SDLA_S508_SEG_C 0x10
-#define SDLA_S508_SEG_D 0x08
-#define SDLA_S508_SEG_E 0x18
-
-/* SDLA adapter port constants */
-#define SDLA_IO_EXTENTS 0x04
-
-#define SDLA_REG_CONTROL 0x00
-#define SDLA_REG_PC_WINDOW 0x01 /* offset for PC window select latch */
-#define SDLA_REG_Z80_WINDOW 0x02 /* offset for Z80 window select latch */
-#define SDLA_REG_Z80_CONTROL 0x03 /* offset for Z80 control latch */
-
-#define SDLA_S502_STS 0x00 /* status reg for 502, 502E, 507 */
-#define SDLA_S508_GNRL 0x00 /* general purp. reg for 508 */
-#define SDLA_S508_STS 0x01 /* status reg for 508 */
-#define SDLA_S508_IDR 0x02 /* ID reg for 508 */
-
-/* control register flags */
-#define SDLA_S502A_START 0x00 /* start the CPU */
-#define SDLA_S502A_INTREQ 0x02
-#define SDLA_S502A_INTEN 0x04
-#define SDLA_S502A_HALT 0x08 /* halt the CPU */
-#define SDLA_S502A_NMI 0x10 /* issue an NMI to the CPU */
-
-#define SDLA_S502E_CPUEN 0x01
-#define SDLA_S502E_ENABLE 0x02
-#define SDLA_S502E_INTACK 0x04
-
-#define SDLA_S507_ENABLE 0x01
-#define SDLA_S507_IRQ3 0x00
-#define SDLA_S507_IRQ4 0x20
-#define SDLA_S507_IRQ5 0x40
-#define SDLA_S507_IRQ7 0x60
-#define SDLA_S507_IRQ10 0x80
-#define SDLA_S507_IRQ11 0xA0
-#define SDLA_S507_IRQ12 0xC0
-#define SDLA_S507_IRQ15 0xE0
-
-#define SDLA_HALT 0x00
-#define SDLA_CPUEN 0x02
-#define SDLA_MEMEN 0x04
-#define SDLA_S507_EPROMWR 0x08
-#define SDLA_S507_EPROMCLK 0x10
-#define SDLA_S508_INTRQ 0x08
-#define SDLA_S508_INTEN 0x10
-
-struct sdla_cmd {
- char opp_flag;
- char cmd;
- short length;
- char retval;
- short dlci;
- char flags;
- short rxlost_int;
- long rxlost_app;
- char reserve[2];
- char data[SDLA_MAX_DATA]; /* transfer data buffer */
-} __attribute__((packed));
-
-struct intr_info {
- char flags;
- short txlen;
- char irq;
- char flags2;
- short timeout;
-} __attribute__((packed));
-
-/* found in the 508's control window at RXBUF_INFO */
-struct buf_info {
- unsigned short rse_num;
- unsigned long rse_base;
- unsigned long rse_next;
- unsigned long buf_base;
- unsigned short reserved;
- unsigned long buf_top;
-} __attribute__((packed));
-
-/* structure pointed to by rse_base in RXBUF_INFO struct */
-struct buf_entry {
- char opp_flag;
- short length;
- short dlci;
- char flags;
- short timestamp;
- short reserved[2];
- long buf_addr;
-} __attribute__((packed));
-
-#endif
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 02aef2844c38..47763f3999f7 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -42,7 +42,7 @@ struct seccomp {
extern int __secure_computing(const struct seccomp_data *sd);
static inline int secure_computing(void)
{
- if (unlikely(test_thread_flag(TIF_SECCOMP)))
+ if (unlikely(test_syscall_work(SECCOMP)))
return __secure_computing(NULL);
return 0;
}
diff --git a/include/linux/security.h b/include/linux/security.h
index bc2725491560..77c7fe58d8a0 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -127,6 +127,7 @@ enum lockdown_reason {
LOCKDOWN_PERF,
LOCKDOWN_TRACEFS,
LOCKDOWN_XMON_RW,
+ LOCKDOWN_XFRM_SECRET,
LOCKDOWN_CONFIDENTIALITY_MAX,
};
@@ -869,7 +870,7 @@ static inline int security_inode_killpriv(struct dentry *dentry)
static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc)
{
- return -EOPNOTSUPP;
+ return cap_inode_getsecurity(inode, name, buffer, alloc);
}
static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
@@ -1358,7 +1359,7 @@ void security_sk_clone(const struct sock *sk, struct sock *newsk);
void security_sk_classify_flow(struct sock *sk, struct flowi *fl);
void security_req_classify_flow(const struct request_sock *req, struct flowi *fl);
void security_sock_graft(struct sock*sk, struct socket *parent);
-int security_inet_conn_request(struct sock *sk,
+int security_inet_conn_request(const struct sock *sk,
struct sk_buff *skb, struct request_sock *req);
void security_inet_csk_clone(struct sock *newsk,
const struct request_sock *req);
@@ -1519,7 +1520,7 @@ static inline void security_sock_graft(struct sock *sk, struct socket *parent)
{
}
-static inline int security_inet_conn_request(struct sock *sk,
+static inline int security_inet_conn_request(const struct sock *sk,
struct sk_buff *skb, struct request_sock *req)
{
return 0;
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index cbfc78b92b65..2f7bb92b4c9e 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -307,10 +307,10 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
__seqprop_case((s), mutex, prop), \
__seqprop_case((s), ww_mutex, prop))
-#define __seqcount_ptr(s) __seqprop(s, ptr)
-#define __seqcount_sequence(s) __seqprop(s, sequence)
-#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible)
-#define __seqcount_assert_lock_held(s) __seqprop(s, assert)
+#define seqprop_ptr(s) __seqprop(s, ptr)
+#define seqprop_sequence(s) __seqprop(s, sequence)
+#define seqprop_preemptible(s) __seqprop(s, preemptible)
+#define seqprop_assert(s) __seqprop(s, assert)
/**
* __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
@@ -328,13 +328,13 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
*/
#define __read_seqcount_begin(s) \
({ \
- unsigned seq; \
+ unsigned __seq; \
\
- while ((seq = __seqcount_sequence(s)) & 1) \
+ while ((__seq = seqprop_sequence(s)) & 1) \
cpu_relax(); \
\
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
- seq; \
+ __seq; \
})
/**
@@ -345,10 +345,10 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
*/
#define raw_read_seqcount_begin(s) \
({ \
- unsigned seq = __read_seqcount_begin(s); \
+ unsigned _seq = __read_seqcount_begin(s); \
\
smp_rmb(); \
- seq; \
+ _seq; \
})
/**
@@ -359,7 +359,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
*/
#define read_seqcount_begin(s) \
({ \
- seqcount_lockdep_reader_access(__seqcount_ptr(s)); \
+ seqcount_lockdep_reader_access(seqprop_ptr(s)); \
raw_read_seqcount_begin(s); \
})
@@ -376,11 +376,11 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
*/
#define raw_read_seqcount(s) \
({ \
- unsigned seq = __seqcount_sequence(s); \
+ unsigned __seq = seqprop_sequence(s); \
\
smp_rmb(); \
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
- seq; \
+ __seq; \
})
/**
@@ -425,9 +425,9 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
* Return: true if a read section retry is required, else false
*/
#define __read_seqcount_retry(s, start) \
- __read_seqcount_t_retry(__seqcount_ptr(s), start)
+ do___read_seqcount_retry(seqprop_ptr(s), start)
-static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
+static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
kcsan_atomic_next(0);
return unlikely(READ_ONCE(s->sequence) != start);
@@ -445,27 +445,29 @@ static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
* Return: true if a read section retry is required, else false
*/
#define read_seqcount_retry(s, start) \
- read_seqcount_t_retry(__seqcount_ptr(s), start)
+ do_read_seqcount_retry(seqprop_ptr(s), start)
-static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
+static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{
smp_rmb();
- return __read_seqcount_t_retry(s, start);
+ return do___read_seqcount_retry(s, start);
}
/**
* raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * Context: check write_seqcount_begin()
*/
#define raw_write_seqcount_begin(s) \
do { \
- if (__seqcount_lock_preemptible(s)) \
+ if (seqprop_preemptible(s)) \
preempt_disable(); \
\
- raw_write_seqcount_t_begin(__seqcount_ptr(s)); \
+ do_raw_write_seqcount_begin(seqprop_ptr(s)); \
} while (0)
-static inline void raw_write_seqcount_t_begin(seqcount_t *s)
+static inline void do_raw_write_seqcount_begin(seqcount_t *s)
{
kcsan_nestable_atomic_begin();
s->sequence++;
@@ -475,16 +477,18 @@ static inline void raw_write_seqcount_t_begin(seqcount_t *s)
/**
* raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * Context: see write_seqcount_end()
*/
#define raw_write_seqcount_end(s) \
do { \
- raw_write_seqcount_t_end(__seqcount_ptr(s)); \
+ do_raw_write_seqcount_end(seqprop_ptr(s)); \
\
- if (__seqcount_lock_preemptible(s)) \
+ if (seqprop_preemptible(s)) \
preempt_enable(); \
} while (0)
-static inline void raw_write_seqcount_t_end(seqcount_t *s)
+static inline void do_raw_write_seqcount_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
@@ -498,20 +502,21 @@ static inline void raw_write_seqcount_t_end(seqcount_t *s)
* @subclass: lockdep nesting level
*
* See Documentation/locking/lockdep-design.rst
+ * Context: see write_seqcount_begin()
*/
#define write_seqcount_begin_nested(s, subclass) \
do { \
- __seqcount_assert_lock_held(s); \
+ seqprop_assert(s); \
\
- if (__seqcount_lock_preemptible(s)) \
+ if (seqprop_preemptible(s)) \
preempt_disable(); \
\
- write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass); \
+ do_write_seqcount_begin_nested(seqprop_ptr(s), subclass); \
} while (0)
-static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
+static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
- raw_write_seqcount_t_begin(s);
+ do_raw_write_seqcount_begin(s);
seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}
@@ -519,46 +524,46 @@ static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
* write_seqcount_begin() - start a seqcount_t write side critical section
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * write_seqcount_begin opens a write side critical section of the given
- * seqcount_t.
- *
- * Context: seqcount_t write side critical sections must be serialized and
- * non-preemptible. If readers can be invoked from hardirq or softirq
+ * Context: sequence counter write side sections must be serialized and
+ * non-preemptible. Preemption is automatically disabled if and only
+ * if the seqcount has an associated, preemptible write serialization
+ * lock. If readers can be invoked from hardirq or softirq
* context, interrupts or bottom halves must be respectively disabled.
*/
#define write_seqcount_begin(s) \
do { \
- __seqcount_assert_lock_held(s); \
+ seqprop_assert(s); \
\
- if (__seqcount_lock_preemptible(s)) \
+ if (seqprop_preemptible(s)) \
preempt_disable(); \
\
- write_seqcount_t_begin(__seqcount_ptr(s)); \
+ do_write_seqcount_begin(seqprop_ptr(s)); \
} while (0)
-static inline void write_seqcount_t_begin(seqcount_t *s)
+static inline void do_write_seqcount_begin(seqcount_t *s)
{
- write_seqcount_t_begin_nested(s, 0);
+ do_write_seqcount_begin_nested(s, 0);
}
/**
* write_seqcount_end() - end a seqcount_t write side critical section
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * The write section must've been opened with write_seqcount_begin().
+ * Context: Preemption is automatically re-enabled if and only if the
+ * seqcount has an associated, preemptible write serialization lock.
*/
#define write_seqcount_end(s) \
do { \
- write_seqcount_t_end(__seqcount_ptr(s)); \
+ do_write_seqcount_end(seqprop_ptr(s)); \
\
- if (__seqcount_lock_preemptible(s)) \
+ if (seqprop_preemptible(s)) \
preempt_enable(); \
} while (0)
-static inline void write_seqcount_t_end(seqcount_t *s)
+static inline void do_write_seqcount_end(seqcount_t *s)
{
seqcount_release(&s->dep_map, _RET_IP_);
- raw_write_seqcount_t_end(s);
+ do_raw_write_seqcount_end(s);
}
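
To make the Context notes above concrete: with a lock-associated variant such as seqcount_mutex_t, write_seqcount_begin() asserts the lock is held and disables preemption across the section, since a mutex is preemptible. A hedged sketch; foo_lock, foo_seq and the fields are illustrative:

    #include <linux/mutex.h>
    #include <linux/seqlock.h>

    static DEFINE_MUTEX(foo_lock);      /* hypothetical */
    static seqcount_mutex_t foo_seq;    /* seqcount_mutex_init(&foo_seq, &foo_lock) at init */
    static u64 foo_a, foo_b;

    static void foo_update(u64 a, u64 b)
    {
            mutex_lock(&foo_lock);
            /* Asserts foo_lock is held and disables preemption. */
            write_seqcount_begin(&foo_seq);
            foo_a = a;
            foo_b = b;
            write_seqcount_end(&foo_seq);   /* re-enables preemption */
            mutex_unlock(&foo_lock);
    }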
/**
@@ -603,9 +608,9 @@ static inline void write_seqcount_t_end(seqcount_t *s)
* }
*/
#define raw_write_seqcount_barrier(s) \
- raw_write_seqcount_t_barrier(__seqcount_ptr(s))
+ do_raw_write_seqcount_barrier(seqprop_ptr(s))
-static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
+static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
{
kcsan_nestable_atomic_begin();
s->sequence++;
@@ -623,9 +628,9 @@ static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
* will complete successfully and see data older than this.
*/
#define write_seqcount_invalidate(s) \
- write_seqcount_t_invalidate(__seqcount_ptr(s))
+ do_write_seqcount_invalidate(seqprop_ptr(s))
-static inline void write_seqcount_t_invalidate(seqcount_t *s)
+static inline void do_write_seqcount_invalidate(seqcount_t *s)
{
smp_wmb();
kcsan_nestable_atomic_begin();
@@ -865,9 +870,9 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
}
/*
- * For all seqlock_t write side functions, use write_seqcount_*t*_begin()
- * instead of the generic write_seqcount_begin(). This way, no redundant
- * lockdep_assert_held() checks are added.
+ * For all seqlock_t write side functions, use the internal
+ * do_write_seqcount_begin() instead of generic write_seqcount_begin().
+ * This way, no redundant lockdep_assert_held() checks are added.
*/
/**
@@ -886,7 +891,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
- write_seqcount_t_begin(&sl->seqcount.seqcount);
+ do_write_seqcount_begin(&sl->seqcount.seqcount);
}
/**
@@ -898,7 +903,7 @@ static inline void write_seqlock(seqlock_t *sl)
*/
static inline void write_sequnlock(seqlock_t *sl)
{
- write_seqcount_t_end(&sl->seqcount.seqcount);
+ do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock(&sl->lock);
}
@@ -912,7 +917,7 @@ static inline void write_sequnlock(seqlock_t *sl)
static inline void write_seqlock_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
- write_seqcount_t_begin(&sl->seqcount.seqcount);
+ do_write_seqcount_begin(&sl->seqcount.seqcount);
}
/**
@@ -925,7 +930,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
*/
static inline void write_sequnlock_bh(seqlock_t *sl)
{
- write_seqcount_t_end(&sl->seqcount.seqcount);
+ do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_bh(&sl->lock);
}
@@ -939,7 +944,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
static inline void write_seqlock_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
- write_seqcount_t_begin(&sl->seqcount.seqcount);
+ do_write_seqcount_begin(&sl->seqcount.seqcount);
}
/**
@@ -951,7 +956,7 @@ static inline void write_seqlock_irq(seqlock_t *sl)
*/
static inline void write_sequnlock_irq(seqlock_t *sl)
{
- write_seqcount_t_end(&sl->seqcount.seqcount);
+ do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irq(&sl->lock);
}
@@ -960,7 +965,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
unsigned long flags;
spin_lock_irqsave(&sl->lock, flags);
- write_seqcount_t_begin(&sl->seqcount.seqcount);
+ do_write_seqcount_begin(&sl->seqcount.seqcount);
return flags;
}
@@ -989,7 +994,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
- write_seqcount_t_end(&sl->seqcount.seqcount);
+ do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
}
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 2b70f736b091..9e655055112d 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -187,4 +187,9 @@ extern void serial8250_set_isa_configurator(void (*v)
(int port, struct uart_port *up,
u32 *capabilities));
+#ifdef CONFIG_SERIAL_8250_RT288X
+unsigned int au_serial_in(struct uart_port *p, int offset);
+void au_serial_out(struct uart_port *p, int offset, int value);
+#endif
+
#endif
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index ff63c2963359..e1b684e33841 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -357,8 +357,8 @@ struct earlycon_id {
int (*setup)(struct earlycon_device *, const char *options);
};
-extern const struct earlycon_id *__earlycon_table[];
-extern const struct earlycon_id *__earlycon_table_end[];
+extern const struct earlycon_id __earlycon_table[];
+extern const struct earlycon_id __earlycon_table_end[];
#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
#define EARLYCON_USED_OR_UNUSED __used
@@ -366,19 +366,13 @@ extern const struct earlycon_id *__earlycon_table_end[];
#define EARLYCON_USED_OR_UNUSED __maybe_unused
#endif
-#define _OF_EARLYCON_DECLARE(_name, compat, fn, unique_id) \
- static const struct earlycon_id unique_id \
- EARLYCON_USED_OR_UNUSED __initconst \
+#define OF_EARLYCON_DECLARE(_name, compat, fn) \
+ static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \
+ EARLYCON_USED_OR_UNUSED __section("__earlycon_table") \
+ __aligned(__alignof__(struct earlycon_id)) \
= { .name = __stringify(_name), \
.compatible = compat, \
- .setup = fn }; \
- static const struct earlycon_id EARLYCON_USED_OR_UNUSED \
- __section("__earlycon_table") \
- * const __PASTE(__p, unique_id) = &unique_id
-
-#define OF_EARLYCON_DECLARE(_name, compat, fn) \
- _OF_EARLYCON_DECLARE(_name, compat, fn, \
- __UNIQUE_ID(__earlycon_##_name))
+ .setup = fn }
#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn)
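
Driver usage is unchanged; only the emitted table entry differs. A hedged sketch of declaring an early console for a hypothetical device (all foo_* names and the compatible string are illustrative):

    #include <linux/console.h>
    #include <linux/serial_core.h>

    static void foo_earlycon_write(struct console *con, const char *s,
                                   unsigned int n)
    {
            /* push n bytes of s to the hypothetical UART FIFO */
    }

    static int __init foo_earlycon_setup(struct earlycon_device *dev,
                                         const char *options)
    {
            if (!dev->port.membase)
                    return -ENODEV;

            dev->con->write = foo_earlycon_write;
            return 0;
    }
    OF_EARLYCON_DECLARE(foo_uart, "acme,foo-uart", foo_earlycon_setup);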
diff --git a/include/linux/serial_pnx8xxx.h b/include/linux/serial_pnx8xxx.h
deleted file mode 100644
index 619d748dcd44..000000000000
--- a/include/linux/serial_pnx8xxx.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Embedded Alley Solutions, source@embeddedalley.com.
- */
-
-#ifndef _LINUX_SERIAL_PNX8XXX_H
-#define _LINUX_SERIAL_PNX8XXX_H
-
-#include <linux/serial_core.h>
-
-#define PNX8XXX_NR_PORTS 2
-
-struct pnx8xxx_port {
- struct uart_port port;
- struct timer_list timer;
- unsigned int old_status;
-};
-
-/* register offsets */
-#define PNX8XXX_LCR 0
-#define PNX8XXX_MCR 0x004
-#define PNX8XXX_BAUD 0x008
-#define PNX8XXX_CFG 0x00c
-#define PNX8XXX_FIFO 0x028
-#define PNX8XXX_ISTAT 0xfe0
-#define PNX8XXX_IEN 0xfe4
-#define PNX8XXX_ICLR 0xfe8
-#define PNX8XXX_ISET 0xfec
-#define PNX8XXX_PD 0xff4
-#define PNX8XXX_MID 0xffc
-
-#define PNX8XXX_UART_LCR_TXBREAK (1<<30)
-#define PNX8XXX_UART_LCR_PAREVN 0x10000000
-#define PNX8XXX_UART_LCR_PAREN 0x08000000
-#define PNX8XXX_UART_LCR_2STOPB 0x04000000
-#define PNX8XXX_UART_LCR_8BIT 0x01000000
-#define PNX8XXX_UART_LCR_TX_RST 0x00040000
-#define PNX8XXX_UART_LCR_RX_RST 0x00020000
-#define PNX8XXX_UART_LCR_RX_NEXT 0x00010000
-
-#define PNX8XXX_UART_MCR_SCR 0xFF000000
-#define PNX8XXX_UART_MCR_DCD 0x00800000
-#define PNX8XXX_UART_MCR_CTS 0x00100000
-#define PNX8XXX_UART_MCR_LOOP 0x00000010
-#define PNX8XXX_UART_MCR_RTS 0x00000002
-#define PNX8XXX_UART_MCR_DTR 0x00000001
-
-#define PNX8XXX_UART_INT_TX 0x00000080
-#define PNX8XXX_UART_INT_EMPTY 0x00000040
-#define PNX8XXX_UART_INT_RCVTO 0x00000020
-#define PNX8XXX_UART_INT_RX 0x00000010
-#define PNX8XXX_UART_INT_RXOVRN 0x00000008
-#define PNX8XXX_UART_INT_FRERR 0x00000004
-#define PNX8XXX_UART_INT_BREAK 0x00000002
-#define PNX8XXX_UART_INT_PARITY 0x00000001
-#define PNX8XXX_UART_INT_ALLRX 0x0000003F
-#define PNX8XXX_UART_INT_ALLTX 0x000000C0
-
-#define PNX8XXX_UART_FIFO_TXFIFO 0x001F0000
-#define PNX8XXX_UART_FIFO_TXFIFO_STA (0x1f<<16)
-#define PNX8XXX_UART_FIFO_RXBRK 0x00008000
-#define PNX8XXX_UART_FIFO_RXFE 0x00004000
-#define PNX8XXX_UART_FIFO_RXPAR 0x00002000
-#define PNX8XXX_UART_FIFO_RXFIFO 0x00001F00
-#define PNX8XXX_UART_FIFO_RBRTHR 0x000000FF
-
-#endif
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index 860e0f843c12..fe1aa4e54680 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -23,6 +23,11 @@ static inline int set_direct_map_default_noflush(struct page *page)
{
return 0;
}
+
+static inline bool kernel_page_present(struct page *page)
+{
+ return true;
+}
#endif
#ifndef set_mce_nospec
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index a5a5d1d4d7b1..d82b6f396588 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -67,7 +67,11 @@ extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
#ifdef CONFIG_SHMEM
-extern bool shmem_mapping(struct address_space *mapping);
+extern const struct address_space_operations shmem_aops;
+static inline bool shmem_mapping(struct address_space *mapping)
+{
+ return mapping->a_ops == &shmem_aops;
+}
#else
static inline bool shmem_mapping(struct address_space *mapping)
{
diff --git a/include/linux/signal.h b/include/linux/signal.h
index b256f9c65661..205526c4003a 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -469,4 +469,18 @@ struct seq_file;
extern void render_sigset_t(struct seq_file *, const char *, sigset_t *);
#endif
+#ifndef arch_untagged_si_addr
+/*
+ * Given a fault address and a signal and si_code which correspond to the
+ * _sigfault union member, returns the address that must appear in si_addr if
+ * the signal handler does not have SA_EXPOSE_TAGBITS enabled in sa_flags.
+ */
+static inline void __user *arch_untagged_si_addr(void __user *addr,
+ unsigned long sig,
+ unsigned long si_code)
+{
+ return addr;
+}
+#endif
+
#endif /* _LINUX_SIGNAL_H */
diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h
index f8a90ae9c6ec..68e06c75c5b2 100644
--- a/include/linux/signal_types.h
+++ b/include/linux/signal_types.h
@@ -68,4 +68,16 @@ struct ksignal {
int sig;
};
+#ifndef __ARCH_UAPI_SA_FLAGS
+#ifdef SA_RESTORER
+#define __ARCH_UAPI_SA_FLAGS SA_RESTORER
+#else
+#define __ARCH_UAPI_SA_FLAGS 0
+#endif
+#endif
+
+#define UAPI_SA_FLAGS \
+ (SA_NOCLDSTOP | SA_NOCLDWAIT | SA_SIGINFO | SA_ONSTACK | SA_RESTART | \
+ SA_NODEFER | SA_RESETHAND | SA_EXPOSE_TAGBITS | __ARCH_UAPI_SA_FLAGS)
+
#endif /* _LINUX_SIGNAL_TYPES_H */
diff --git a/include/linux/siox.h b/include/linux/siox.h
index da7225bf1877..6bfbda3f634c 100644
--- a/include/linux/siox.h
+++ b/include/linux/siox.h
@@ -36,7 +36,7 @@ bool siox_device_connected(struct siox_device *sdevice);
struct siox_driver {
int (*probe)(struct siox_device *sdevice);
- int (*remove)(struct siox_device *sdevice);
+ void (*remove)(struct siox_device *sdevice);
void (*shutdown)(struct siox_device *sdevice);
/*
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a828cf99c521..333bcdc39635 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -701,6 +701,7 @@ typedef unsigned char *sk_buff_data_t;
* @transport_header: Transport layer header
* @network_header: Network layer header
* @mac_header: Link layer header
+ * @kcov_handle: KCOV remote handle for remote coverage collection
* @tail: Tail pointer
* @end: End pointer
* @head: Head of buffer
@@ -904,6 +905,10 @@ struct sk_buff {
__u16 network_header;
__u16 mac_header;
+#ifdef CONFIG_KCOV
+ u64 kcov_handle;
+#endif
+
/* private: */
__u32 headers_end[0];
/* public: */
@@ -4605,5 +4610,22 @@ static inline void skb_reset_redirect(struct sk_buff *skb)
#endif
}
+static inline void skb_set_kcov_handle(struct sk_buff *skb,
+ const u64 kcov_handle)
+{
+#ifdef CONFIG_KCOV
+ skb->kcov_handle = kcov_handle;
+#endif
+}
+
+static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
+{
+#ifdef CONFIG_KCOV
+ return skb->kcov_handle;
+#else
+ return 0;
+#endif
+}
+
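
The accessors are intended for drivers that process a packet outside the context that received it, so remote KCOV coverage can still be attributed. A hedged sketch; the foo_* functions and the deferred-work plumbing are assumed:

    #include <linux/kcov.h>
    #include <linux/skbuff.h>

    /* RX path (sketch): tag the skb with the current remote handle. */
    static void foo_rx(struct sk_buff *skb)
    {
            skb_set_kcov_handle(skb, kcov_common_handle());
            /* ... queue skb for deferred processing ... */
    }

    /* Deferred context (sketch): attribute coverage to that handle. */
    static void foo_process(struct sk_buff *skb)
    {
            kcov_remote_start_common(skb_get_kcov_handle(skb));
            /* ... reassemble / parse the packet ... */
            kcov_remote_stop();
    }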
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index dd6897f62010..be4ba5867ac5 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -593,6 +593,24 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
}
/**
+ * krealloc_array - reallocate memory for an array.
+ * @p: pointer to the memory chunk to reallocate
+ * @new_n: new number of elements to alloc
+ * @new_size: new size of a single member of the array
+ * @flags: the type of memory to allocate (see kmalloc)
+ */
+static __must_check inline void *
+krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
+ return NULL;
+
+ return krealloc(p, bytes, flags);
+}
+
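
The helper mirrors kmalloc_array() for the realloc case: the multiplication is overflow-checked and the original buffer stays intact on failure. A hedged usage sketch; foo_grow and the u64 element type are illustrative:

    #include <linux/slab.h>

    /* Double the capacity of a hypothetical u64 array. */
    static int foo_grow(u64 **items, size_t *capacity)
    {
            u64 *grown;

            grown = krealloc_array(*items, *capacity * 2, sizeof(**items),
                                   GFP_KERNEL);
            if (!grown)
                    return -ENOMEM; /* *items is still valid here */

            *items = grown;
            *capacity *= 2;
            return 0;
    }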
+/**
* kcalloc - allocate memory for an array. The memory is set to zero.
* @n: number of elements.
* @size: element size.
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 9f13966d3d92..70c6f6284dcf 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -21,24 +21,23 @@ typedef bool (*smp_cond_func_t)(int cpu, void *info);
* structure shares (partial) layout with struct irq_work
*/
struct __call_single_data {
- union {
- struct __call_single_node node;
- struct {
- struct llist_node llist;
- unsigned int flags;
-#ifdef CONFIG_64BIT
- u16 src, dst;
-#endif
- };
- };
+ struct __call_single_node node;
smp_call_func_t func;
void *info;
};
+#define CSD_INIT(_func, _info) \
+ (struct __call_single_data){ .func = (_func), .info = (_info), }
+
/* Use __aligned() to avoid to use 2 cache lines for 1 csd */
typedef struct __call_single_data call_single_data_t
__aligned(sizeof(struct __call_single_data));
+#define INIT_CSD(_csd, _func, _info) \
+do { \
+ *(_csd) = CSD_INIT((_func), (_info)); \
+} while (0)
+
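
With the union gone, open-coded initialization of the old llist/flags members no longer compiles; INIT_CSD() is the supported way to set up a csd. A hedged sketch pairing it with smp_call_function_single_async(); the foo_* names are illustrative:

    #include <linux/smp.h>

    static call_single_data_t foo_csd;  /* hypothetical per-target csd */

    static void foo_ipi(void *info)
    {
            /* runs on the remote CPU */
    }

    static void foo_kick_cpu(int cpu)
    {
            INIT_CSD(&foo_csd, foo_ipi, NULL);
            smp_call_function_single_async(cpu, &foo_csd);
    }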
/*
* Enqueue a llist_node on the call_single_queue; be very careful, read
* flush_smp_call_function_queue() in detail.
diff --git a/include/linux/soc/marvell/octeontx2/asm.h b/include/linux/soc/marvell/octeontx2/asm.h
new file mode 100644
index 000000000000..ae2279fe830a
--- /dev/null
+++ b/include/linux/soc/marvell/octeontx2/asm.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __SOC_OTX2_ASM_H
+#define __SOC_OTX2_ASM_H
+
+#if defined(CONFIG_ARM64)
+/*
+ * otx2_lmt_flush is used for the LMT store operation.
+ * On the octeontx2 platform, CPT instruction enqueue and
+ * NIX packet send are only possible via LMTST
+ * operations, which use the LDEOR instruction targeting
+ * the coprocessor address.
+ */
+#define otx2_lmt_flush(ioaddr) \
+({ \
+ u64 result = 0; \
+ __asm__ volatile(".cpu generic+lse\n" \
+ "ldeor xzr, %x[rf], [%[rs]]" \
+ : [rf]"=r" (result) \
+ : [rs]"r" (ioaddr)); \
+ (result); \
+})
+#else
+#define otx2_lmt_flush(ioaddr) ({ 0; })
+#endif
+
+#endif /* __SOC_OTX2_ASM_H */
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index 41cc1192f9aa..f0b01b728640 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -359,6 +359,7 @@ struct sdw_dpn_prop {
* @sink_dpn_prop: Sink Data Port N properties
* @scp_int1_mask: SCP_INT1_MASK desired settings
* @quirks: bitmask identifying deltas from the MIPI specification
+ * @is_sdca: the Slave supports the SDCA specification
*/
struct sdw_slave_prop {
u32 mipi_revision;
@@ -382,6 +383,7 @@ struct sdw_slave_prop {
struct sdw_dpn_prop *sink_dpn_prop;
u8 scp_int1_mask;
u32 quirks;
+ bool is_sdca;
};
#define SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY BIT(0)
@@ -479,10 +481,12 @@ struct sdw_slave_id {
/**
* struct sdw_slave_intr_status - Slave interrupt status
+ * @sdca_cascade: set if the Slave device reports an SDCA interrupt
* @control_port: control port status
* @port: data port status
*/
struct sdw_slave_intr_status {
+ bool sdca_cascade;
u8 control_port;
u8 port[15];
};
diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h
index f420e8059779..138bec908c40 100644
--- a/include/linux/soundwire/sdw_registers.h
+++ b/include/linux/soundwire/sdw_registers.h
@@ -41,6 +41,12 @@
#define SDW_DP0_INT_IMPDEF1 BIT(5)
#define SDW_DP0_INT_IMPDEF2 BIT(6)
#define SDW_DP0_INT_IMPDEF3 BIT(7)
+#define SDW_DP0_INTERRUPTS (SDW_DP0_INT_TEST_FAIL | \
+ SDW_DP0_INT_PORT_READY | \
+ SDW_DP0_INT_BRA_FAILURE | \
+ SDW_DP0_INT_IMPDEF1 | \
+ SDW_DP0_INT_IMPDEF2 | \
+ SDW_DP0_INT_IMPDEF3)
#define SDW_DP0_PORTCTRL_DATAMODE GENMASK(3, 2)
#define SDW_DP0_PORTCTRL_NXTINVBANK BIT(4)
@@ -241,6 +247,11 @@
#define SDW_DPN_INT_IMPDEF1 BIT(5)
#define SDW_DPN_INT_IMPDEF2 BIT(6)
#define SDW_DPN_INT_IMPDEF3 BIT(7)
+#define SDW_DPN_INTERRUPTS (SDW_DPN_INT_TEST_FAIL | \
+ SDW_DPN_INT_PORT_READY | \
+ SDW_DPN_INT_IMPDEF1 | \
+ SDW_DPN_INT_IMPDEF2 | \
+ SDW_DPN_INT_IMPDEF3)
#define SDW_DPN_PORTCTRL_FLOWMODE GENMASK(1, 0)
#define SDW_DPN_PORTCTRL_DATAMODE GENMASK(3, 2)
@@ -298,4 +309,36 @@
#define SDW_CASC_PORT_MASK_INTSTAT3 1
#define SDW_CASC_PORT_REG_OFFSET_INTSTAT3 2
+/*
+ * v1.2 device - SDCA address mapping
+ *
+ * Spec definition
+ * Bits Contents
+ * 31 0 (required by addressing range)
+ * 30:26 0b10000 (Control Prefix)
+ * 25 0 (Reserved)
+ * 24:22 Function Number [2:0]
+ * 21 Entity[6]
+ * 20:19 Control Selector[5:4]
+ * 18 0 (Reserved)
+ * 17:15 Control Number[5:3]
+ * 14 Next
+ * 13 MBQ
+ * 12:7 Entity[5:0]
+ * 6:3 Control Selector[3:0]
+ * 2:0 Control Number[2:0]
+ */
+
+#define SDW_SDCA_CTL(fun, ent, ctl, ch) (BIT(30) | \
+ (((fun) & 0x7) << 22) | \
+ (((ent) & 0x40) << 15) | \
+ (((ent) & 0x3f) << 7) | \
+ (((ctl) & 0x30) << 15) | \
+ (((ctl) & 0x0f) << 3) | \
+ (((ch) & 0x38) << 12) | \
+ ((ch) & 0x07))
+
+#define SDW_SDCA_MBQ_CTL(reg) ((reg) | BIT(13))
+#define SDW_SDCA_NEXT_CTL(reg) ((reg) | BIT(14))
+
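
Putting the mapping together, an SDCA control address can be built and then flagged for MBQ access. A sketch with illustrative values (Function 0, Entity 1, Control Selector 2, Control Number 0):

    u32 reg = SDW_SDCA_CTL(0, 1, 2, 0);

    /* The same control, accessed as a Multi-Byte Quantity. */
    u32 mbq = SDW_SDCA_MBQ_CTL(SDW_SDCA_CTL(0, 1, 2, 0));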
#endif /* __SDW_REGISTERS_H */
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index 394a3f68bad5..729bcbf9f5ad 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -138,6 +138,7 @@ struct spmi_driver {
struct device_driver driver;
int (*probe)(struct spmi_device *sdev);
void (*remove)(struct spmi_device *sdev);
+ void (*shutdown)(struct spmi_device *sdev);
};
static inline struct spmi_driver *to_spmi_driver(struct device_driver *d)
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 628e28903b8b..15ca6b4167cc 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -170,6 +170,7 @@ struct plat_stmmacenet_data {
int unicast_filter_entries;
int tx_fifo_size;
int rx_fifo_size;
+ u32 addr64;
u32 rx_queues_to_use;
u32 tx_queues_to_use;
u8 rx_sched_algorithm;
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 76d8b09384a7..30577c3aecf8 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -24,6 +24,7 @@ typedef int (*cpu_stop_fn_t)(void *arg);
struct cpu_stop_work {
struct list_head list; /* cpu_stopper->works */
cpu_stop_fn_t fn;
+ unsigned long caller;
void *arg;
struct cpu_stop_done *done;
};
@@ -36,6 +37,8 @@ void stop_machine_park(int cpu);
void stop_machine_unpark(int cpu);
void stop_machine_yield(const struct cpumask *cpumask);
+extern void print_stop_info(const char *log_lvl, struct task_struct *task);
+
#else /* CONFIG_SMP */
#include <linux/workqueue.h>
@@ -80,6 +83,8 @@ static inline bool stop_one_cpu_nowait(unsigned int cpu,
return false;
}
+static inline void print_stop_info(const char *log_lvl, struct task_struct *task) { }
+
#endif /* CONFIG_SMP */
/*
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 667935c0dbd4..596bc2f4d9b0 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -338,8 +338,6 @@ extern void lru_note_cost(struct lruvec *lruvec, bool file,
unsigned int nr_pages);
extern void lru_note_cost_page(struct page *);
extern void lru_cache_add(struct page *);
-extern void lru_add_page_tail(struct page *page, struct page *page_tail,
- struct lruvec *lruvec, struct list_head *head);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
@@ -358,7 +356,7 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page,
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
+extern int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
diff --git a/include/linux/syscall_user_dispatch.h b/include/linux/syscall_user_dispatch.h
new file mode 100644
index 000000000000..a0ae443fb7df
--- /dev/null
+++ b/include/linux/syscall_user_dispatch.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Collabora Ltd.
+ */
+#ifndef _SYSCALL_USER_DISPATCH_H
+#define _SYSCALL_USER_DISPATCH_H
+
+#include <linux/thread_info.h>
+
+#ifdef CONFIG_GENERIC_ENTRY
+
+struct syscall_user_dispatch {
+ char __user *selector;
+ unsigned long offset;
+ unsigned long len;
+ bool on_dispatch;
+};
+
+int set_syscall_user_dispatch(unsigned long mode, unsigned long offset,
+ unsigned long len, char __user *selector);
+
+#define clear_syscall_work_syscall_user_dispatch(tsk) \
+ clear_task_syscall_work(tsk, SYSCALL_USER_DISPATCH)
+
+#else
+struct syscall_user_dispatch {};
+
+static inline int set_syscall_user_dispatch(unsigned long mode, unsigned long offset,
+ unsigned long len, char __user *selector)
+{
+ return -EINVAL;
+}
+
+static inline void clear_syscall_work_syscall_user_dispatch(struct task_struct *tsk)
+{
+}
+
+#endif /* CONFIG_GENERIC_ENTRY */
+
+#endif /* _SYSCALL_USER_DISPATCH_H */
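
The kernel interface added here is exercised from userspace via prctl(); PR_SET_SYSCALL_USER_DISPATCH and the PR_SYS_DISPATCH_ON/OFF arguments land with the same series, so treat this as a hedged sketch of the intended usage:

    #include <sys/prctl.h>

    static char selector;   /* flipped between allow and block by the runtime */

    /* Raw syscalls issued outside [start, start + len) raise SIGSYS
     * while 'selector' marks dispatch as blocked. */
    static int enable_dispatch(unsigned long start, unsigned long len)
    {
            return prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON,
                         start, len, &selector);
    }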
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index e93e249a4e9b..c8a974cead73 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -35,6 +35,24 @@ enum {
GOOD_STACK,
};
+#ifdef CONFIG_GENERIC_ENTRY
+enum syscall_work_bit {
+ SYSCALL_WORK_BIT_SECCOMP,
+ SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT,
+ SYSCALL_WORK_BIT_SYSCALL_TRACE,
+ SYSCALL_WORK_BIT_SYSCALL_EMU,
+ SYSCALL_WORK_BIT_SYSCALL_AUDIT,
+ SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH,
+};
+
+#define SYSCALL_WORK_SECCOMP BIT(SYSCALL_WORK_BIT_SECCOMP)
+#define SYSCALL_WORK_SYSCALL_TRACEPOINT BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT)
+#define SYSCALL_WORK_SYSCALL_TRACE BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE)
+#define SYSCALL_WORK_SYSCALL_EMU BIT(SYSCALL_WORK_BIT_SYSCALL_EMU)
+#define SYSCALL_WORK_SYSCALL_AUDIT BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT)
+#define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH)
+#endif
+
#include <asm/thread_info.h>
#ifdef __KERNEL__
@@ -97,6 +115,38 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
#define test_thread_flag(flag) \
test_ti_thread_flag(current_thread_info(), flag)
+#ifdef CONFIG_GENERIC_ENTRY
+#define set_syscall_work(fl) \
+ set_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
+#define test_syscall_work(fl) \
+ test_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
+#define clear_syscall_work(fl) \
+ clear_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
+
+#define set_task_syscall_work(t, fl) \
+ set_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
+#define test_task_syscall_work(t, fl) \
+ test_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
+#define clear_task_syscall_work(t, fl) \
+ clear_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
+
+#else /* CONFIG_GENERIC_ENTRY */
+
+#define set_syscall_work(fl) \
+ set_ti_thread_flag(current_thread_info(), TIF_##fl)
+#define test_syscall_work(fl) \
+ test_ti_thread_flag(current_thread_info(), TIF_##fl)
+#define clear_syscall_work(fl) \
+ clear_ti_thread_flag(current_thread_info(), TIF_##fl)
+
+#define set_task_syscall_work(t, fl) \
+ set_ti_thread_flag(task_thread_info(t), TIF_##fl)
+#define test_task_syscall_work(t, fl) \
+ test_ti_thread_flag(task_thread_info(t), TIF_##fl)
+#define clear_task_syscall_work(t, fl) \
+ clear_ti_thread_flag(task_thread_info(t), TIF_##fl)
+#endif /* !CONFIG_GENERIC_ENTRY */
+
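
Callers such as seccomp or ptrace flip these flags without knowing which backend is active. A hedged sketch of the seccomp-style usage; the foo_* wrappers are illustrative, only the macros and __secure_computing() are real:

    #include <linux/sched.h>
    #include <linux/seccomp.h>
    #include <linux/thread_info.h>

    /* Attach side (sketch): mark @task so the entry code consults
     * seccomp on every syscall entry. */
    static void foo_enable_seccomp(struct task_struct *task)
    {
            set_task_syscall_work(task, SECCOMP);
    }

    /* Syscall entry slow path (sketch): */
    static long foo_entry_slowpath(void)
    {
            if (test_syscall_work(SECCOMP))
                    return __secure_computing(NULL);
            return 0;
    }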
#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index 5db2b11ab085..034dccf93955 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -179,6 +179,8 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
* @lock: Lock to serialize access to the following fields of this structure
* @vendor_name: Name of the vendor (or %NULL if not known)
* @device_name: Name of the device (or %NULL if not known)
+ * @link_speed: Speed of the link in Gb/s
+ * @link_width: Width of the link (1 or 2)
* @is_unplugged: The XDomain is unplugged
* @resume: The XDomain is being resumed
* @needs_uuid: If the XDomain does not have @remote_uuid it will be
@@ -223,6 +225,8 @@ struct tb_xdomain {
struct mutex lock;
const char *vendor_name;
const char *device_name;
+ unsigned int link_speed;
+ unsigned int link_width;
bool is_unplugged;
bool resume;
bool needs_uuid;
@@ -243,6 +247,8 @@ struct tb_xdomain {
u8 depth;
};
+int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd);
+void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd);
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
u16 transmit_ring, u16 receive_path,
u16 receive_ring);
@@ -344,6 +350,9 @@ void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);
* @prtcvers: Protocol version from the properties directory
* @prtcrevs: Protocol software revision from the properties directory
* @prtcstns: Protocol settings mask from the properties directory
+ * @debugfs_dir: Pointer to the service debugfs directory. Always created
+ * when debugfs is enabled. Can be used by service drivers to
+ * add their own entries under the service.
*
* Each domain exposes set of services it supports as collection of
* properties. For each service there will be one corresponding
@@ -357,6 +366,7 @@ struct tb_service {
u32 prtcvers;
u32 prtcrevs;
u32 prtcstns;
+ struct dentry *debugfs_dir;
};
static inline struct tb_service *tb_service_get(struct tb_service *svc)
@@ -471,6 +481,8 @@ struct tb_nhi {
* @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
* @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
* @flags: Ring specific flags
+ * @e2e_tx_hop: Transmit HopID when E2E is enabled. Only applicable to
+ * RX ring. For TX ring this should be set to %0.
* @sof_mask: Bit mask used to detect start of frame PDF
* @eof_mask: Bit mask used to detect end of frame PDF
* @start_poll: Called when ring interrupt is triggered to start
@@ -494,6 +506,7 @@ struct tb_ring {
int irq;
u8 vector;
unsigned int flags;
+ int e2e_tx_hop;
u16 sof_mask;
u16 eof_mask;
void (*start_poll)(void *data);
@@ -504,6 +517,8 @@ struct tb_ring {
#define RING_FLAG_NO_SUSPEND BIT(0)
/* Configure the ring to be in frame mode */
#define RING_FLAG_FRAME BIT(1)
+/* Enable end-to-end flow control */
+#define RING_FLAG_E2E BIT(2)
struct ring_frame;
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);
@@ -552,7 +567,8 @@ struct ring_frame {
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
unsigned int flags);
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
- unsigned int flags, u16 sof_mask, u16 eof_mask,
+ unsigned int flags, int e2e_tx_hop,
+ u16 sof_mask, u16 eof_mask,
void (*start_poll)(void *), void *poll_data);
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);
diff --git a/include/linux/time_namespace.h b/include/linux/time_namespace.h
index 5b6031385db0..3146f1c056c9 100644
--- a/include/linux/time_namespace.h
+++ b/include/linux/time_namespace.h
@@ -4,7 +4,6 @@
#include <linux/sched.h>
-#include <linux/kref.h>
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
#include <linux/err.h>
@@ -18,7 +17,6 @@ struct timens_offsets {
};
struct time_namespace {
- struct kref kref;
struct user_namespace *user_ns;
struct ucounts *ucounts;
struct ns_common ns;
@@ -37,20 +35,21 @@ extern void timens_commit(struct task_struct *tsk, struct time_namespace *ns);
static inline struct time_namespace *get_time_ns(struct time_namespace *ns)
{
- kref_get(&ns->kref);
+ refcount_inc(&ns->ns.count);
return ns;
}
struct time_namespace *copy_time_ns(unsigned long flags,
struct user_namespace *user_ns,
struct time_namespace *old_ns);
-void free_time_ns(struct kref *kref);
-int timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk);
+void free_time_ns(struct time_namespace *ns);
+void timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk);
struct vdso_data *arch_get_vdso_data(void *vvar_page);
static inline void put_time_ns(struct time_namespace *ns)
{
- kref_put(&ns->kref, free_time_ns);
+ if (refcount_dec_and_test(&ns->ns.count))
+ free_time_ns(ns);
}
void proc_timens_show_offsets(struct task_struct *p, struct seq_file *m);
@@ -77,6 +76,20 @@ static inline void timens_add_boottime(struct timespec64 *ts)
*ts = timespec64_add(*ts, ns_offsets->boottime);
}
+static inline u64 timens_add_boottime_ns(u64 nsec)
+{
+ struct timens_offsets *ns_offsets = &current->nsproxy->time_ns->offsets;
+
+ return nsec + timespec64_to_ns(&ns_offsets->boottime);
+}
+
+static inline void timens_sub_boottime(struct timespec64 *ts)
+{
+ struct timens_offsets *ns_offsets = &current->nsproxy->time_ns->offsets;
+
+ *ts = timespec64_sub(*ts, ns_offsets->boottime);
+}
+
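
The two helpers convert boottime values between the host view and the caller's time-namespace view. A hedged sketch; the foo_* wrappers are illustrative:

    #include <linux/time_namespace.h>
    #include <linux/timekeeping.h>

    /* Host boottime adjusted into the caller's namespace view. */
    static u64 foo_boottime_ns_view(void)
    {
            return timens_add_boottime_ns(ktime_get_boottime_ns());
    }

    /* Reverse direction: namespace-relative timespec64 back to host. */
    static void foo_to_host(struct timespec64 *ts)
    {
            timens_sub_boottime(ts);
    }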
ktime_t do_timens_ktime_to_host(clockid_t clockid, ktime_t tim,
struct timens_offsets *offsets);
@@ -122,14 +135,22 @@ struct time_namespace *copy_time_ns(unsigned long flags,
return old_ns;
}
-static inline int timens_on_fork(struct nsproxy *nsproxy,
+static inline void timens_on_fork(struct nsproxy *nsproxy,
struct task_struct *tsk)
{
- return 0;
+ return;
}
static inline void timens_add_monotonic(struct timespec64 *ts) { }
static inline void timens_add_boottime(struct timespec64 *ts) { }
+
+static inline u64 timens_add_boottime_ns(u64 nsec)
+{
+ return nsec;
+}
+
+static inline void timens_sub_boottime(struct timespec64 *ts) { }
+
static inline ktime_t timens_ktime_to_host(clockid_t clockid, ktime_t tim)
{
return tim;
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 7f7e4a3f4394..929d3f3937c0 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -303,6 +303,8 @@ extern int persistent_clock_is_local;
extern void read_persistent_clock64(struct timespec64 *ts);
void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
struct timespec64 *boot_offset);
+#ifdef CONFIG_GENERIC_CMOS_UPDATE
extern int update_persistent_clock64(struct timespec64 now);
+#endif
#endif
diff --git a/include/linux/timer.h b/include/linux/timer.h
index d10bc7e73b41..fda13c9d1256 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -193,7 +193,6 @@ extern int try_to_del_timer_sync(struct timer_list *timer);
#define del_singleshot_timer_sync(t) del_timer_sync(t)
extern void init_timers(void);
-extern void run_local_timers(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
diff --git a/include/linux/timex.h b/include/linux/timex.h
index ce0859763670..9c2e54faf9b7 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -157,7 +157,6 @@ extern int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex *
extern void hardpps(const struct timespec64 *, const struct timespec64 *);
int read_current_timer(unsigned long *timer_val);
-void ntp_notify_cmos_timer(void);
/* The clock frequency of the i8253/i8254 PIT */
#define PIT_TICK_RATE 1193182ul
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index b480e1a07ed8..54b925224a13 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -83,11 +83,12 @@ static inline int ptrace_report_syscall(struct pt_regs *regs,
* tracehook_report_syscall_entry - task is about to attempt a system call
* @regs: user register state of current task
*
- * This will be called if %TIF_SYSCALL_TRACE or %TIF_SYSCALL_EMU have been set,
- * when the current task has just entered the kernel for a system call.
- * Full user register state is available here. Changing the values
- * in @regs can affect the system call number and arguments to be tried.
- * It is safe to block here, preventing the system call from beginning.
+ * This will be called if %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_EMU have been set, when the current task has just
+ * entered the kernel for a system call. Full user register state is
+ * available here. Changing the values in @regs can affect the system
+ * call number and arguments to be tried. It is safe to block here,
+ * preventing the system call from beginning.
*
* Returns zero normally, or nonzero if the calling arch code should abort
* the system call. That must prevent normal entry so no system call is
@@ -109,15 +110,15 @@ static inline __must_check int tracehook_report_syscall_entry(
* @regs: user register state of current task
* @step: nonzero if simulating single-step or block-step
*
- * This will be called if %TIF_SYSCALL_TRACE has been set, when the
- * current task has just finished an attempted system call. Full
+ * This will be called if %SYSCALL_WORK_SYSCALL_TRACE has been set, when
+ * the current task has just finished an attempted system call. Full
* user register state is available here. It is safe to block here,
* preventing signals from being processed.
*
* If @step is nonzero, this report is also in lieu of the normal
* trap that would follow the system call instruction because
* user_enable_block_step() or user_enable_single_step() was used.
- * In this case, %TIF_SYSCALL_TRACE might not be set.
+ * In this case, %SYSCALL_WORK_SYSCALL_TRACE might not be set.
*
* Called without locks, just before checking for pending signals.
*/
@@ -198,4 +199,31 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
blkcg_maybe_throttle_current();
}
+/*
+ * Called by exit_to_user_mode_loop() if ti_work & _TIF_NOTIFY_SIGNAL. This
+ * is currently used by TWA_SIGNAL based task_work, which requires breaking
+ * wait loops to ensure that task_work is noticed and run.
+ */
+static inline void tracehook_notify_signal(void)
+{
+#if defined(TIF_NOTIFY_SIGNAL)
+ clear_thread_flag(TIF_NOTIFY_SIGNAL);
+ smp_mb__after_atomic();
+ if (current->task_works)
+ task_work_run();
+#endif
+}
+
+/*
+ * Called to notify @task that it has work to process on its next
+ * return to user mode (see exit_to_user_mode_loop()).
+ */
+static inline void set_notify_signal(struct task_struct *task)
+{
+#if defined(TIF_NOTIFY_SIGNAL)
+ if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) &&
+ !wake_up_state(task, TASK_INTERRUPTIBLE))
+ kick_process(task);
+#endif
+}
+
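
set_notify_signal() is the producer side of the TWA_SIGNAL task_work mode: queueing work with TWA_SIGNAL calls it internally so the target breaks out of interruptible sleeps. A hedged sketch; foo_cb and foo_func are illustrative:

    #include <linux/task_work.h>

    static struct callback_head foo_cb;     /* hypothetical */

    static void foo_func(struct callback_head *cb)
    {
            /* runs in the target task before it returns to user mode */
    }

    static int foo_queue(struct task_struct *task)
    {
            init_task_work(&foo_cb, foo_func);
            /* TWA_SIGNAL uses set_notify_signal() to wake @task. */
            return task_work_add(task, &foo_cb, TWA_SIGNAL);
    }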
#endif /* <linux/tracehook.h> */
diff --git a/include/linux/tty.h b/include/linux/tty.h
index a99e9b8e4e31..c873f475f0a7 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -303,9 +303,12 @@ struct tty_struct {
spinlock_t flow_lock;
/* Termios values are protected by the termios rwsem */
struct ktermios termios, termios_locked;
- struct termiox *termiox; /* May be NULL for unsupported */
char name[64];
struct pid *pgrp; /* Protected by ctrl lock */
+ /*
+ * Writes are protected by both the ctrl lock and the legacy mutex;
+ * readers must hold at least one of them.
+ */
struct pid *session;
unsigned long flags;
int count;
@@ -605,82 +608,64 @@ static inline struct tty_port *tty_port_get(struct tty_port *port)
}
/* If the cts flow control is enabled, return true. */
-static inline bool tty_port_cts_enabled(struct tty_port *port)
+static inline bool tty_port_cts_enabled(const struct tty_port *port)
{
return test_bit(TTY_PORT_CTS_FLOW, &port->iflags);
}
static inline void tty_port_set_cts_flow(struct tty_port *port, bool val)
{
- if (val)
- set_bit(TTY_PORT_CTS_FLOW, &port->iflags);
- else
- clear_bit(TTY_PORT_CTS_FLOW, &port->iflags);
+ assign_bit(TTY_PORT_CTS_FLOW, &port->iflags, val);
}
-static inline bool tty_port_active(struct tty_port *port)
+static inline bool tty_port_active(const struct tty_port *port)
{
return test_bit(TTY_PORT_ACTIVE, &port->iflags);
}
static inline void tty_port_set_active(struct tty_port *port, bool val)
{
- if (val)
- set_bit(TTY_PORT_ACTIVE, &port->iflags);
- else
- clear_bit(TTY_PORT_ACTIVE, &port->iflags);
+ assign_bit(TTY_PORT_ACTIVE, &port->iflags, val);
}
-static inline bool tty_port_check_carrier(struct tty_port *port)
+static inline bool tty_port_check_carrier(const struct tty_port *port)
{
return test_bit(TTY_PORT_CHECK_CD, &port->iflags);
}
static inline void tty_port_set_check_carrier(struct tty_port *port, bool val)
{
- if (val)
- set_bit(TTY_PORT_CHECK_CD, &port->iflags);
- else
- clear_bit(TTY_PORT_CHECK_CD, &port->iflags);
+ assign_bit(TTY_PORT_CHECK_CD, &port->iflags, val);
}
-static inline bool tty_port_suspended(struct tty_port *port)
+static inline bool tty_port_suspended(const struct tty_port *port)
{
return test_bit(TTY_PORT_SUSPENDED, &port->iflags);
}
static inline void tty_port_set_suspended(struct tty_port *port, bool val)
{
- if (val)
- set_bit(TTY_PORT_SUSPENDED, &port->iflags);
- else
- clear_bit(TTY_PORT_SUSPENDED, &port->iflags);
+ assign_bit(TTY_PORT_SUSPENDED, &port->iflags, val);
}
-static inline bool tty_port_initialized(struct tty_port *port)
+static inline bool tty_port_initialized(const struct tty_port *port)
{
return test_bit(TTY_PORT_INITIALIZED, &port->iflags);
}
static inline void tty_port_set_initialized(struct tty_port *port, bool val)
{
- if (val)
- set_bit(TTY_PORT_INITIALIZED, &port->iflags);
- else
- clear_bit(TTY_PORT_INITIALIZED, &port->iflags);
+ assign_bit(TTY_PORT_INITIALIZED, &port->iflags, val);
}
-static inline bool tty_port_kopened(struct tty_port *port)
+static inline bool tty_port_kopened(const struct tty_port *port)
{
return test_bit(TTY_PORT_KOPENED, &port->iflags);
}
static inline void tty_port_set_kopened(struct tty_port *port, bool val)
{
- if (val)
- set_bit(TTY_PORT_KOPENED, &port->iflags);
- else
- clear_bit(TTY_PORT_KOPENED, &port->iflags);
+ assign_bit(TTY_PORT_KOPENED, &port->iflags, val);
}
extern struct tty_struct *tty_port_tty_get(struct tty_port *port);
@@ -716,6 +701,7 @@ extern int __must_check tty_ldisc_init(struct tty_struct *tty);
extern void tty_ldisc_deinit(struct tty_struct *tty);
extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
char *f, int count);
+extern void tty_sysctl_init(void);
/* n_tty.c */
extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 358446247ccd..61c3372d3f32 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -224,14 +224,6 @@
* line). See tty_do_resize() if you need to wrap the standard method
* in your own logic - the usual case.
*
- * void (*set_termiox)(struct tty_struct *tty, struct termiox *new);
- *
- * Called when the device receives a termiox based ioctl. Passes down
- * the requested data from user space. This method will not be invoked
- * unless the tty also has a valid tty->termiox pointer.
- *
- * Optional: Called under the termios lock
- *
* int (*get_icount)(struct tty_struct *tty, struct serial_icounter *icount);
*
* Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel
@@ -285,7 +277,6 @@ struct tty_operations {
int (*tiocmset)(struct tty_struct *tty,
unsigned int set, unsigned int clear);
int (*resize)(struct tty_struct *tty, struct winsize *ws);
- int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew);
int (*get_icount)(struct tty_struct *tty,
struct serial_icounter_struct *icount);
int (*get_serial)(struct tty_struct *tty, struct serial_struct *p);
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 54bf6b118401..47c5962b876b 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -117,6 +117,14 @@ extern int __must_check
struct uio_info *info);
/* use a define to avoid include chaining to get THIS_MODULE */
+
+/**
+ * uio_register_device - register a new userspace IO device
+ * @parent: parent device
+ * @info: UIO device capabilities
+ *
+ * returns zero on success or a negative error code.
+ */
#define uio_register_device(parent, info) \
__uio_register_device(THIS_MODULE, parent, info)
@@ -129,6 +137,14 @@ extern int __must_check
struct uio_info *info);
/* use a define to avoid include chaining to get THIS_MODULE */
+
+/**
+ * devm_uio_register_device - Resource managed uio_register_device()
+ * @parent: parent device
+ * @info: UIO device capabilities
+ *
+ * returns zero on success or a negative error code.
+ */
#define devm_uio_register_device(parent, info) \
__devm_uio_register_device(THIS_MODULE, parent, info)
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 3dbb42c637c1..96281cd50ff6 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -734,10 +734,6 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
/* random stuff */
-#define RUN_CONTEXT (in_irq() ? "in_irq" \
- : (in_interrupt() ? "in_interrupt" : "can sleep"))
-
-
/* This rwsem is for use only by the hub driver and ehci-hcd.
* Nobody else should touch it.
*/
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index 3a805e2ecbc9..bb9a782e1411 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -466,6 +466,7 @@ static inline unsigned int rdo_max_power(u32 rdo)
#define PD_T_DRP_SRC 30
#define PD_T_PS_SOURCE_OFF 920
#define PD_T_PS_SOURCE_ON 480
+#define PD_T_PS_SOURCE_ON_PRS 450 /* 390 - 480ms */
#define PD_T_PS_HARD_RESET 30
#define PD_T_SRC_RECOVER 760
#define PD_T_SRC_RECOVER_MAX 1000
@@ -484,6 +485,7 @@ static inline unsigned int rdo_max_power(u32 rdo)
#define PD_T_CC_DEBOUNCE 200 /* 100 - 200 ms */
#define PD_T_PD_DEBOUNCE 20 /* 10 - 20 ms */
+#define PD_T_TRY_CC_DEBOUNCE 15 /* 10 - 20 ms */
#define PD_N_CAPS_COUNT (PD_T_NO_RESPONSE / PD_T_SEND_SOURCE_CAP)
#define PD_N_HARD_RESET_COUNT 2
diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h
index 68bdc4e2f5a9..8c08eeb9a74b 100644
--- a/include/linux/usb/pd_vdo.h
+++ b/include/linux/usb/pd_vdo.h
@@ -103,17 +103,25 @@
* --------------------
* <31> :: data capable as a USB host
* <30> :: data capable as a USB device
- * <29:27> :: product type
+ * <29:27> :: product type (UFP / Cable)
* <26> :: modal operation supported (1b == yes)
- * <25:16> :: Reserved, Shall be set to zero
+ * <25:16> :: product type (DFP)
* <15:0> :: USB-IF assigned VID for this cable vendor
*/
#define IDH_PTYPE_UNDEF 0
#define IDH_PTYPE_HUB 1
#define IDH_PTYPE_PERIPH 2
+#define IDH_PTYPE_PSD 3
+#define IDH_PTYPE_AMA 5
+
#define IDH_PTYPE_PCABLE 3
#define IDH_PTYPE_ACABLE 4
-#define IDH_PTYPE_AMA 5
+
+#define IDH_PTYPE_DFP_UNDEF 0
+#define IDH_PTYPE_DFP_HUB 1
+#define IDH_PTYPE_DFP_HOST 2
+#define IDH_PTYPE_DFP_PB 3
+#define IDH_PTYPE_DFP_AMC 4
#define VDO_IDH(usbh, usbd, ptype, is_modal, vid) \
((usbh) << 31 | (usbd) << 30 | ((ptype) & 0x7) << 27 \
@@ -122,6 +130,7 @@
#define PD_IDH_PTYPE(vdo) (((vdo) >> 27) & 0x7)
#define PD_IDH_VID(vdo) ((vdo) & 0xffff)
#define PD_IDH_MODAL_SUPP(vdo) ((vdo) & (1 << 26))
+#define PD_IDH_DFP_PTYPE(vdo) (((vdo) >> 23) & 0x7)
/*
* Cert Stat VDO
@@ -177,7 +186,7 @@
* <31:28> :: Cable HW version
* <27:24> :: Cable FW version
* <23:20> :: Reserved, Shall be set to zero
- * <19:18> :: type-C to Type-A/B/C (00b == A, 01 == B, 10 == C)
+ * <19:18> :: type-C to Type-A/B/C/Captive (00b == A, 01 == B, 10 == C, 11 == Captive)
* <17> :: Type-C to Plug/Receptacle (0b == plug, 1b == receptacle)
* <16:13> :: cable latency (0001 == <10ns(~1m length))
* <12:11> :: cable termination type (11b == both ends active VCONN req)
@@ -193,6 +202,7 @@
#define CABLE_ATYPE 0
#define CABLE_BTYPE 1
#define CABLE_CTYPE 2
+#define CABLE_CAPTIVE 3
#define CABLE_PLUG 0
#define CABLE_RECEPTACLE 1
#define CABLE_CURR_1A5 0
@@ -208,6 +218,7 @@
| (tx1d) << 10 | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 \
| ((cur) & 0x3) << 5 | (vps) << 4 | (sopp) << 3 \
| ((usbss) & 0x7))
+#define VDO_TYPEC_CABLE_TYPE(vdo) (((vdo) >> 18) & 0x3)
/*
* AMA VDO
diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h
new file mode 100644
index 000000000000..20d88b1defc3
--- /dev/null
+++ b/include/linux/usb/r8152.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 Realtek Semiconductor Corp. All rights reserved.
+ */
+
+#ifndef __LINUX_R8152_H
+#define __LINUX_R8152_H
+
+#define RTL8152_REQT_READ 0xc0
+#define RTL8152_REQT_WRITE 0x40
+#define RTL8152_REQ_GET_REGS 0x05
+#define RTL8152_REQ_SET_REGS 0x05
+
+#define BYTE_EN_DWORD 0xff
+#define BYTE_EN_WORD 0x33
+#define BYTE_EN_BYTE 0x11
+#define BYTE_EN_SIX_BYTES 0x3f
+#define BYTE_EN_START_MASK 0x0f
+#define BYTE_EN_END_MASK 0xf0
+
+#define MCU_TYPE_PLA 0x0100
+#define MCU_TYPE_USB 0x0000
+
+/* Define these values to match your device */
+#define VENDOR_ID_REALTEK 0x0bda
+#define VENDOR_ID_MICROSOFT 0x045e
+#define VENDOR_ID_SAMSUNG 0x04e8
+#define VENDOR_ID_LENOVO 0x17ef
+#define VENDOR_ID_LINKSYS 0x13b1
+#define VENDOR_ID_NVIDIA 0x0955
+#define VENDOR_ID_TPLINK 0x2357
+
+#if IS_REACHABLE(CONFIG_USB_RTL8152)
+extern u8 rtl8152_get_version(struct usb_interface *intf);
+#endif
+
+#endif /* __LINUX_R8152_H */
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 8e67eff9039f..1c09b922f8b0 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -62,7 +62,6 @@
* @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this
* port.
* @flags: usb serial port flags
- * @write_wait: a wait_queue_head_t used by the port.
* @work: work queue entry for the line discipline waking up.
* @dev: pointer to the serial device
*
@@ -108,7 +107,6 @@ struct usb_serial_port {
int tx_bytes;
unsigned long flags;
- wait_queue_head_t write_wait;
struct work_struct work;
unsigned long sysrq; /* sysrq timeout */
struct device dev;
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index 09762d26fa0c..f4a18427f5c4 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -83,6 +83,27 @@ enum tcpm_transmit_type {
* Optional; Called to enable/disable PD 3.0 fast role swap.
* Enabling frs is accessory dependent as not all PD3.0
* accessories support fast role swap.
+ * @frs_sourcing_vbus:
+ * Optional; Called to notify that vbus is now being sourced.
+ * Low level drivers can perform chip specific operations, if any.
+ * @enable_auto_vbus_discharge:
+ * Optional; TCPCI spec based TCPC implementations can support
+ * hardware that autonomously discharges vbus upon disconnecting
+ * as sink or source. TCPM signals the TCPC to enable the mechanism
+ * upon entering the connected state, and to disable it upon disconnect.
+ * @set_auto_vbus_discharge_threshold:
+ * Mandatory when enable_auto_vbus_discharge is implemented. TCPM
+ * calls this function to allow lower level drivers to program the
+ * vbus threshold voltage below which the vbus discharge circuit
+ * will be turned on. requested_vbus_voltage is set to 0 when vbus
+ * is expected to disappear, e.g. during PR_SWAP or HARD_RESET.
+ * @is_vbus_vsafe0v:
+ * Optional; TCPCI spec based TCPC implementations are expected to
+ * detect VSAFE0V voltage level at vbus. When detection of VSAFE0V
+ * is supported by TCPC, set this callback for TCPM to query
+ * whether vbus is at VSAFE0V when needed.
+ * Returns true when vbus is at VSAFE0V, false otherwise.
*/
struct tcpc_dev {
struct fwnode_handle *fwnode;
@@ -106,9 +127,14 @@ struct tcpc_dev {
enum typec_cc_status cc);
int (*try_role)(struct tcpc_dev *dev, int role);
int (*pd_transmit)(struct tcpc_dev *dev, enum tcpm_transmit_type type,
- const struct pd_message *msg);
+ const struct pd_message *msg, unsigned int negotiated_rev);
int (*set_bist_data)(struct tcpc_dev *dev, bool on);
int (*enable_frs)(struct tcpc_dev *dev, bool enable);
+ void (*frs_sourcing_vbus)(struct tcpc_dev *dev);
+ int (*enable_auto_vbus_discharge)(struct tcpc_dev *dev, bool enable);
+ int (*set_auto_vbus_discharge_threshold)(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
+ bool pps_active, u32 requested_vbus_voltage);
+ bool (*is_vbus_vsafe0v)(struct tcpc_dev *dev);
};
struct tcpm_port;
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index 6be558045942..54475323f83b 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -126,9 +126,11 @@ struct typec_altmode_desc {
enum typec_port_data roles;
};
+int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmodes);
struct typec_altmode
*typec_partner_register_altmode(struct typec_partner *partner,
const struct typec_altmode_desc *desc);
+int typec_plug_set_num_altmodes(struct typec_plug *plug, int num_altmodes);
struct typec_altmode
*typec_plug_register_altmode(struct typec_plug *plug,
const struct typec_altmode_desc *desc);
diff --git a/include/linux/usb/typec_tbt.h b/include/linux/usb/typec_tbt.h
index 47c2d501ddce..63dd44b72e0c 100644
--- a/include/linux/usb/typec_tbt.h
+++ b/include/linux/usb/typec_tbt.h
@@ -39,12 +39,16 @@ struct typec_thunderbolt_data {
#define TBT_CABLE_USB3_GEN1 1
#define TBT_CABLE_USB3_PASSIVE 2
#define TBT_CABLE_10_AND_20GBPS 3
-#define TBT_CABLE_ROUNDED BIT(19)
+#define TBT_CABLE_ROUNDED_SUPPORT(_vdo_) \
+ (((_vdo_) & GENMASK(20, 19)) >> 19)
+#define TBT_GEN3_NON_ROUNDED 0
+#define TBT_GEN3_GEN4_ROUNDED_NON_ROUNDED 1
#define TBT_CABLE_OPTICAL BIT(21)
#define TBT_CABLE_RETIMER BIT(22)
#define TBT_CABLE_LINK_TRAINING BIT(23)
#define TBT_SET_CABLE_SPEED(_s_) (((_s_) & GENMASK(2, 0)) << 16)
+#define TBT_SET_CABLE_ROUNDED(_g_) (((_g_) & GENMASK(1, 0)) << 19)
/* TBT3 Device Enter Mode VDO bits */
#define TBT_ENTER_MODE_CABLE_SPEED(s) TBT_SET_CABLE_SPEED(s)
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 2e4f7721fc4e..88a7673894d5 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -65,8 +65,6 @@ struct usbnet {
struct usb_anchor deferred;
struct tasklet_struct bh;
- struct pcpu_sw_netstats __percpu *stats64;
-
struct work_struct kevent;
unsigned long flags;
# define EVENT_TX_HALT 0
@@ -285,7 +283,5 @@ extern int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags);
extern void usbnet_status_stop(struct usbnet *dev);
extern void usbnet_update_max_qlen(struct usbnet *dev);
-extern void usbnet_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats);
#endif /* __LINUX_USB_USBNET_H */
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 4a19ac3f24d0..6b03fdd69d27 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -84,6 +84,8 @@
/* Cannot handle REPORT_LUNS */ \
US_FLAG(ALWAYS_SYNC, 0x20000000) \
/* lies about caching, so always sync */ \
+ US_FLAG(NO_SAME, 0x40000000) \
+ /* Cannot handle WRITE_SAME */ \
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 6ef1c7109fc4..64cf8ebdc4ec 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -57,7 +57,6 @@ struct user_namespace {
struct uid_gid_map uid_map;
struct uid_gid_map gid_map;
struct uid_gid_map projid_map;
- atomic_t count;
struct user_namespace *parent;
int level;
kuid_t owner;
@@ -109,7 +108,7 @@ void dec_ucount(struct ucounts *ucounts, enum ucount_type type);
static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
{
if (ns)
- atomic_inc(&ns->count);
+ refcount_inc(&ns->ns.count);
return ns;
}
@@ -119,7 +118,7 @@ extern void __put_user_ns(struct user_namespace *ns);
static inline void put_user_ns(struct user_namespace *ns)
{
- if (ns && atomic_dec_and_test(&ns->count))
+ if (ns && refcount_dec_and_test(&ns->ns.count))
__put_user_ns(ns);
}
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index 44429d9142ca..2b1737c9b244 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -4,7 +4,6 @@
#include <linux/sched.h>
-#include <linux/kref.h>
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
#include <linux/err.h>
@@ -22,7 +21,6 @@ struct user_namespace;
extern struct user_namespace init_user_ns;
struct uts_namespace {
- struct kref kref;
struct new_utsname name;
struct user_namespace *user_ns;
struct ucounts *ucounts;
@@ -33,16 +31,17 @@ extern struct uts_namespace init_uts_ns;
#ifdef CONFIG_UTS_NS
static inline void get_uts_ns(struct uts_namespace *ns)
{
- kref_get(&ns->kref);
+ refcount_inc(&ns->ns.count);
}
extern struct uts_namespace *copy_utsname(unsigned long flags,
struct user_namespace *user_ns, struct uts_namespace *old_ns);
-extern void free_uts_ns(struct kref *kref);
+extern void free_uts_ns(struct uts_namespace *ns);
static inline void put_uts_ns(struct uts_namespace *ns)
{
- kref_put(&ns->kref, free_uts_ns);
+ if (refcount_dec_and_test(&ns->ns.count))
+ free_uts_ns(ns);
}
void uts_ns_init(void);
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 938eaf9517e2..80c0181c411d 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -72,16 +72,14 @@ struct vmap_area {
struct list_head list; /* address sorted list */
/*
- * The following three variables can be packed, because
- * a vmap_area object is always one of the three states:
+ * The following two variables can be packed, because
+ * a vmap_area object can be either:
* 1) in "free" tree (root is vmap_area_root)
- * 2) in "busy" tree (root is free_vmap_area_root)
- * 3) in purge list (head is vmap_purge_list)
+ * 2) or "busy" tree (root is free_vmap_area_root)
*/
union {
unsigned long subtree_max_size; /* in "free" tree */
struct vm_struct *vm; /* in "busy" tree */
- struct llist_node purge_list; /* in purge list */
};
};
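
With the purge list gone, the union has only two mutually exclusive interpretations, discriminated implicitly by which tree the object is linked into. An illustration of that invariant with demo types, not the kernel's:

	struct vm_struct;		/* forward declaration for the sketch */

	struct demo_area {
		union {
			unsigned long subtree_max_size;	/* live in the free tree */
			struct vm_struct *vm;		/* live in the busy tree */
		};
		/* No tag field: membership in exactly one tree at a time
		 * tells readers which member is currently valid. */
	};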
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 322dcbfcc933..773135fc6e19 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -450,4 +450,108 @@ static inline const char *vm_event_name(enum vm_event_item item)
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
+#ifdef CONFIG_MEMCG
+
+void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+ int val);
+
+static inline void mod_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __mod_lruvec_state(lruvec, idx, val);
+ local_irq_restore(flags);
+}
+
+void __mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val);
+
+static inline void mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __mod_lruvec_page_state(page, idx, val);
+ local_irq_restore(flags);
+}
+
+#else
+
+static inline void __mod_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
+{
+ __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+}
+
+static inline void mod_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
+{
+ mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+}
+
+static inline void __mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+{
+ __mod_node_page_state(page_pgdat(page), idx, val);
+}
+
+static inline void mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+{
+ mod_node_page_state(page_pgdat(page), idx, val);
+}
+
+#endif /* CONFIG_MEMCG */
+
+static inline void __inc_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx)
+{
+ __mod_lruvec_state(lruvec, idx, 1);
+}
+
+static inline void __dec_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx)
+{
+ __mod_lruvec_state(lruvec, idx, -1);
+}
+
+static inline void __inc_lruvec_page_state(struct page *page,
+ enum node_stat_item idx)
+{
+ __mod_lruvec_page_state(page, idx, 1);
+}
+
+static inline void __dec_lruvec_page_state(struct page *page,
+ enum node_stat_item idx)
+{
+ __mod_lruvec_page_state(page, idx, -1);
+}
+
+static inline void inc_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx)
+{
+ mod_lruvec_state(lruvec, idx, 1);
+}
+
+static inline void dec_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx)
+{
+ mod_lruvec_state(lruvec, idx, -1);
+}
+
+static inline void inc_lruvec_page_state(struct page *page,
+ enum node_stat_item idx)
+{
+ mod_lruvec_page_state(page, idx, 1);
+}
+
+static inline void dec_lruvec_page_state(struct page *page,
+ enum node_stat_item idx)
+{
+ mod_lruvec_page_state(page, idx, -1);
+}
+
#endif /* _LINUX_VMSTAT_H */
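
The new lruvec helpers follow the usual vmstat convention: the double-underscore variants assume the caller already has interrupts disabled, while the plain variants wrap the update in local_irq_save()/local_irq_restore(). A hedged usage sketch with hypothetical callers; NR_FILE_MAPPED is just a representative node_stat_item:

	/* process context: use the irq-safe wrapper */
	static void demo_account_mapped(struct page *page)
	{
		inc_lruvec_page_state(page, NR_FILE_MAPPED);
	}

	/* caller already disabled interrupts, e.g. under spin_lock_irq() */
	static void demo_account_mapped_irqs_off(struct page *page)
	{
		__inc_lruvec_page_state(page, NR_FILE_MAPPED);
	}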
diff --git a/include/linux/wimax/debug.h b/include/linux/wimax/debug.h
deleted file mode 100644
index cdae052bcdcd..000000000000
--- a/include/linux/wimax/debug.h
+++ /dev/null
@@ -1,491 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Linux WiMAX
- * Collection of tools to manage debug operations.
- *
- * Copyright (C) 2005-2007 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * Don't #include this file directly, read on!
- *
- * EXECUTING DEBUGGING ACTIONS OR NOT
- *
- * The main thing this framework provides is the decision power to take a
- * debug action (like printing a message) if the current debug level
- * allows it.
- *
- * The decision power is at two levels: at compile time (whatever
- * does not make it is compiled out) and at run time. The run-time
- * selection is done per-submodule (as they are declared by the user
- * of the framework).
- *
- * A call to d_test(L) (L being the target debug level) returns true
- * if the action should be taken because the current debug levels
- * allow it (both compile and run time).
- *
- * It follows that a call to d_test() that can be determined to be
- * always false at compile time will get the code depending on it
- * compiled out by optimization.
- *
- * DEBUG LEVELS
- *
- * It is up to the caller to define what each debugging level means.
- *
- * Convention sets 0 as "no debug" (so an action marked as debug level 0
- * will always be taken). The increasing debug levels are used for
- * increased verbosity.
- *
- * USAGE
- *
- * Group the code in modules and submodules inside each module [which
- * in most cases maps to Linux modules and .c files that compose
- * those].
- *
- * For each module, there is:
- *
- * - a MODULENAME (single word, legal C identifier)
- *
- * - a debug-levels.h header file that declares the list of
- * submodules and that is included by all .c files that use
- * the debugging tools. The file name can be anything.
- *
- * - some (optional) .c code to manipulate the runtime debug levels
- * through debugfs.
- *
- * The debug-levels.h file would look like:
- *
- * #ifndef __debug_levels__h__
- * #define __debug_levels__h__
- *
- * #define D_MODULENAME modulename
- * #define D_MASTER 10
- *
- * #include <linux/wimax/debug.h>
- *
- * enum d_module {
- * D_SUBMODULE_DECLARE(submodule_1),
- * D_SUBMODULE_DECLARE(submodule_2),
- * ...
- * D_SUBMODULE_DECLARE(submodule_N)
- * };
- *
- * #endif
- *
- * D_MASTER is the maximum compile-time debug level; any debug
- * actions above this level will be compiled out. D_MODULENAME is the
- * module name (a legal C identifier), which has to be unique for
- * each module (to avoid namespace collisions during linkage). Note
- * those #defines need to be done before #including debug.h.
- *
- * We declare N different submodules whose debug level can be
- * independently controlled during runtime.
- *
- * In a .c file of the module (and only in one of them), define the
- * following code:
- *
- * struct d_level D_LEVEL[] = {
- * D_SUBMODULE_DEFINE(submodule_1),
- * D_SUBMODULE_DEFINE(submodule_2),
- * ...
- * D_SUBMODULE_DEFINE(submodule_N),
- * };
- * size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
- *
- * Extern declarations for d_level_MODULENAME and d_level_size_MODULENAME
- * are provided in this file through the D_LEVEL and D_LEVEL_SIZE macros,
- * which are also #defined here.
- *
- * To manipulate from user space the levels, create a debugfs dentry
- * and then register each submodule with:
- *
- * d_level_register_debugfs("PREFIX_", submodule_X, parent);
- *
- * Where PREFIX_ is a name of your choosing. This will create a debugfs
- * file with a single numeric value that can be used to tweak it. To
- * remove the entries, just use debugfs_remove_recursive() on 'parent'.
- *
- * NOTE: remember that even if this will show attached to some
- * particular instance of a device, the settings are *global*.
- *
- * On each submodule (for example, .c files), the debug infrastructure
- * should be included like this:
- *
- * #define D_SUBMODULE submodule_x // matches one in debug-levels.h
- * #include "debug-levels.h"
- *
- * after #including all your include files.
- *
- * Now you can use the d_*() macros below [d_test(), d_fnstart(),
- * d_fnend(), d_printf(), d_dump()].
- *
- * If their debug level is greater than D_MASTER, they will be
- * compiled out.
- *
- * If their debug level is lower or equal than D_MASTER but greater
- * than the current debug level of their submodule, they'll be
- * ignored.
- *
- * Otherwise, the action will be performed.
- */
-#ifndef __debug__h__
-#define __debug__h__
-
-#include <linux/types.h>
-#include <linux/slab.h>
-
-struct device;
-
-/* Backend stuff */
-
-/*
- * Debug backend: generate a message header from a 'struct device'
- *
- * @head: buffer where to place the header
- * @head_size: length of @head
- * @dev: pointer to device used to generate a header from. If NULL,
- * an empty ("") header is generated.
- */
-static inline
-void __d_head(char *head, size_t head_size,
- struct device *dev)
-{
- if (dev == NULL)
- head[0] = 0;
- else if ((unsigned long)dev < 4096) {
- printk(KERN_ERR "E: Corrupt dev %p\n", dev);
- WARN_ON(1);
- } else
- snprintf(head, head_size, "%s %s: ",
- dev_driver_string(dev), dev_name(dev));
-}
-
-
-/*
- * Debug backend: log some message if debugging is enabled
- *
- * @l: intended debug level
- * @tag: tag to prefix the message with
- * @dev: 'struct device' associated to this message
- * @f: printf-like format and arguments
- *
- * Note this is optimized out if it doesn't pass the compile-time
- * check; however, it is *always* compiled. This is useful to make
- * sure the printf-like formats and variables are always checked and
- * they don't bit-rot if you have all the debugging disabled.
- */
-#define _d_printf(l, tag, dev, f, a...) \
-do { \
- char head[64]; \
- if (!d_test(l)) \
- break; \
- __d_head(head, sizeof(head), dev); \
- printk(KERN_ERR "%s%s%s: " f, head, __func__, tag, ##a); \
-} while (0)
-
-
-/*
- * CPP syntactic sugar to generate A_B like symbol names when one of
- * the arguments is a preprocessor #define.
- */
-#define __D_PASTE__(varname, modulename) varname##_##modulename
-#define __D_PASTE(varname, modulename) (__D_PASTE__(varname, modulename))
-#define _D_SUBMODULE_INDEX(_name) (D_SUBMODULE_DECLARE(_name))
-
-
-/*
- * Store a submodule's runtime debug level and name
- */
-struct d_level {
- u8 level;
- const char *name;
-};
-
-
-/*
- * List of available submodules and their debug levels
- *
- * We call them d_level_MODULENAME and d_level_size_MODULENAME; the
- * macros D_LEVEL and D_LEVEL_SIZE contain the name already for
- * convenience.
- *
- * This array and the size are defined on some .c file that is part of
- * the current module.
- */
-#define D_LEVEL __D_PASTE(d_level, D_MODULENAME)
-#define D_LEVEL_SIZE __D_PASTE(d_level_size, D_MODULENAME)
-
-extern struct d_level D_LEVEL[];
-extern size_t D_LEVEL_SIZE;
-
-
-/*
- * Frontend stuff
- *
- *
- * Stuff you need to declare prior to using the actual "debug" actions
- * (defined below).
- */
-
-#ifndef D_MODULENAME
-#error D_MODULENAME is not defined in your debug-levels.h file
-/**
- * D_MODULENAME - Name of the current module
- *
- * #define in your module's debug-levels.h, making sure it is
- * unique. This has to be a legal C identifier.
- */
-#define D_MODULENAME undefined_modulename
-#endif
-
-
-#ifndef D_MASTER
-#warning D_MASTER not defined, but debug.h included! [see docs]
-/**
- * D_MASTER - Compile time maximum debug level
- *
- * #define in your debug-levels.h file to the maximum debug level the
- * runtime code will be allowed to have. This allows you to provide a
- * main knob.
- *
- * Anything above that level will be optimized out of the compile.
- *
- * Defaults to zero (no debug code compiled in).
- *
- * Maximum one definition per module (at the debug-levels.h file).
- */
-#define D_MASTER 0
-#endif
-
-#ifndef D_SUBMODULE
-#error D_SUBMODULE not defined, but debug.h included! [see docs]
-/**
- * D_SUBMODULE - Name of the current submodule
- *
- * #define in your submodule .c file before #including debug-levels.h
- * to the name of the current submodule as previously declared and
- * defined with D_SUBMODULE_DECLARE() (in your module's
- * debug-levels.h) and D_SUBMODULE_DEFINE().
- *
- * This is used to provide runtime-control over the debug levels.
- *
- * Maximum one per .c file! Can be shared among different .c files
- * (meaning they belong to the same submodule categorization).
- */
-#define D_SUBMODULE undefined_module
-#endif
-
-
-/**
- * D_SUBMODULE_DECLARE - Declare a submodule for runtime debug level control
- *
- * @_name: name of the submodule, restricted to the chars that make up a
- * valid C identifier ([a-zA-Z0-9_]).
- *
- * Declare in the module's debug-levels.h header file as:
- *
- * enum d_module {
- * D_SUBMODULE_DECLARE(submodule_1),
- * D_SUBMODULE_DECLARE(submodule_2),
- * D_SUBMODULE_DECLARE(submodule_3),
- * };
- *
- * Some corresponding .c file needs to have a matching
- * D_SUBMODULE_DEFINE().
- */
-#define D_SUBMODULE_DECLARE(_name) __D_SUBMODULE_##_name
-
-
-/**
- * D_SUBMODULE_DEFINE - Define a submodule for runtime debug level control
- *
- * @_name: name of the submodule, restricted to the chars that make up a
- * valid C identifier ([a-zA-Z0-9_]).
- *
- * Use once per module (in some .c file) as:
- *
- * static
- * struct d_level d_level_SUBMODULENAME[] = {
- * D_SUBMODULE_DEFINE(submodule_1),
- * D_SUBMODULE_DEFINE(submodule_2),
- * D_SUBMODULE_DEFINE(submodule_3),
- * };
- * size_t d_level_size_SUBMODULENAME = ARRAY_SIZE(d_level_SUBMODULENAME);
- *
- * Matching D_SUBMODULE_DECLARE()s have to be present in a
- * debug-levels.h header file.
- */
-#define D_SUBMODULE_DEFINE(_name) \
-[__D_SUBMODULE_##_name] = { \
- .level = 0, \
- .name = #_name \
-}
-
-
-
-/* The actual "debug" operations */
-
-
-/**
- * d_test - Returns true if debugging should be enabled
- *
- * @l: intended debug level (unsigned)
- *
- * If the master debug switch is enabled and the current settings are
- * higher or equal to the requested level, then debugging
- * output/actions should be enabled.
- *
- * NOTE:
- *
- * This needs to be coded so that it can be evaluated at compile
- * time; this is why the BUG_ON() is placed inside the short-circuited
- * branch, so that everything compiles out when the D_MASTER check is
- * compile-time false.
- */
-#define d_test(l) \
-({ \
- unsigned __l = l; /* type enforcer */ \
- (D_MASTER) >= __l \
- && ({ \
- BUG_ON(_D_SUBMODULE_INDEX(D_SUBMODULE) >= D_LEVEL_SIZE);\
- D_LEVEL[_D_SUBMODULE_INDEX(D_SUBMODULE)].level >= __l; \
- }); \
-})
-
-
-/**
- * d_fnstart - log message at function start if debugging enabled
- *
- * @l: intended debug level
- * @_dev: 'struct device' pointer, NULL if none (for context)
- * @f: printf-like format and arguments
- */
-#define d_fnstart(l, _dev, f, a...) _d_printf(l, " FNSTART", _dev, f, ## a)
-
-
-/**
- * d_fnend - log message at function end if debugging enabled
- *
- * @l: intended debug level
- * @_dev: 'struct device' pointer, NULL if none (for context)
- * @f: printf-like format and arguments
- */
-#define d_fnend(l, _dev, f, a...) _d_printf(l, " FNEND", _dev, f, ## a)
-
-
-/**
- * d_printf - log message if debugging enabled
- *
- * @l: intended debug level
- * @_dev: 'struct device' pointer, NULL if none (for context)
- * @f: printf-like format and arguments
- */
-#define d_printf(l, _dev, f, a...) _d_printf(l, "", _dev, f, ## a)
-
-
-/**
- * d_dump - log buffer hex dump if debugging enabled
- *
- * @l: intended debug level
- * @dev: 'struct device' pointer, NULL if none (for context)
- * @ptr: pointer to the buffer to hex dump
- * @size: number of bytes to dump
- */
-#define d_dump(l, dev, ptr, size) \
-do { \
- char head[64]; \
- if (!d_test(l)) \
- break; \
- __d_head(head, sizeof(head), dev); \
- print_hex_dump(KERN_ERR, head, 0, 16, 1, \
- ((void *) ptr), (size), 0); \
-} while (0)
-
-
-/**
- * Export a submodule's debug level over debugfs as PREFIXSUBMODULE
- *
- * @prefix: string to prefix the name with
- * @name: name of the submodule (not a string, just the name)
- * @parent: debugfs parent dentry
- *
- * For removing, just use debugfs_remove_recursive() on the parent.
- */
-#define d_level_register_debugfs(prefix, name, parent) \
-({ \
- debugfs_create_u8( \
- prefix #name, 0600, parent, \
- &(D_LEVEL[__D_SUBMODULE_ ## name].level)); \
-})
-
-
-static inline
-void d_submodule_set(struct d_level *d_level, size_t d_level_size,
- const char *submodule, u8 level, const char *tag)
-{
- struct d_level *itr, *top;
- int index = -1;
-
- for (itr = d_level, top = itr + d_level_size; itr < top; itr++) {
- index++;
- if (itr->name == NULL) {
- printk(KERN_ERR "%s: itr->name NULL?? (%p, #%d)\n",
- tag, itr, index);
- continue;
- }
- if (!strcmp(itr->name, submodule)) {
- itr->level = level;
- return;
- }
- }
- printk(KERN_ERR "%s: unknown submodule %s\n", tag, submodule);
-}
-
-
-/**
- * d_parse_params - Parse a string with debug parameters from the
- * command line
- *
- * @d_level: level structure (D_LEVEL)
- * @d_level_size: number of items in the level structure
- * (D_LEVEL_SIZE).
- * @_params: string with the parameters; this is a space (not tab!)
- * separated list of NAME:VALUE, where VALUE is the debug level
- * and NAME is the name of the submodule.
- * @tag: string for error messages (example: MODULE.ARGNAME).
- */
-static inline
-void d_parse_params(struct d_level *d_level, size_t d_level_size,
- const char *_params, const char *tag)
-{
- char submodule[130], *params, *params_orig, *token, *colon;
- unsigned level, tokens;
-
- if (_params == NULL)
- return;
- params_orig = kstrdup(_params, GFP_KERNEL);
- params = params_orig;
- while (1) {
- token = strsep(&params, " ");
- if (token == NULL)
- break;
- if (*token == '\0') /* eat joint spaces */
- continue;
- /* kernel's sscanf %s eats until whitespace, so we
- * replace : by \n so it doesn't get eaten later by
- * strsep */
- colon = strchr(token, ':');
- if (colon != NULL)
- *colon = '\n';
- tokens = sscanf(token, "%s\n%u", submodule, &level);
- if (colon != NULL)
- *colon = ':'; /* set back, for error messages */
- if (tokens == 2)
- d_submodule_set(d_level, d_level_size,
- submodule, level, tag);
- else
- printk(KERN_ERR "%s: can't parse '%s' as a "
- "SUBMODULE:LEVEL (%d tokens)\n",
- tag, token, tokens);
- }
- kfree(params_orig);
-}
-
-#endif /* #ifndef __debug__h__ */
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 0fdbf653b173..4807ca4d52e0 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -20,7 +20,6 @@
* zsmalloc mapping modes
*
* NOTE: These only make a difference when a mapped object spans pages.
- * They also have no effect when ZSMALLOC_PGTABLE_MAPPING is selected.
*/
enum zs_mapmode {
ZS_MM_RW, /* normal read-write mapping */