author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2020-10-15 17:09:02 +0200
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2020-10-15 17:09:02 +0200
commit     e535fbfd36692fe929e04b32ee629a95775226a2 (patch)
tree       4258ece830ac833c9ca6db313b2fe8de2c9533dc
parent     eda0e9957c15a825c9f19e511d78e79e4867df41 (diff)
download   linux-rt-e535fbfd36692fe929e04b32ee629a95775226a2.tar.gz

[ANNOUNCE] v5.9-rt16 (tag: v5.9-rt16-patches)
Dear RT folks!

I'm pleased to announce the v5.9-rt16 patch set.

Changes since v5.9-rt15:

  - The rtmutex rework resulted in "unused variable" warnings if built
    without lockdep. Reported by Mike Galbraith.

  - The softirq rework led to a warning which could be triggered by the
    `ss' tool. Reported by Mike Galbraith.

  - The migrate-disable rework removed a data field from a trace-event.
    Adding it back resulted in a compile failure for !RT. Patch by
    Clark Williams.

Known issues

  - It has been pointed out that due to changes to the printk code the
    internal buffer representation changed. This is only an issue if
    tools like `crash' are used to extract the printk buffer from a
    kernel memory image.

The delta patch against v5.9-rt15 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.9/incr/patch-5.9-rt15-rt16.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.9-rt16

The RT patch against v5.9 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patch-5.9-rt16.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--  patches/0016-locking-rtmutex-add-sleeping-lock-implementation.patch  |  14
-rw-r--r--  patches/ftrace-migrate-disable-tracing.patch                         |  28
-rw-r--r--  patches/localversion.patch                                           |   2
-rw-r--r--  patches/preempt-lazy-support.patch                                   |  10
-rw-r--r--  patches/series                                                       |   1
-rw-r--r--  patches/tcp-Remove-superfluous-BH-disable-around-listening_h.patch   | 103
6 files changed, 139 insertions, 19 deletions
diff --git a/patches/0016-locking-rtmutex-add-sleeping-lock-implementation.patch b/patches/0016-locking-rtmutex-add-sleeping-lock-implementation.patch
index 13320e483313..5147454efed8 100644
--- a/patches/0016-locking-rtmutex-add-sleeping-lock-implementation.patch
+++ b/patches/0016-locking-rtmutex-add-sleeping-lock-implementation.patch
@@ -225,10 +225,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ rt_spin_lock_nested(lock, subclass); \
+ } while (0)
+
-+# define spin_lock_nest_lock(lock, nest_lock) \
++# define spin_lock_nest_lock(lock, subclass) \
+ do { \
-+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
-+ rt_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
++ typecheck(struct lockdep_map *, &(subclass)->dep_map); \
++ rt_spin_lock_nest_lock(lock, &(subclass)->dep_map); \
+ } while (0)
+
+# define spin_lock_irqsave_nested(lock, flags, subclass) \
@@ -238,15 +238,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ rt_spin_lock_nested(lock, subclass); \
+ } while (0)
+#else
-+# define spin_lock_nested(lock, subclass) spin_lock(lock)
-+# define spin_lock_nest_lock(lock, nest_lock) spin_lock(lock)
-+# define spin_lock_bh_nested(lock, subclass) spin_lock_bh(lock)
++# define spin_lock_nested(lock, subclass) spin_lock(((void)(subclass), (lock)))
++# define spin_lock_nest_lock(lock, subclass) spin_lock(((void)(subclass), (lock)))
++# define spin_lock_bh_nested(lock, subclass) spin_lock_bh(((void)(subclass), (lock)))
+
+# define spin_lock_irqsave_nested(lock, flags, subclass) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
-+ spin_lock(lock); \
++ spin_lock(((void)(subclass), (lock))); \
+ } while (0)
+#endif
+
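
[Aside] The hunk above changes the fallback macros (used when lockdep is off) so that they still evaluate their subclass/nest_lock argument; that is what silences the "unused variable" warnings mentioned in the announcement. A minimal stand-alone sketch of the comma-expression idiom, with made-up macro and variable names rather than the kernel's:

    /* Sketch only -- hypothetical names, not the kernel macros. */
    #include <stdio.h>

    #define my_lock(l)  printf("locking %s\n", (l))

    /*
     * Fallback used when the debug/nesting machinery is compiled out:
     * evaluating 'subclass' through a (void) cast keeps the compiler
     * from flagging it as unused, yet generates no extra code.
     */
    #define my_lock_nested(l, subclass)  my_lock(((void)(subclass), (l)))

    int main(void)
    {
            int nesting_level = 1;  /* only referenced via the macro */

            my_lock_nested("bucket lock", nesting_level);
            return 0;
    }

The (void) cast consumes the argument without producing code, so the non-lockdep build stays warning-free while expanding to exactly the plain lock call.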
diff --git a/patches/ftrace-migrate-disable-tracing.patch b/patches/ftrace-migrate-disable-tracing.patch
index ca08691b6957..9e0c0153f295 100644
--- a/patches/ftrace-migrate-disable-tracing.patch
+++ b/patches/ftrace-migrate-disable-tracing.patch
@@ -5,10 +5,10 @@ Subject: trace: Add migrate-disabled counter to tracing output
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/trace_events.h | 1 +
- kernel/trace/trace.c | 16 ++++++++++------
+ kernel/trace/trace.c | 25 +++++++++++++++++++------
kernel/trace/trace_events.c | 1 +
kernel/trace/trace_output.c | 5 +++++
- 4 files changed, 17 insertions(+), 6 deletions(-)
+ 4 files changed, 26 insertions(+), 6 deletions(-)
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -22,16 +22,32 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TRACE_EVENT_TYPE_MAX \
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2457,6 +2457,8 @@ tracing_generic_entry_update(struct trac
+@@ -2437,6 +2437,15 @@ enum print_line_t trace_handle_return(st
+ }
+ EXPORT_SYMBOL_GPL(trace_handle_return);
+
++static unsigned short migration_disable_value(struct task_struct *tsk)
++{
++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
++ return tsk ? tsk->migration_disabled : 0;
++#else
++ return 0;
++#endif
++}
++
+ void
+ tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
+ unsigned long flags, int pc)
+@@ -2457,6 +2466,8 @@ tracing_generic_entry_update(struct trac
((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
+
-+ entry->migrate_disable = (tsk) ? tsk->migration_disabled & 0xFF : 0;
++ entry->migrate_disable = migration_disable_value(tsk);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-@@ -3789,9 +3791,10 @@ static void print_lat_help_header(struct
+@@ -3789,9 +3800,10 @@ static void print_lat_help_header(struct
"# | / _----=> need-resched \n"
"# || / _---=> hardirq/softirq \n"
"# ||| / _--=> preempt-depth \n"
@@ -45,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void print_event_info(struct array_buffer *buf, struct seq_file *m)
-@@ -3829,9 +3832,10 @@ static void print_func_help_header_irq(s
+@@ -3829,9 +3841,10 @@ static void print_func_help_header_irq(s
seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 340816c8febc..0cccc7790a5d 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt15
++-rt16
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 8cee64c5571d..ba62c99f49c5 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -484,7 +484,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2444,6 +2444,7 @@ tracing_generic_entry_update(struct trac
+@@ -2453,6 +2453,7 @@ tracing_generic_entry_update(struct trac
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
@@ -492,7 +492,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
entry->pid = (tsk) ? tsk->pid : 0;
entry->type = type;
entry->flags =
-@@ -2455,7 +2456,8 @@ tracing_generic_entry_update(struct trac
+@@ -2464,7 +2465,8 @@ tracing_generic_entry_update(struct trac
((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
@@ -501,8 +501,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
- entry->migrate_disable = (tsk) ? tsk->migration_disabled & 0xFF : 0;
-@@ -3786,15 +3788,17 @@ unsigned long trace_total_entries(struct
+ entry->migrate_disable = migration_disable_value(tsk);
+@@ -3795,15 +3797,17 @@ unsigned long trace_total_entries(struct
static void print_lat_help_header(struct seq_file *m)
{
@@ -529,7 +529,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void print_event_info(struct array_buffer *buf, struct seq_file *m)
-@@ -3828,14 +3832,16 @@ static void print_func_help_header_irq(s
+@@ -3837,14 +3841,16 @@ static void print_func_help_header_irq(s
print_event_info(buf, m);
diff --git a/patches/series b/patches/series
index 26a10ebbd376..1a9a9eb1e033 100644
--- a/patches/series
+++ b/patches/series
@@ -126,6 +126,7 @@ tpm-remove-tpm_dev_wq_lock.patch
############################################################
shmem-Use-raw_spinlock_t-for-stat_lock.patch
net--Move-lockdep-where-it-belongs.patch
+tcp-Remove-superfluous-BH-disable-around-listening_h.patch
# SoftIRQ
x86-fpu--Do-not-disable-BH-on-RT.patch
diff --git a/patches/tcp-Remove-superfluous-BH-disable-around-listening_h.patch b/patches/tcp-Remove-superfluous-BH-disable-around-listening_h.patch
new file mode 100644
index 000000000000..dc4f24c2d0d9
--- /dev/null
+++ b/patches/tcp-Remove-superfluous-BH-disable-around-listening_h.patch
@@ -0,0 +1,103 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 12 Oct 2020 17:33:54 +0200
+Subject: [PATCH] tcp: Remove superfluous BH-disable around listening_hash
+
+Commit
+ 9652dc2eb9e40 ("tcp: relax listening_hash operations")
+
+removed the need to disable bottom half while acquiring
+listening_hash.lock. There are still two callers left which disable
+bottom half before the lock is acquired.
+
+Drop local_bh_disable() around __inet_hash() which acquires
+listening_hash->lock, invoke inet_ehash_nolisten() with disabled BH.
+inet_unhash() conditionally acquires listening_hash->lock.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/ipv4/inet_hashtables.c | 19 ++++++++++++-------
+ net/ipv6/inet6_hashtables.c | 5 +----
+ 2 files changed, 13 insertions(+), 11 deletions(-)
+
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 239e54474b653..fcb105cbb5465 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -585,7 +585,9 @@ int __inet_hash(struct sock *sk, struct sock *osk)
+ int err = 0;
+
+ if (sk->sk_state != TCP_LISTEN) {
++ local_bh_disable();
+ inet_ehash_nolisten(sk, osk);
++ local_bh_enable();
+ return 0;
+ }
+ WARN_ON(!sk_unhashed(sk));
+@@ -617,11 +619,8 @@ int inet_hash(struct sock *sk)
+ {
+ int err = 0;
+
+- if (sk->sk_state != TCP_CLOSE) {
+- local_bh_disable();
++ if (sk->sk_state != TCP_CLOSE)
+ err = __inet_hash(sk, NULL);
+- local_bh_enable();
+- }
+
+ return err;
+ }
+@@ -632,17 +631,20 @@ void inet_unhash(struct sock *sk)
+ struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
+ struct inet_listen_hashbucket *ilb = NULL;
+ spinlock_t *lock;
++ bool state_listen;
+
+ if (sk_unhashed(sk))
+ return;
+
+ if (sk->sk_state == TCP_LISTEN) {
++ state_listen = true;
+ ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
+- lock = &ilb->lock;
++ spin_lock(&ilb->lock);
+ } else {
++ state_listen = false;
+ lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
++ spin_lock_bh(lock);
+ }
+- spin_lock_bh(lock);
+ if (sk_unhashed(sk))
+ goto unlock;
+
+@@ -655,7 +657,10 @@ void inet_unhash(struct sock *sk)
+ __sk_nulls_del_node_init_rcu(sk);
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+ unlock:
+- spin_unlock_bh(lock);
++ if (state_listen)
++ spin_unlock(&ilb->lock);
++ else
++ spin_unlock_bh(lock);
+ }
+ EXPORT_SYMBOL_GPL(inet_unhash);
+
+diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
+index 2d3add9e61162..50fd17cbf3ec7 100644
+--- a/net/ipv6/inet6_hashtables.c
++++ b/net/ipv6/inet6_hashtables.c
+@@ -335,11 +335,8 @@ int inet6_hash(struct sock *sk)
+ {
+ int err = 0;
+
+- if (sk->sk_state != TCP_CLOSE) {
+- local_bh_disable();
++ if (sk->sk_state != TCP_CLOSE)
+ err = __inet_hash(sk, NULL);
+- local_bh_enable();
+- }
+
+ return err;
+ }
+--
+2.28.0
+
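
[Aside] As a closing illustration of the locking shape the tcp patch describes, here is a sketch with stub types rather than the networking code: the listener bucket is now taken with a plain lock (no bottom-half disabling needed), the established hash keeps the BH-disabling variant, and the unlock side must mirror whichever branch was chosen.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stubs standing in for the kernel's spinlock primitives. */
    struct example_lock { int unused; };

    static void lock_plain(struct example_lock *l)   { (void)l; puts("lock (BH stays enabled)"); }
    static void unlock_plain(struct example_lock *l) { (void)l; puts("unlock (BH stays enabled)"); }
    static void lock_bh(struct example_lock *l)      { (void)l; puts("disable BH + lock"); }
    static void unlock_bh(struct example_lock *l)    { (void)l; puts("unlock + enable BH"); }

    static struct example_lock listener_bucket_lock;
    static struct example_lock ehash_lock;

    /*
     * Shape of the reworked unhash path: remember which variant was
     * taken so the unlock matches it exactly.
     */
    static void example_unhash(bool is_listener)
    {
            if (is_listener)
                    lock_plain(&listener_bucket_lock);
            else
                    lock_bh(&ehash_lock);

            /* ... drop the entry from the chosen hash table ... */

            if (is_listener)
                    unlock_plain(&listener_bucket_lock);
            else
                    unlock_bh(&ehash_lock);
    }

    int main(void)
    {
            example_unhash(true);   /* TCP_LISTEN socket  */
            example_unhash(false);  /* established socket */
            return 0;
    }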