author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2021-11-18 16:04:28 +0100
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2021-11-18 16:04:28 +0100
commit     ad468bff7b143f2cd984c21cba71c647af873a21 (patch)
tree       534e5e02ece39eb03a5978e182b664ffd8e5e8de
parent     1970b14d543a54f2419e6bf143938e6e2a33ab6d (diff)
download   linux-rt-ad468bff7b143f2cd984c21cba71c647af873a21.tar.gz

[ANNOUNCE] v5.15.2-rt20  (v5.15.2-rt20-patches)
Dear RT folks!

I'm pleased to announce the v5.15.2-rt20 patch set.

Changes since v5.15.2-rt19:

  - A patch from upstream to avoid a "large stack frame" warning.

Known issues

  - netconsole triggers WARN.

  - The "Memory controller" (CONFIG_MEMCG) has been disabled.

  - Valentin Schneider reported a few splats on ARM64, see
    https://lkml.kernel.org/r/20210810134127.1394269-1-valentin.schneider@arm.com

The delta patch against v5.15.2-rt19 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.15/incr/patch-5.15.2-rt19-rt20.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.15.2-rt20

The RT patch against v5.15.2 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.15/older/patch-5.15.2-rt20.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.15/older/patches-5.15.2-rt20.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--  patches/Add_localversion_for_-RT_release.patch                       |   2
-rw-r--r--  patches/net-sched-gred-dynamically-allocate-tc_gred_qopt_off.patch   | 126
-rw-r--r--  patches/series                                                       |   1
3 files changed, 128 insertions(+), 1 deletion(-)
diff --git a/patches/Add_localversion_for_-RT_release.patch b/patches/Add_localversion_for_-RT_release.patch
index c0ab4419d1ec..2ffe0da65d7c 100644
--- a/patches/Add_localversion_for_-RT_release.patch
+++ b/patches/Add_localversion_for_-RT_release.patch
@@ -15,4 +15,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt19
++-rt20
diff --git a/patches/net-sched-gred-dynamically-allocate-tc_gred_qopt_off.patch b/patches/net-sched-gred-dynamically-allocate-tc_gred_qopt_off.patch
new file mode 100644
index 000000000000..a5c6c3bd8266
--- /dev/null
+++ b/patches/net-sched-gred-dynamically-allocate-tc_gred_qopt_off.patch
@@ -0,0 +1,126 @@
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Tue, 26 Oct 2021 12:07:11 +0200
+Subject: [PATCH] net: sched: gred: dynamically allocate tc_gred_qopt_offload
+
+The tc_gred_qopt_offload structure has grown too big to be on the
+stack for 32-bit architectures after recent changes.
+
+net/sched/sch_gred.c:903:13: error: stack frame size (1180) exceeds limit (1024) in 'gred_destroy' [-Werror,-Wframe-larger-than]
+net/sched/sch_gred.c:310:13: error: stack frame size (1212) exceeds limit (1024) in 'gred_offload' [-Werror,-Wframe-larger-than]
+
+Use dynamic allocation per qdisc to avoid this.
+
+Fixes: 50dc9a8572aa ("net: sched: Merge Qdisc::bstats and Qdisc::cpu_bstats data types")
+Fixes: 67c9e6270f30 ("net: sched: Protect Qdisc::bstats with u64_stats")
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20211026100711.nalhttf6mbe6sudx@linutronix.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+---
+ net/sched/sch_gred.c | 50 ++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 30 insertions(+), 20 deletions(-)
+
+--- a/net/sched/sch_gred.c
++++ b/net/sched/sch_gred.c
+@@ -56,6 +56,7 @@ struct gred_sched {
+ u32 DPs;
+ u32 def;
+ struct red_vars wred_set;
++ struct tc_gred_qopt_offload *opt;
+ };
+
+ static inline int gred_wred_mode(struct gred_sched *table)
+@@ -311,42 +312,43 @@ static void gred_offload(struct Qdisc *s
+ {
+ struct gred_sched *table = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+- struct tc_gred_qopt_offload opt = {
+- .command = command,
+- .handle = sch->handle,
+- .parent = sch->parent,
+- };
++ struct tc_gred_qopt_offload *opt = table->opt;
+
+ if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
+ return;
+
++ memset(opt, 0, sizeof(*opt));
++ opt->command = command;
++ opt->handle = sch->handle;
++ opt->parent = sch->parent;
++
+ if (command == TC_GRED_REPLACE) {
+ unsigned int i;
+
+- opt.set.grio_on = gred_rio_mode(table);
+- opt.set.wred_on = gred_wred_mode(table);
+- opt.set.dp_cnt = table->DPs;
+- opt.set.dp_def = table->def;
++ opt->set.grio_on = gred_rio_mode(table);
++ opt->set.wred_on = gred_wred_mode(table);
++ opt->set.dp_cnt = table->DPs;
++ opt->set.dp_def = table->def;
+
+ for (i = 0; i < table->DPs; i++) {
+ struct gred_sched_data *q = table->tab[i];
+
+ if (!q)
+ continue;
+- opt.set.tab[i].present = true;
+- opt.set.tab[i].limit = q->limit;
+- opt.set.tab[i].prio = q->prio;
+- opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
+- opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
+- opt.set.tab[i].is_ecn = gred_use_ecn(q);
+- opt.set.tab[i].is_harddrop = gred_use_harddrop(q);
+- opt.set.tab[i].probability = q->parms.max_P;
+- opt.set.tab[i].backlog = &q->backlog;
++ opt->set.tab[i].present = true;
++ opt->set.tab[i].limit = q->limit;
++ opt->set.tab[i].prio = q->prio;
++ opt->set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
++ opt->set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
++ opt->set.tab[i].is_ecn = gred_use_ecn(q);
++ opt->set.tab[i].is_harddrop = gred_use_harddrop(q);
++ opt->set.tab[i].probability = q->parms.max_P;
++ opt->set.tab[i].backlog = &q->backlog;
+ }
+- opt.set.qstats = &sch->qstats;
++ opt->set.qstats = &sch->qstats;
+ }
+
+- dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
++ dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, opt);
+ }
+
+ static int gred_offload_dump_stats(struct Qdisc *sch)
+@@ -731,6 +733,7 @@ static int gred_change(struct Qdisc *sch
+ static int gred_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+ {
++ struct gred_sched *table = qdisc_priv(sch);
+ struct nlattr *tb[TCA_GRED_MAX + 1];
+ int err;
+
+@@ -754,6 +757,12 @@ static int gred_init(struct Qdisc *sch,
+ sch->limit = qdisc_dev(sch)->tx_queue_len
+ * psched_mtu(qdisc_dev(sch));
+
++ if (qdisc_dev(sch)->netdev_ops->ndo_setup_tc) {
++ table->opt = kzalloc(sizeof(*table->opt), GFP_KERNEL);
++ if (!table->opt)
++ return -ENOMEM;
++ }
++
+ return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
+ }
+
+@@ -910,6 +919,7 @@ static void gred_destroy(struct Qdisc *s
+ gred_destroy_vq(table->tab[i]);
+ }
+ gred_offload(sch, TC_GRED_DESTROY);
++ kfree(table->opt);
+ }
+
+ static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
diff --git a/patches/series b/patches/series
index 04ff72f08b60..49ceadffa1df 100644
--- a/patches/series
+++ b/patches/series
@@ -68,6 +68,7 @@ net-sched-Allow-statistics-reads-from-softirq.patch
net-sched-fix-logic-error-in-qdisc_run_begin.patch
net-sched-remove-one-pair-of-atomic-operations.patch
net-stats-Read-the-statistics-in-___gnet_stats_copy_.patch
+net-sched-gred-dynamically-allocate-tc_gred_qopt_off.patch
# tip, irqwork
0001_sched_rt_annotate_the_rt_balancing_logic_irqwork_as_irq_work_hard_irq.patch
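
As a side note for readers following the gred patch quoted above: the change is an instance of a common pattern for silencing -Wframe-larger-than warnings, namely hoisting a structure that has grown too large for a 1 KiB stack frame out of the function and tying one dynamically allocated instance to the owning object, re-zeroing and reusing it on every call and freeing it when the object is torn down. Below is a minimal, self-contained userspace sketch of that pattern only; it is not kernel code, and the names (big_offload_params, fake_qdisc) are made up for illustration rather than taken from net/sched/sch_gred.c.

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    /* Stand-in for a structure that has become too big to live on the
     * stack of a 32-bit build (frame limit is typically 1024 bytes). */
    struct big_offload_params {
    	unsigned int command;
    	unsigned int handle;
    	unsigned char payload[2048];
    };

    /* Stand-in for the object (the qdisc in the real patch) that owns
     * the scratch buffer for its whole lifetime. */
    struct fake_qdisc {
    	struct big_offload_params *opt;	/* allocated once, reused per call */
    };

    static int fake_qdisc_init(struct fake_qdisc *q)
    {
    	/* One zeroed allocation for the lifetime of the object,
    	 * analogous to the kzalloc() added in gred_init(). */
    	q->opt = calloc(1, sizeof(*q->opt));
    	return q->opt ? 0 : -1;
    }

    static void fake_qdisc_offload(struct fake_qdisc *q, unsigned int command)
    {
    	struct big_offload_params *opt = q->opt;

    	/* Re-zero and refill the shared buffer instead of declaring the
    	 * struct on the stack, mirroring the memset() in gred_offload(). */
    	memset(opt, 0, sizeof(*opt));
    	opt->command = command;
    	/* ... fill opt->payload and hand it to the consumer ... */
    }

    static void fake_qdisc_destroy(struct fake_qdisc *q)
    {
    	free(q->opt);		/* analogous to the kfree() in gred_destroy() */
    	q->opt = NULL;
    }

    int main(void)
    {
    	struct fake_qdisc q;

    	if (fake_qdisc_init(&q))
    		return 1;
    	fake_qdisc_offload(&q, 1);
    	fake_qdisc_destroy(&q);
    	puts("ok");
    	return 0;
    }

One detail the sketch leaves out: the real patch only performs the allocation when the device actually supports offload (the ndo_setup_tc check in gred_init()), so qdiscs on non-offloading devices pay nothing for the buffer.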