Diffstat (limited to 'target/linux/generic/backport-5.10/610-v5.13-57-netfilter-flowtable-Set-offload-timeouts-according-t.patch')
-rw-r--r--  target/linux/generic/backport-5.10/610-v5.13-57-netfilter-flowtable-Set-offload-timeouts-according-t.patch  134
1 file changed, 0 insertions, 134 deletions
diff --git a/target/linux/generic/backport-5.10/610-v5.13-57-netfilter-flowtable-Set-offload-timeouts-according-t.patch b/target/linux/generic/backport-5.10/610-v5.13-57-netfilter-flowtable-Set-offload-timeouts-according-t.patch
deleted file mode 100644
index 1e82308eaa..0000000000
--- a/target/linux/generic/backport-5.10/610-v5.13-57-netfilter-flowtable-Set-offload-timeouts-according-t.patch
+++ /dev/null
@@ -1,134 +0,0 @@
-From: Oz Shlomo <ozsh@nvidia.com>
-Date: Thu, 3 Jun 2021 15:12:35 +0300
-Subject: [PATCH] netfilter: flowtable: Set offload timeouts according to proto
- values
-
-Currently the aging period for tcp/udp connections is hard coded to
-30 seconds. Aged tcp/udp connections configure a hard coded 120/30
-seconds pickup timeout for conntrack.
-This configuration may be too aggressive or permissive for some users.
-
-Dynamically configure the nf flow table GC timeout intervals according
-to the user defined values.
-
-Signed-off-by: Oz Shlomo <ozsh@nvidia.com>
-Reviewed-by: Paul Blakey <paulb@nvidia.com>
-Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
----
-
---- a/include/net/netfilter/nf_flow_table.h
-+++ b/include/net/netfilter/nf_flow_table.h
-@@ -174,6 +174,8 @@ struct flow_offload {
- #define NF_FLOW_TIMEOUT (30 * HZ)
- #define nf_flowtable_time_stamp (u32)jiffies
-
-+unsigned long flow_offload_get_timeout(struct flow_offload *flow);
-+
- static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
- {
- return (__s32)(timeout - nf_flowtable_time_stamp);
---- a/net/netfilter/nf_flow_table_core.c
-+++ b/net/netfilter/nf_flow_table_core.c
-@@ -175,12 +175,10 @@ static void flow_offload_fixup_tcp(struc
- tcp->seen[1].td_maxwin = 0;
- }
-
--#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ)
--#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ)
--
- static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
- {
- const struct nf_conntrack_l4proto *l4proto;
-+ struct net *net = nf_ct_net(ct);
- int l4num = nf_ct_protonum(ct);
- unsigned int timeout;
-
-@@ -188,12 +186,17 @@ static void flow_offload_fixup_ct_timeou
- if (!l4proto)
- return;
-
-- if (l4num == IPPROTO_TCP)
-- timeout = NF_FLOWTABLE_TCP_PICKUP_TIMEOUT;
-- else if (l4num == IPPROTO_UDP)
-- timeout = NF_FLOWTABLE_UDP_PICKUP_TIMEOUT;
-- else
-+ if (l4num == IPPROTO_TCP) {
-+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
-+
-+ timeout = tn->offload_pickup;
-+ } else if (l4num == IPPROTO_UDP) {
-+ struct nf_udp_net *tn = nf_udp_pernet(net);
-+
-+ timeout = tn->offload_pickup;
-+ } else {
- return;
-+ }
-
- if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
- WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
-@@ -265,11 +268,35 @@ static const struct rhashtable_params nf
- .automatic_shrinking = true,
- };
-
-+unsigned long flow_offload_get_timeout(struct flow_offload *flow)
-+{
-+ const struct nf_conntrack_l4proto *l4proto;
-+ unsigned long timeout = NF_FLOW_TIMEOUT;
-+ struct net *net = nf_ct_net(flow->ct);
-+ int l4num = nf_ct_protonum(flow->ct);
-+
-+ l4proto = nf_ct_l4proto_find(l4num);
-+ if (!l4proto)
-+ return timeout;
-+
-+ if (l4num == IPPROTO_TCP) {
-+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
-+
-+ timeout = tn->offload_timeout;
-+ } else if (l4num == IPPROTO_UDP) {
-+ struct nf_udp_net *tn = nf_udp_pernet(net);
-+
-+ timeout = tn->offload_timeout;
-+ }
-+
-+ return timeout;
-+}
-+
- int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
- {
- int err;
-
-- flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
-+ flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
-
- err = rhashtable_insert_fast(&flow_table->rhashtable,
- &flow->tuplehash[0].node,
-@@ -301,7 +328,7 @@ EXPORT_SYMBOL_GPL(flow_offload_add);
- void flow_offload_refresh(struct nf_flowtable *flow_table,
- struct flow_offload *flow)
- {
-- flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
-+ flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
-
- if (likely(!nf_flowtable_hw_offload(flow_table)))
- return;
---- a/net/netfilter/nf_flow_table_offload.c
-+++ b/net/netfilter/nf_flow_table_offload.c
-@@ -885,7 +885,7 @@ static void flow_offload_work_stats(stru
-
- lastused = max_t(u64, stats[0].lastused, stats[1].lastused);
- offload->flow->timeout = max_t(u64, offload->flow->timeout,
-- lastused + NF_FLOW_TIMEOUT);
-+ lastused + flow_offload_get_timeout(offload->flow));
-
- if (offload->flowtable->flags & NF_FLOWTABLE_COUNTER) {
- if (stats[0].pkts)
-@@ -989,7 +989,7 @@ void nf_flow_offload_stats(struct nf_flo
- __s32 delta;
-
- delta = nf_flow_timeout_delta(flow->timeout);
-- if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10))
-+ if ((delta >= (9 * flow_offload_get_timeout(flow)) / 10))
- return;
-
- offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_STATS);
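Note: the 30-second default that this patch replaces with per-protocol values is NF_FLOW_TIMEOUT above; the per-netns offload_timeout/offload_pickup fields it reads are introduced by the companion "Introduce tcp/udp offload timeout configuration" patches in the same upstream series, not by this file. As a rough illustration only, on a kernel carrying the full series the flowtable aging interval is expected to be tunable per network namespace via sysctl, for example:

   # illustrative only -- knob names come from the companion conntrack
   # sysctl patches in the same series, not from this patch; values are
   # in seconds
   sysctl -w net.netfilter.nf_flowtable_tcp_timeout=60
   sysctl -w net.netfilter.nf_flowtable_udp_timeout=60

with flow_offload_get_timeout() falling back to the old 30-second NF_FLOW_TIMEOUT for any other protocol.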