summaryrefslogtreecommitdiff
path: root/kernel/bpf
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2020-02-24 15:01:39 +0100
committerAlexei Starovoitov <ast@kernel.org>2020-02-24 16:18:20 -0800
commit8a37963c7ac9ecb7f86f8ebda020e3f8d6d7b8a0 (patch)
treeee849da409619592dbcaf62bdda95f53bcfdbb99 /kernel/bpf
parent1d7bf6b7d3e8353c3fac648f3f9b3010458570c2 (diff)
downloadlinux-next-8a37963c7ac9ecb7f86f8ebda020e3f8d6d7b8a0.tar.gz
bpf: Remove recursion prevention from rcu free callback
If an element is freed via RCU then recursion into BPF instrumentation functions is not a concern. The element is already detached from the map and the RCU callback does not hold any locks on which a kprobe, perf event or tracepoint attached BPF program could deadlock. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Link: https://lore.kernel.org/bpf/20200224145643.259118710@linutronix.de
Diffstat (limited to 'kernel/bpf')
-rw-r--r--kernel/bpf/hashtab.c8
1 file changed, 0 insertions, 8 deletions
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 65711a220fe0..431cef22d29d 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -706,15 +706,7 @@ static void htab_elem_free_rcu(struct rcu_head *head)
struct htab_elem *l = container_of(head, struct htab_elem, rcu);
struct bpf_htab *htab = l->htab;
- /* must increment bpf_prog_active to avoid kprobe+bpf triggering while
- * we're calling kfree, otherwise deadlock is possible if kprobes
- * are placed somewhere inside of slub
- */
- preempt_disable();
- __this_cpu_inc(bpf_prog_active);
htab_elem_free(htab, l);
- __this_cpu_dec(bpf_prog_active);
- preempt_enable();
}
static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)