Commit f9bd89e

bpf: arm64: Optimize recursion detection by not using atomics
BPF programs detect recursion using a per-CPU 'active' flag in struct bpf_prog. The trampoline currently sets and clears this flag with atomic operations. On some arm64 platforms (e.g., Neoverse V2 with LSE), per-CPU atomic operations are relatively slow: unlike x86_64, where per-CPU updates can avoid cross-core atomicity, arm64 LSE atomics are always atomic across all cores, which is unnecessary overhead for strictly per-CPU state.

This patch removes atomics from the recursion detection path on arm64 by changing 'active' to a per-CPU array of four u8 counters, one per context: {NMI, hard-irq, soft-irq, normal}. The running context does a non-atomic increment/decrement on its own element. After the increment, recursion is detected by reading the array as a u32 and verifying that only the expected element changed: a change in any other element indicates inter-context recursion, and a value > 1 in the running context's element indicates same-context recursion.

For example, starting from {0,0,0,0}, a normal-context trigger changes the array to {0,0,0,1}. If an NMI then arrives on the same CPU and triggers the program, the array becomes {1,0,0,1}. When the NMI context checks the u32 against its expected mask (0x01000000), it observes 0x01000001 and correctly reports recursion. Same-context recursion is detected analogously: a second normal-context entry would observe 0x00000002 instead of the expected 0x00000001.

Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
1 parent 3578ec0 commit f9bd89e
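
To make the scheme described above concrete, here is a minimal userspace sketch (not kernel code) of the counter layout and mask check. The function names (try_enter/leave) and the context numbering 0 = normal, 1 = softirq, 2 = hardirq, 3 = NMI are illustrative assumptions consistent with the commit-message example, not identifiers from the kernel sources.

#include <stdint.h>
#include <stdio.h>

/* One counter per context: [0]=normal, [1]=softirq, [2]=hardirq, [3]=NMI. */
static uint8_t active[4];

/* Read the four counters as one little-endian 32-bit value. */
static uint32_t read_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/*
 * Non-atomic increment of this context's counter, then a single 32-bit
 * read to check that no other context's counter is set and that this
 * context's counter is exactly 1. Returns 1 if the program may run,
 * 0 if recursion was detected. The counter is incremented either way,
 * so the caller must still call leave() afterwards.
 */
static int try_enter(unsigned int rctx)
{
	active[rctx]++;
	return read_le32(active) == (1u << (rctx * 8));
}

static void leave(unsigned int rctx)
{
	active[rctx]--;
}

int main(void)
{
	printf("normal entry:     %d\n", try_enter(0));	/* 1: reads 0x00000001, as expected */
	printf("NMI on top of it: %d\n", try_enter(3));	/* 0: reads 0x01000001, not 0x01000000 */
	leave(3);
	printf("normal re-entry:  %d\n", try_enter(0));	/* 0: reads 0x00000002, not 0x00000001 */
	leave(0);
	leave(0);
	return 0;
}

The second call reproduces the {1,0,0,1} example from the commit message; the third shows the same-context case, where the running context's own byte exceeds 1.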

File tree

2 files changed: +32 -4 lines

include/linux/bpf.h

Lines changed: 30 additions & 3 deletions
@@ -31,6 +31,7 @@
 #include <linux/static_call.h>
 #include <linux/memcontrol.h>
 #include <linux/cfi.h>
+#include <linux/unaligned.h>
 #include <asm/rqspinlock.h>
 
 struct bpf_verifier_env;
@@ -1746,6 +1747,8 @@ struct bpf_prog_aux {
 	struct bpf_map __rcu *st_ops_assoc;
 };
 
+#define BPF_NR_CONTEXTS	4	/* normal, softirq, hardirq, NMI */
+
 struct bpf_prog {
 	u16			pages;		/* Number of allocated pages */
 	u16			jited:1,	/* Is our filter JIT'ed? */
@@ -1772,7 +1775,7 @@ struct bpf_prog {
 	u8			tag[BPF_TAG_SIZE];
 	};
 	struct bpf_prog_stats __percpu *stats;
-	int __percpu		*active;
+	u8 __percpu		*active;	/* u8[BPF_NR_CONTEXTS] for recursion protection */
 	unsigned int		(*bpf_func)(const void *ctx,
 					    const struct bpf_insn *insn);
 	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
@@ -2006,12 +2009,36 @@ struct bpf_struct_ops_common_value {
 
 static inline bool bpf_prog_get_recursion_context(struct bpf_prog *prog)
 {
-	return this_cpu_inc_return(*(prog->active)) == 1;
+#ifdef CONFIG_ARM64
+	u8 rctx = interrupt_context_level();
+	u8 *active = this_cpu_ptr(prog->active);
+	u32 val;
+
+	preempt_disable();
+	active[rctx]++;
+	val = get_unaligned_le32(active);
+	preempt_enable();
+	if (val != BIT(rctx * 8))
+		return false;
+
+	return true;
+#else
+	return this_cpu_inc_return(*(int __percpu *)(prog->active)) == 1;
+#endif
 }
 
 static inline void bpf_prog_put_recursion_context(struct bpf_prog *prog)
 {
-	this_cpu_dec(*(prog->active));
+#ifdef CONFIG_ARM64
+	u8 rctx = interrupt_context_level();
+	u8 *active = this_cpu_ptr(prog->active);
+
+	preempt_disable();
+	active[rctx]--;
+	preempt_enable();
+#else
+	this_cpu_dec(*(int __percpu *)(prog->active));
+#endif
 }
 
 #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
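
For readers following the val != BIT(rctx * 8) check above, the expected per-context values work out as in the listing below. This is an illustration only; the context numbering (0 = normal, 1 = softirq, 2 = hardirq, 3 = NMI) is assumed from the commit-message example, and the enum is hypothetical, not part of the patch.

/* Illustration only: expected u32 after a first, non-recursive entry,
 * with the four counters read as a little-endian 32-bit value.
 */
enum expected_after_first_entry {		/* hypothetical names */
	EXPECT_NORMAL  = 1u << (0 * 8),		/* 0x00000001, i.e. {0,0,0,1} as {NMI,hardirq,softirq,normal} */
	EXPECT_SOFTIRQ = 1u << (1 * 8),		/* 0x00000100 */
	EXPECT_HARDIRQ = 1u << (2 * 8),		/* 0x00010000 */
	EXPECT_NMI     = 1u << (3 * 8),		/* 0x01000000 */
};
/* Any other observed value means either some other byte is non-zero
 * (inter-context recursion) or this context's byte exceeded 1
 * (same-context recursion).
 */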

kernel/bpf/core.c

Lines changed: 2 additions & 1 deletion
@@ -112,7 +112,8 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
 		vfree(fp);
 		return NULL;
 	}
-	fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
+	fp->active = __alloc_percpu_gfp(sizeof(u8[BPF_NR_CONTEXTS]), 8,
+					bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
 	if (!fp->active) {
 		vfree(fp);
 		kfree(aux);
