From 190985b1dbfd8cb015358afb685a16fd88e58a43 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Mon, 15 Jan 2024 11:28:49 +0000
Subject: [PATCH] Revert "bpf: decouple prune and jump points"

This reverts commit 743f3548d3018d1f25c8d7ef8e22baad2d06bb9b which is
commit bffdeaa8a5af7200b0e74c9d5a41167f86626a36 upstream.

It breaks the Android kernel abi and can be brought back in the future
in an abi-safe way if it is really needed.

Bug: 161946584
Change-Id: If00d7f3353d6d173c93006a76d575194c7e4f517
Signed-off-by: Greg Kroah-Hartman
---
 include/linux/bpf_verifier.h |  1 -
 kernel/bpf/verifier.c        | 57 +++++++++---------------------------
 2 files changed, 14 insertions(+), 44 deletions(-)

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 1c241a75b7b4..c8d0579454be 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -430,7 +430,6 @@ struct bpf_insn_aux_data {
 	/* below fields are initialized once */
 	unsigned int orig_idx; /* original instruction index */
 	bool prune_point;
-	bool jmp_point;
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d62df84908ba..30716b74ceeb 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2512,16 +2512,6 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
 	return 0;
 }
 
-static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
-{
-	env->insn_aux_data[idx].jmp_point = true;
-}
-
-static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
-{
-	return env->insn_aux_data[insn_idx].jmp_point;
-}
-
 /* for any branch, call, exit record the history of jmps in the given state */
 static int push_jmp_history(struct bpf_verifier_env *env,
 			    struct bpf_verifier_state *cur)
@@ -2530,9 +2520,6 @@ static int push_jmp_history(struct bpf_verifier_env *env,
 	struct bpf_idx_pair *p;
 	size_t alloc_size;
 
-	if (!is_jmp_point(env, env->insn_idx))
-		return 0;
-
 	cnt++;
 	alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
 	p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
@@ -11030,16 +11017,11 @@ static struct bpf_verifier_state_list **explored_state(
 	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
 }
 
-static void mark_prune_point(struct bpf_verifier_env *env, int idx)
+static void init_explored_state(struct bpf_verifier_env *env, int idx)
 {
 	env->insn_aux_data[idx].prune_point = true;
 }
 
-static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx)
-{
-	return env->insn_aux_data[insn_idx].prune_point;
-}
-
 enum {
 	DONE_EXPLORING = 0,
 	KEEP_EXPLORING = 1,
@@ -11068,11 +11050,9 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
 		return -EINVAL;
 	}
 
-	if (e == BRANCH) {
+	if (e == BRANCH)
 		/* mark branch target for state pruning */
-		mark_prune_point(env, w);
-		mark_jmp_point(env, w);
-	}
+		init_explored_state(env, w);
 
 	if (insn_state[w] == 0) {
 		/* tree-edge */
@@ -11110,13 +11090,10 @@ static int visit_func_call_insn(int t, int insn_cnt,
 	if (ret)
 		return ret;
 
-	if (t + 1 < insn_cnt) {
-		mark_prune_point(env, t + 1);
-		mark_jmp_point(env, t + 1);
-	}
+	if (t + 1 < insn_cnt)
+		init_explored_state(env, t + 1);
 	if (visit_callee) {
-		mark_prune_point(env, t);
-		mark_jmp_point(env, t);
+		init_explored_state(env, t);
 		ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
 				/* It's ok to allow recursion from CFG point of
 				 * view. __check_func_call() will do the actual
@@ -11150,15 +11127,13 @@ static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env)
 		return DONE_EXPLORING;
 
 	case BPF_CALL:
-		if (insns[t].imm == BPF_FUNC_timer_set_callback) {
+		if (insns[t].imm == BPF_FUNC_timer_set_callback)
 			/* Mark this call insn to trigger is_state_visited() check
 			 * before call itself is processed by __check_func_call().
 			 * Otherwise new async state will be pushed for further
 			 * exploration.
 			 */
-			mark_prune_point(env, t);
-			mark_jmp_point(env, t);
-		}
+			init_explored_state(env, t);
 		return visit_func_call_insn(t, insn_cnt, insns, env,
 					    insns[t].src_reg == BPF_PSEUDO_CALL);
 
@@ -11176,22 +11151,18 @@ static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env)
 		 * but it's marked, since backtracking needs
 		 * to record jmp history in is_state_visited().
 		 */
-		mark_prune_point(env, t + insns[t].off + 1);
-		mark_jmp_point(env, t + insns[t].off + 1);
+		init_explored_state(env, t + insns[t].off + 1);
 		/* tell verifier to check for equivalent states
 		 * after every call and jump
 		 */
-		if (t + 1 < insn_cnt) {
-			mark_prune_point(env, t + 1);
-			mark_jmp_point(env, t + 1);
-		}
+		if (t + 1 < insn_cnt)
+			init_explored_state(env, t + 1);
 
 		return ret;
 
 	default:
 		/* conditional jump with two edges */
-		mark_prune_point(env, t);
-		mark_jmp_point(env, t);
+		init_explored_state(env, t);
 		ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
 		if (ret)
 			return ret;
@@ -12224,11 +12195,11 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	bool add_new_state = env->test_state_freq ? true : false;
 
 	cur->last_insn_idx = env->prev_insn_idx;
-	if (!is_prune_point(env, insn_idx))
+	if (!env->insn_aux_data[insn_idx].prune_point)
 		/* this 'insn_idx' instruction wasn't marked, so we will not
 		 * be doing state search here
 		 */
-		return push_jmp_history(env, cur);
+		return 0;
 	/* bpf progs typically have pruning point every 4 instructions
 	 * http://vger.kernel.org/bpfconf2019.html#session-1