author | Shung-Hsi Yu <shung-hsi.yu@suse.com> | 2024-07-12 16:01:25 +0800 |
---|---|---|
committer | Alexei Starovoitov <ast@kernel.org> | 2024-07-12 08:54:08 -0700 |
commit | 28a4411076b254c67842348e3b25c2fb41a94cad | |
tree | 8f848cb9dfb0438cfa5d709cb559de66e8259c4f | |
parent | 4a04b4f0de59dd5c621e78f15803ee0b0544eeb8 | |
bpf: use check_add_overflow() to check for addition overflows
signed_add*_overflows() was added back when there was no overflow-check
helper. With the introduction of such helpers in commit f0907827a8a91
("compiler.h: enable builtin overflow checkers and add fallback code"), we
can drop signed_add*_overflows() in kernel/bpf/verifier.c and use the
generic check_add_overflow() instead.
This will make future refactoring easier and take advantage of compiler-emitted
hardware instructions that implement these checks efficiently.
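For context, check_add_overflow() lives in include/linux/overflow.h and, on current compilers, is a thin wrapper around __builtin_add_overflow(). Below is a minimal userspace sketch of the semantics the patch relies on, using the builtin directly; the values are made up for illustration.

```c
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	int64_t a = INT64_MAX - 1;
	int64_t sum;

	/*
	 * Like check_add_overflow(): returns true on signed overflow, and
	 * the wrapped sum is still written through the destination pointer.
	 */
	if (__builtin_add_overflow(a, (int64_t)2, &sum))
		printf("overflow, wrapped sum = %" PRId64 "\n", sum);
	else
		printf("ok, sum = %" PRId64 "\n", sum);

	return 0;
}
```

The important property is that the helper reports overflow yet still stores the wrapped sum into the destination; that is what allows the clamping pattern in the diff below, and also what makes the intermediate variable in adjust_jmp_off() necessary (see the note further down).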
After the change GCC 13.3.0 generates cleaner assembly on x86_64:
```
	err = adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
   13625:	mov    0x28(%rbx),%r9	/* r9 = src_reg->smin_value */
   13629:	mov    0x30(%rbx),%rcx	/* rcx = src_reg->smax_value */
   ...
	if (check_add_overflow(*dst_smin, src_reg->smin_value, dst_smin) ||
   141c1:	mov    %r9,%rax
   141c4:	add    0x28(%r12),%rax
   141c9:	mov    %rax,0x28(%r12)
   141ce:	jo     146e4 <adjust_reg_min_max_vals+0x1294>
	    check_add_overflow(*dst_smax, src_reg->smax_value, dst_smax)) {
   141d4:	add    0x30(%r12),%rcx
   141d9:	mov    %rcx,0x30(%r12)
	if (check_add_overflow(*dst_smin, src_reg->smin_value, dst_smin) ||
   141de:	jo     146e4 <adjust_reg_min_max_vals+0x1294>
   ...
		*dst_smin = S64_MIN;
   146e4:	movabs $0x8000000000000000,%rax
   146ee:	mov    %rax,0x28(%r12)
		*dst_smax = S64_MAX;
   146f3:	sub    $0x1,%rax
   146f7:	mov    %rax,0x30(%r12)
```
Before the change it gives:
```
	s64 smin_val = src_reg->smin_value;
     675:	mov    0x28(%rsi),%r8
	s64 smax_val = src_reg->smax_value;
	u64 umin_val = src_reg->umin_value;
	u64 umax_val = src_reg->umax_value;
     679:	mov    %rdi,%rax	/* rax = dst_reg */
	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
     67c:	mov    0x28(%rdi),%rdi	/* rdi = dst_reg->smin_value */
	u64 umin_val = src_reg->umin_value;
     680:	mov    0x38(%rsi),%rdx
	u64 umax_val = src_reg->umax_value;
     684:	mov    0x40(%rsi),%rcx
	s64 res = (s64)((u64)a + (u64)b);
     688:	lea    (%r8,%rdi,1),%r9	/* r9 = dst_reg->smin_value + src_reg->smin_value */
	return res < a;
     68c:	cmp    %r9,%rdi
     68f:	setg   %r10b		/* r10b = (dst_reg->smin_value + src_reg->smin_value) > dst_reg->smin_value */
	if (b < 0)
     693:	test   %r8,%r8
     696:	js     72b <scalar_min_max_add+0xbb>
	    signed_add_overflows(dst_reg->smax_value, smax_val)) {
		dst_reg->smin_value = S64_MIN;
		dst_reg->smax_value = S64_MAX;
     69c:	movabs $0x7fffffffffffffff,%rdi
	s64 smax_val = src_reg->smax_value;
     6a6:	mov    0x30(%rsi),%r8
		dst_reg->smin_value = S64_MIN;
     6aa:	movabs $0x8000000000000000,%rsi
	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
     6b4:	test   %r10b,%r10b	/* (dst_reg->smin_value + src_reg->smin_value) > dst_reg->smin_value ? goto 6cb */
     6b7:	jne    6cb <scalar_min_max_add+0x5b>
	    signed_add_overflows(dst_reg->smax_value, smax_val)) {
     6b9:	mov    0x30(%rax),%r10	/* r10 = dst_reg->smax_value */
	s64 res = (s64)((u64)a + (u64)b);
     6bd:	lea    (%r10,%r8,1),%r11	/* r11 = dst_reg->smax_value + src_reg->smax_value */
	if (b < 0)
     6c1:	test   %r8,%r8
     6c4:	js     71e <scalar_min_max_add+0xae>
	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
     6c6:	cmp    %r11,%r10	/* (dst_reg->smax_value + src_reg->smax_value) <= dst_reg->smax_value ? goto 723 */
     6c9:	jle    723 <scalar_min_max_add+0xb3>
	} else {
		dst_reg->smin_value += smin_val;
		dst_reg->smax_value += smax_val;
	}
     6cb:	mov    %rsi,0x28(%rax)
     ...
     6d5:	mov    %rdi,0x30(%rax)
     ...
	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
     71e:	cmp    %r11,%r10
     721:	jl     6cb <scalar_min_max_add+0x5b>
		dst_reg->smin_value += smin_val;
     723:	mov    %r9,%rsi
		dst_reg->smax_value += smax_val;
     726:	mov    %r11,%rdi
     729:	jmp    6cb <scalar_min_max_add+0x5b>
	return res > a;
     72b:	cmp    %r9,%rdi
     72e:	setl   %r10b
     732:	jmp    69c <scalar_min_max_add+0x2c>
     737:	nopw   0x0(%rax,%rax,1)
```
Note: unlike adjust_ptr_min_max_vals() and scalar*_min_max_add(), it is
necessary to introduce an intermediate variable in adjust_jmp_off() to keep
the functional behavior unchanged: without one, imm/off would be altered
even when the addition overflows.
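A hedged userspace sketch of the pattern the note describes (fake_insn and adjust_imm are made-up names, not the kernel code): the sum goes into a temporary first and is committed to insn->imm only when no overflow was reported, so the instruction is left untouched on the error path.

```c
#include <stdint.h>
#include <stdio.h>

struct fake_insn { int32_t imm; };

static int adjust_imm(struct fake_insn *insn, int32_t delta)
{
	int32_t imm;

	/* Write into a temporary first; &insn->imm would be clobbered
	 * with the wrapped sum even when overflow is reported. */
	if (__builtin_add_overflow(insn->imm, delta, &imm))
		return -1;	/* -ERANGE in the kernel; insn->imm untouched */
	/* Only commit on success. */
	insn->imm = imm;
	return 0;
}

int main(void)
{
	struct fake_insn insn = { .imm = INT32_MAX };

	if (adjust_imm(&insn, 1))
		printf("overflow rejected, imm still %d\n", insn.imm);
	return 0;
}
```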
Suggested-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
Link: https://lore.kernel.org/r/20240712080127.136608-3-shung-hsi.yu@suse.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/bpf/verifier.c | 114 |
1 file changed, 34 insertions(+), 80 deletions(-)
```diff
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index cf2eb07ddf28..0126c9c80c58 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12726,36 +12726,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	return 0;
 }
 
-static bool signed_add_overflows(s64 a, s64 b)
-{
-	/* Do the add in u64, where overflow is well-defined */
-	s64 res = (s64)((u64)a + (u64)b);
-
-	if (b < 0)
-		return res > a;
-	return res < a;
-}
-
-static bool signed_add32_overflows(s32 a, s32 b)
-{
-	/* Do the add in u32, where overflow is well-defined */
-	s32 res = (s32)((u32)a + (u32)b);
-
-	if (b < 0)
-		return res > a;
-	return res < a;
-}
-
-static bool signed_add16_overflows(s16 a, s16 b)
-{
-	/* Do the add in u16, where overflow is well-defined */
-	s16 res = (s16)((u16)a + (u16)b);
-
-	if (b < 0)
-		return res > a;
-	return res < a;
-}
-
 static bool signed_sub_overflows(s64 a, s64 b)
 {
 	/* Do the sub in u64, where overflow is well-defined */
@@ -13257,21 +13227,15 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	 * added into the variable offset, and we copy the fixed offset
 	 * from ptr_reg.
 	 */
-	if (signed_add_overflows(smin_ptr, smin_val) ||
-	    signed_add_overflows(smax_ptr, smax_val)) {
+	if (check_add_overflow(smin_ptr, smin_val, &dst_reg->smin_value) ||
+	    check_add_overflow(smax_ptr, smax_val, &dst_reg->smax_value)) {
 		dst_reg->smin_value = S64_MIN;
 		dst_reg->smax_value = S64_MAX;
-	} else {
-		dst_reg->smin_value = smin_ptr + smin_val;
-		dst_reg->smax_value = smax_ptr + smax_val;
 	}
-	if (umin_ptr + umin_val < umin_ptr ||
-	    umax_ptr + umax_val < umax_ptr) {
+	if (check_add_overflow(umin_ptr, umin_val, &dst_reg->umin_value) ||
+	    check_add_overflow(umax_ptr, umax_val, &dst_reg->umax_value)) {
 		dst_reg->umin_value = 0;
 		dst_reg->umax_value = U64_MAX;
-	} else {
-		dst_reg->umin_value = umin_ptr + umin_val;
-		dst_reg->umax_value = umax_ptr + umax_val;
 	}
 	dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
 	dst_reg->off = ptr_reg->off;
@@ -13374,52 +13338,40 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
 				 struct bpf_reg_state *src_reg)
 {
-	s32 smin_val = src_reg->s32_min_value;
-	s32 smax_val = src_reg->s32_max_value;
-	u32 umin_val = src_reg->u32_min_value;
-	u32 umax_val = src_reg->u32_max_value;
+	s32 *dst_smin = &dst_reg->s32_min_value;
+	s32 *dst_smax = &dst_reg->s32_max_value;
+	u32 *dst_umin = &dst_reg->u32_min_value;
+	u32 *dst_umax = &dst_reg->u32_max_value;
 
-	if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
-	    signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
-		dst_reg->s32_min_value = S32_MIN;
-		dst_reg->s32_max_value = S32_MAX;
-	} else {
-		dst_reg->s32_min_value += smin_val;
-		dst_reg->s32_max_value += smax_val;
+	if (check_add_overflow(*dst_smin, src_reg->s32_min_value, dst_smin) ||
+	    check_add_overflow(*dst_smax, src_reg->s32_max_value, dst_smax)) {
+		*dst_smin = S32_MIN;
+		*dst_smax = S32_MAX;
 	}
-	if (dst_reg->u32_min_value + umin_val < umin_val ||
-	    dst_reg->u32_max_value + umax_val < umax_val) {
-		dst_reg->u32_min_value = 0;
-		dst_reg->u32_max_value = U32_MAX;
-	} else {
-		dst_reg->u32_min_value += umin_val;
-		dst_reg->u32_max_value += umax_val;
+	if (check_add_overflow(*dst_umin, src_reg->u32_min_value, dst_umin) ||
+	    check_add_overflow(*dst_umax, src_reg->u32_max_value, dst_umax)) {
+		*dst_umin = 0;
+		*dst_umax = U32_MAX;
 	}
 }
 
 static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
 			       struct bpf_reg_state *src_reg)
 {
-	s64 smin_val = src_reg->smin_value;
-	s64 smax_val = src_reg->smax_value;
-	u64 umin_val = src_reg->umin_value;
-	u64 umax_val = src_reg->umax_value;
+	s64 *dst_smin = &dst_reg->smin_value;
+	s64 *dst_smax = &dst_reg->smax_value;
+	u64 *dst_umin = &dst_reg->umin_value;
+	u64 *dst_umax = &dst_reg->umax_value;
 
-	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
-	    signed_add_overflows(dst_reg->smax_value, smax_val)) {
-		dst_reg->smin_value = S64_MIN;
-		dst_reg->smax_value = S64_MAX;
-	} else {
-		dst_reg->smin_value += smin_val;
-		dst_reg->smax_value += smax_val;
+	if (check_add_overflow(*dst_smin, src_reg->smin_value, dst_smin) ||
+	    check_add_overflow(*dst_smax, src_reg->smax_value, dst_smax)) {
+		*dst_smin = S64_MIN;
+		*dst_smax = S64_MAX;
 	}
-	if (dst_reg->umin_value + umin_val < umin_val ||
-	    dst_reg->umax_value + umax_val < umax_val) {
-		dst_reg->umin_value = 0;
-		dst_reg->umax_value = U64_MAX;
-	} else {
-		dst_reg->umin_value += umin_val;
-		dst_reg->umax_value += umax_val;
+	if (check_add_overflow(*dst_umin, src_reg->umin_value, dst_umin) ||
+	    check_add_overflow(*dst_umax, src_reg->umax_value, dst_umax)) {
+		*dst_umin = 0;
+		*dst_umax = U64_MAX;
 	}
 }
 
@@ -18835,6 +18787,8 @@ static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
 {
 	struct bpf_insn *insn = prog->insnsi;
 	u32 insn_cnt = prog->len, i;
+	s32 imm;
+	s16 off;
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
 		u8 code = insn->code;
@@ -18846,15 +18800,15 @@ static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
 		if (insn->code == (BPF_JMP32 | BPF_JA)) {
 			if (i + 1 + insn->imm != tgt_idx)
 				continue;
-			if (signed_add32_overflows(insn->imm, delta))
+			if (check_add_overflow(insn->imm, delta, &imm))
 				return -ERANGE;
-			insn->imm += delta;
+			insn->imm = imm;
 		} else {
 			if (i + 1 + insn->off != tgt_idx)
 				continue;
-			if (signed_add16_overflows(insn->off, delta))
+			if (check_add_overflow(insn->off, delta, &off))
 				return -ERANGE;
-			insn->off += delta;
+			insn->off = off;
 		}
 	}
 	return 0;
```
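For completeness, a userspace sketch that mirrors the clamping logic of the patched scalar_min_max_add(); struct reg and scalar_add_bounds() are stand-ins, not kernel types. Even though the wrapped sums are written through the pointers on overflow, the clamp simply overwrites them, which is why the old else branches could be dropped.

```c
#include <stdint.h>
#include <stdio.h>

typedef int64_t s64;
#define S64_MIN INT64_MIN
#define S64_MAX INT64_MAX

struct reg { s64 smin, smax; };

static void scalar_add_bounds(struct reg *dst, const struct reg *src)
{
	s64 *dst_smin = &dst->smin;
	s64 *dst_smax = &dst->smax;

	/* On overflow the wrapped sums were already stored through the
	 * pointers; the clamp below overwrites them unconditionally. */
	if (__builtin_add_overflow(*dst_smin, src->smin, dst_smin) ||
	    __builtin_add_overflow(*dst_smax, src->smax, dst_smax)) {
		*dst_smin = S64_MIN;
		*dst_smax = S64_MAX;
	}
}

int main(void)
{
	struct reg dst = { .smin = 1, .smax = S64_MAX };
	struct reg src = { .smin = 1, .smax = 1 };

	/* smax overflows, so both bounds collapse to the full s64 range. */
	scalar_add_bounds(&dst, &src);
	printf("smin=%lld smax=%lld\n", (long long)dst.smin, (long long)dst.smax);
	return 0;
}
```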