diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 27b74947cd2bd9de8e9dba3518fc876b2308a528..573cca00a0e6cda76014e8a0610aaaf994a78455 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -172,6 +172,7 @@ struct bpf_verifier_state_list {
 #define BPF_ALU_SANITIZE_SRC		1U
 #define BPF_ALU_SANITIZE_DST		2U
 #define BPF_ALU_NEG_VALUE		(1U << 2)
+#define BPF_ALU_NON_POINTER		(1U << 3)
 #define BPF_ALU_SANITIZE		(BPF_ALU_SANITIZE_SRC | \
 					 BPF_ALU_SANITIZE_DST)
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f6bc62a9ee8e9ddea251e353dfe4b6758560ba4f..56674a7c377884b17d8296b6cf6f6e31a8158916 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3103,6 +3103,52 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
 	}
 }
 
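+/* ALU sanitation is needed only for unprivileged programs and only
+ * for BPF_X operations: a BPF_K immediate is a constant known at
+ * verification time, so it cannot be abused under speculation.
+ */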
+static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
+				    const struct bpf_insn *insn)
+{
+	return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
+}
+
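+/* The insn's aux data is shared by every verification path that
+ * walks this instruction, so a single alu_state/alu_limit pair
+ * must be valid for all of them.
+ */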
+static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
+				       u32 alu_state, u32 alu_limit)
+{
+	/* If we arrived here from different branches with different
+	 * state or limits to sanitize, then this won't work.
+	 */
+	if (aux->alu_state &&
+	    (aux->alu_state != alu_state ||
+	     aux->alu_limit != alu_limit))
+		return -EACCES;
+
+	/* Corresponding fixup done in fixup_bpf_calls(). */
+	aux->alu_state = alu_state;
+	aux->alu_limit = alu_limit;
+	return 0;
+}
+
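+/* Scalar-only counterpart of sanitize_ptr_alu(): mark the insn as
+ * BPF_ALU_NON_POINTER so that a path where the same insn operates
+ * on a pointer registers as a conflict and is rejected.
+ */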
+static int sanitize_val_alu(struct bpf_verifier_env *env,
+			    struct bpf_insn *insn)
+{
+	struct bpf_insn_aux_data *aux = cur_aux(env);
+
+	if (can_skip_alu_sanitation(env, insn))
+		return 0;
+
+	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
+}
+
 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 			    struct bpf_insn *insn,
 			    const struct bpf_reg_state *ptr_reg,
@@ -3117,7 +3163,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 	struct bpf_reg_state tmp;
 	bool ret;
 
-	if (env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K)
+	if (can_skip_alu_sanitation(env, insn))
 		return 0;
 
 	/* We already marked aux for masking from non-speculative
@@ -3133,19 +3179,8 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 
 	if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
 		return 0;
-
-	/* If we arrived here from different branches with different
-	 * limits to sanitize, then this won't work.
-	 */
-	if (aux->alu_state &&
-	    (aux->alu_state != alu_state ||
-	     aux->alu_limit != alu_limit))
+	if (update_alu_sanitation_state(aux, alu_state, alu_limit))
 		return -EACCES;
-
-	/* Corresponding fixup done in fixup_bpf_calls(). */
-	aux->alu_state = alu_state;
-	aux->alu_limit = alu_limit;
-
 do_sim:
 	/* Simulate and find potential out-of-bounds access under
 	 * speculative execution from truncation as a result of
@@ -3418,6 +3453,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	s64 smin_val, smax_val;
 	u64 umin_val, umax_val;
 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
+	u32 dst = insn->dst_reg;
+	int ret;
 
 	if (insn_bitness == 32) {
 		/* Relevant for 32-bit RSH: Information can propagate towards
@@ -3452,6 +3489,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 
 	switch (opcode) {
 	case BPF_ADD:
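+		/* Scalar operands on this path: flag insn as non-pointer ALU. */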
+		ret = sanitize_val_alu(env, insn);
+		if (ret < 0) {
+			verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
+			return ret;
+		}
 		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
 		    signed_add_overflows(dst_reg->smax_value, smax_val)) {
 			dst_reg->smin_value = S64_MIN;
@@ -3471,6 +3514,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
 		break;
 	case BPF_SUB:
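+		/* As with BPF_ADD: scalar-only path, flag as non-pointer ALU. */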
+		ret = sanitize_val_alu(env, insn);
+		if (ret < 0) {
+			verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
+			return ret;
+		}
 		if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
 		    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
 			/* Overflow possible, we know nothing */
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 10d44446e8013a19d8c01e5e15a94a919bae94cb..2fd90d4568926d13542783c870507d43a6d6bb64 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -6933,6 +6933,139 @@ static struct bpf_test tests[] = {
 		.result = ACCEPT,
 		.retval = 1,
 	},
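+	/* r2 is a map value pointer on one path and a scalar on the other;
+	 * the shared ADD cannot be sanitized for both, so unpriv is rejected.
+	 */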
+	{
+		"map access: mixing value pointer and scalar, 1",
+		.insns = {
+			// load map value pointer into r0 and r2
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+			BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+			BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			// load some number from the map into r1
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			// depending on r1, branch:
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3),
+			// branch A
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_JMP_A(2),
+			// branch B
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+			// common instruction: r2 += r3 (r2: pointer on A, scalar on B)
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+			// depending on r1, branch:
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+			// branch A
+			BPF_JMP_A(4),
+			// branch B
+			BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
+			// verifier follows fall-through
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			// fake-dead code; targeted from branch A to
+			// prevent dead code sanitization
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 1 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R2 tried to add from different pointers or scalars",
+		.retval = 0,
+	},
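+	/* Same as above with branch order flipped: the scalar path is
+	 * verified first and the pointer path then conflicts.
+	 */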
+	{
+		"map access: mixing value pointer and scalar, 2",
+		.insns = {
+			// load map value pointer into r0 and r2
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+			BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+			BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			// load some number from the map into r1
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			// depending on r1, branch:
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+			// branch A
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+			BPF_JMP_A(2),
+			// branch B
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			// common instruction: r2 += r3 (r2: scalar on A, pointer on B)
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+			// depending on r1, branch:
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+			// branch A
+			BPF_JMP_A(4),
+			// branch B
+			BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
+			// verifier follows fall-through
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			// fake-dead code; targeted from branch A to
+			// prevent dead code sanitization
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 1 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R2 tried to add from different maps or paths",
+		.retval = 0,
+	},
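+	/* Both paths feed scalars into the shared ADD, so its alu_state is
+	 * BPF_ALU_NON_POINTER on either path and no rewrite is applied.
+	 */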
+	{
+		"sanitation: alu with different scalars",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+			BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+			BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
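+			// depending on r1, branch: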
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
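+			// branch A: one pair of scalar constants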
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+			BPF_JMP_A(2),
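+			// branch B: a different pair of scalar constants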
+			BPF_MOV64_IMM(BPF_REG_2, 42),
+			BPF_MOV64_IMM(BPF_REG_3, 0x100001),
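+			// common instruction: scalar + scalar on either path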
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 1 },
+		.result = ACCEPT,
+		.retval = 0x100000,
+	},
 	{
 		"map access: value_ptr += known scalar, upper oob arith, test 1",
 		.insns = {