diff --git a/Documentation/bpf/bpf_design_QA.rst b/Documentation/bpf/bpf_design_QA.rst
index 6780a6d8174580ea1caeac4a13fb4f8dae6bd91b..7cc9e368c1e9b48244c97bce151561dca3c634e7 100644
--- a/Documentation/bpf/bpf_design_QA.rst
+++ b/Documentation/bpf/bpf_design_QA.rst
@@ -157,12 +157,11 @@ Q: Does BPF have a stable ABI?
 ------------------------------
 A: YES. BPF instructions, arguments to BPF programs, set of helper
 functions and their arguments, recognized return codes are all part
-of ABI. However when tracing programs are using bpf_probe_read() helper
-to walk kernel internal datastructures and compile with kernel
-internal headers these accesses can and will break with newer
-kernels. The union bpf_attr -> kern_version is checked at load time
-to prevent accidentally loading kprobe-based bpf programs written
-for a different kernel. Networking programs don't do kern_version check.
+of ABI. However, there is one specific exception for tracing programs
+that use helpers like bpf_probe_read() to walk kernel internal data
+structures and are compiled with kernel internal headers. Both of these
+kernel internals are subject to change and can break with newer kernels,
+so such programs need to be adapted accordingly.
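+
+For example, a tracing program might read a kernel internal structure
+member like this (illustrative sketch only)::
+
+    /* task obtained from the probed context; struct task_struct layout
+     * is kernel internal and may change between kernel versions.
+     */
+    pid_t pid;
+
+    bpf_probe_read(&pid, sizeof(pid), &task->pid);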
 
 Q: How much stack space does a BPF program use?
 -------------------------------------------
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 27b74947cd2bd9de8e9dba3518fc876b2308a528..573cca00a0e6cda76014e8a0610aaaf994a78455 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -172,6 +172,7 @@ struct bpf_verifier_state_list {
 #define BPF_ALU_SANITIZE_SRC		1U
 #define BPF_ALU_SANITIZE_DST		2U
 #define BPF_ALU_NEG_VALUE		(1U << 2)
+#define BPF_ALU_NON_POINTER		(1U << 3)
 #define BPF_ALU_SANITIZE		(BPF_ALU_SANITIZE_SRC | \
 					 BPF_ALU_SANITIZE_DST)
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f6bc62a9ee8e9ddea251e353dfe4b6758560ba4f..56674a7c377884b17d8296b6cf6f6e31a8158916 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3103,6 +3103,40 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
 	}
 }
 
+static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
+				    const struct bpf_insn *insn)
+{
+	return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
+}
+
+static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
+				       u32 alu_state, u32 alu_limit)
+{
+	/* If we arrived here from different branches with different
+	 * state or limits to sanitize, then this won't work.
+	 */
+	if (aux->alu_state &&
+	    (aux->alu_state != alu_state ||
+	     aux->alu_limit != alu_limit))
+		return -EACCES;
+
+	/* Corresponding fixup done in fixup_bpf_calls(). */
+	aux->alu_state = alu_state;
+	aux->alu_limit = alu_limit;
+	return 0;
+}
+
+static int sanitize_val_alu(struct bpf_verifier_env *env,
+			    struct bpf_insn *insn)
+{
+	struct bpf_insn_aux_data *aux = cur_aux(env);
+
+	if (can_skip_alu_sanitation(env, insn))
+		return 0;
+
+	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
+}
+
 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 			    struct bpf_insn *insn,
 			    const struct bpf_reg_state *ptr_reg,
@@ -3117,7 +3151,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 	struct bpf_reg_state tmp;
 	bool ret;
 
-	if (env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K)
+	if (can_skip_alu_sanitation(env, insn))
 		return 0;
 
 	/* We already marked aux for masking from non-speculative
@@ -3133,19 +3167,8 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 
 	if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
 		return 0;
-
-	/* If we arrived here from different branches with different
-	 * limits to sanitize, then this won't work.
-	 */
-	if (aux->alu_state &&
-	    (aux->alu_state != alu_state ||
-	     aux->alu_limit != alu_limit))
+	if (update_alu_sanitation_state(aux, alu_state, alu_limit))
 		return -EACCES;
-
-	/* Corresponding fixup done in fixup_bpf_calls(). */
-	aux->alu_state = alu_state;
-	aux->alu_limit = alu_limit;
-
 do_sim:
 	/* Simulate and find potential out-of-bounds access under
 	 * speculative execution from truncation as a result of
@@ -3418,6 +3441,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	s64 smin_val, smax_val;
 	u64 umin_val, umax_val;
 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
+	u32 dst = insn->dst_reg;
+	int ret;
 
 	if (insn_bitness == 32) {
 		/* Relevant for 32-bit RSH: Information can propagate towards
@@ -3452,6 +3477,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 
 	switch (opcode) {
 	case BPF_ADD:
+		ret = sanitize_val_alu(env, insn);
+		if (ret < 0) {
+			verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
+			return ret;
+		}
 		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
 		    signed_add_overflows(dst_reg->smax_value, smax_val)) {
 			dst_reg->smin_value = S64_MIN;
@@ -3471,6 +3501,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
 		break;
 	case BPF_SUB:
+		ret = sanitize_val_alu(env, insn);
+		if (ret < 0) {
+			verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
+			return ret;
+		}
 		if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
 		    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
 			/* Overflow possible, we know nothing */
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 9cbf363172bdc2010d2c01b91e49f3ecc1b895fc..7c3505006f8e59509802b1063f23418dd4c3c1ce 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1390,10 +1390,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	ipc6.opt = opt;
 
 	fl6.flowi6_proto = sk->sk_protocol;
-	if (!ipv6_addr_any(daddr))
-		fl6.daddr = *daddr;
-	else
-		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
+	fl6.daddr = *daddr;
 	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
 		fl6.saddr = np->saddr;
 	fl6.fl6_sport = inet->inet_sport;
@@ -1421,6 +1418,9 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 		}
 	}
 
+	if (ipv6_addr_any(&fl6.daddr))
+		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
+
 	final_p = fl6_update_dst(&fl6, opt, &final);
 	if (final_p)
 		connected = false;
diff --git a/samples/bpf/test_cgrp2_attach2.c b/samples/bpf/test_cgrp2_attach2.c
index d7b68ef5ba792ec66d3d8bc26463c61e964c41c3..0bb6507256b77dacd5ffa18f407ffc8a2d021b5d 100644
--- a/samples/bpf/test_cgrp2_attach2.c
+++ b/samples/bpf/test_cgrp2_attach2.c
@@ -77,7 +77,7 @@ static int test_foo_bar(void)
 
 	/* Create cgroup /foo, get fd, and join it */
 	foo = create_and_get_cgroup(FOO);
-	if (!foo)
+	if (foo < 0)
 		goto err;
 
 	if (join_cgroup(FOO))
@@ -94,7 +94,7 @@ static int test_foo_bar(void)
 
 	/* Create cgroup /foo/bar, get fd, and join it */
 	bar = create_and_get_cgroup(BAR);
-	if (!bar)
+	if (bar < 0)
 		goto err;
 
 	if (join_cgroup(BAR))
@@ -298,19 +298,19 @@ static int test_multiprog(void)
 		goto err;
 
 	cg1 = create_and_get_cgroup("/cg1");
-	if (!cg1)
+	if (cg1 < 0)
 		goto err;
 	cg2 = create_and_get_cgroup("/cg1/cg2");
-	if (!cg2)
+	if (cg2 < 0)
 		goto err;
 	cg3 = create_and_get_cgroup("/cg1/cg2/cg3");
-	if (!cg3)
+	if (cg3 < 0)
 		goto err;
 	cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4");
-	if (!cg4)
+	if (cg4 < 0)
 		goto err;
 	cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5");
-	if (!cg5)
+	if (cg5 < 0)
 		goto err;
 
 	if (join_cgroup("/cg1/cg2/cg3/cg4/cg5"))
diff --git a/samples/bpf/test_current_task_under_cgroup_user.c b/samples/bpf/test_current_task_under_cgroup_user.c
index 2259f997a26c7f3e955bcae022d8c83767019a33..f082d6ac59f00b2b1d5c892ee3f173495f156d0f 100644
--- a/samples/bpf/test_current_task_under_cgroup_user.c
+++ b/samples/bpf/test_current_task_under_cgroup_user.c
@@ -32,7 +32,7 @@ int main(int argc, char **argv)
 
 	cg2 = create_and_get_cgroup(CGROUP_PATH);
 
-	if (!cg2)
+	if (cg2 < 0)
 		goto err;
 
 	if (bpf_map_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) {
diff --git a/tools/lib/bpf/README.rst b/tools/lib/bpf/README.rst
index 056f383107226082d5a7e693f8f888651a84520b..607aae40f4edaa95514bc76403ea42d8ed580748 100644
--- a/tools/lib/bpf/README.rst
+++ b/tools/lib/bpf/README.rst
@@ -132,6 +132,20 @@ For example, if current state of ``libbpf.map`` is:
 Format of version script and ways to handle ABI changes, including
 incompatible ones, described in details in [1].
 
+Stand-alone build
+=================
+
+Under https://github.com/libbpf/libbpf there is a (semi-)automated
+mirror of the mainline kernel's version of libbpf for a stand-alone build.
+
+However, all changes to libbpf's code base must be upstreamed through
+the mainline kernel tree.
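+
+A stand-alone build can roughly look as follows (an illustrative sketch;
+see the mirror's own documentation for the authoritative steps)::
+
+    git clone https://github.com/libbpf/libbpf
+    cd libbpf/src
+    make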
+
+License
+=======
+
+libbpf is dual-licensed under LGPL 2.1 and BSD 2-Clause.
+
 Links
 =====
 
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
index cf16948aad4adb1e32d33d21f0742ddf724e524e..6692a40a6979eac0b48665e77566e6beef066d17 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.c
+++ b/tools/testing/selftests/bpf/cgroup_helpers.c
@@ -155,7 +155,7 @@ void cleanup_cgroup_environment(void)
  * This function creates a cgroup under the top level workdir and returns the
  * file descriptor. It is idempotent.
  *
- * On success, it returns the file descriptor. On failure it returns 0.
+ * On success, it returns the file descriptor. On failure it returns -1.
  * If there is a failure, it prints the error to stderr.
  */
 int create_and_get_cgroup(const char *path)
@@ -166,13 +166,13 @@ int create_and_get_cgroup(const char *path)
 	format_cgroup_path(cgroup_path, path);
 	if (mkdir(cgroup_path, 0777) && errno != EEXIST) {
 		log_err("mkdiring cgroup %s .. %s", path, cgroup_path);
-		return 0;
+		return -1;
 	}
 
 	fd = open(cgroup_path, O_RDONLY);
 	if (fd < 0) {
 		log_err("Opening Cgroup");
-		return 0;
+		return -1;
 	}
 
 	return fd;
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
index f44834155f25e420270f28a49ece2376c1a95b5a..2fc4625c1a1502b5926614874253fd86715242c7 100644
--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
+++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
@@ -81,7 +81,7 @@ int main(int argc, char **argv)
 
 	/* Create a cgroup, get fd, and join it */
 	cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
-	if (!cgroup_fd) {
+	if (cgroup_fd < 0) {
 		printf("Failed to create test cgroup\n");
 		goto err;
 	}
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c
index 9c8b50bac7e01fd60dc436a5f986e384e2343b43..76e4993b7c16a45404c8a9bc7d7149a34b5796b0 100644
--- a/tools/testing/selftests/bpf/test_dev_cgroup.c
+++ b/tools/testing/selftests/bpf/test_dev_cgroup.c
@@ -43,7 +43,7 @@ int main(int argc, char **argv)
 
 	/* Create a cgroup, get fd, and join it */
 	cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
-	if (!cgroup_fd) {
+	if (cgroup_fd < 0) {
 		printf("Failed to create test cgroup\n");
 		goto err;
 	}
diff --git a/tools/testing/selftests/bpf/test_netcnt.c b/tools/testing/selftests/bpf/test_netcnt.c
index 44ed7f29f8ab6cec40c83c02c01d28ca36c36492..c1da5404454a1551b792c642938050d6c427fda9 100644
--- a/tools/testing/selftests/bpf/test_netcnt.c
+++ b/tools/testing/selftests/bpf/test_netcnt.c
@@ -65,7 +65,7 @@ int main(int argc, char **argv)
 
 	/* Create a cgroup, get fd, and join it */
 	cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
-	if (!cgroup_fd) {
+	if (cgroup_fd < 0) {
 		printf("Failed to create test cgroup\n");
 		goto err;
 	}
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
index c121cc59f31429d4805bcf205dea79cbe1121609..9220747c069db6518b29cd0b3e106c6fafecbd01 100644
--- a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
+++ b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
@@ -164,7 +164,7 @@ int main(int argc, char **argv)
 		goto err;
 
 	cgfd = create_and_get_cgroup(CGROUP_PATH);
-	if (!cgfd)
+	if (cgfd < 0)
 		goto err;
 
 	if (join_cgroup(CGROUP_PATH))
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index b8ebe2f580741a5cb4aae39e604affc313b31ff4..561ffb6d643349f794ff8dc9b00ee570c3dd369b 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -458,7 +458,7 @@ int main(int argc, char **argv)
 		goto err;
 
 	cgfd = create_and_get_cgroup(CG_PATH);
-	if (!cgfd)
+	if (cgfd < 0)
 		goto err;
 
 	if (join_cgroup(CG_PATH))
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index 73b7493d4120991527b61a2a33a9c0784542176b..3f110eaaf29cea214844ff98211697c764b8a870 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -44,6 +44,7 @@
 #define SERV6_V4MAPPED_IP	"::ffff:192.168.0.4"
 #define SRC6_IP			"::1"
 #define SRC6_REWRITE_IP		"::6"
+#define WILDCARD6_IP		"::"
 #define SERV6_PORT		6060
 #define SERV6_REWRITE_PORT	6666
 
@@ -85,12 +86,14 @@ static int bind4_prog_load(const struct sock_addr_test *test);
 static int bind6_prog_load(const struct sock_addr_test *test);
 static int connect4_prog_load(const struct sock_addr_test *test);
 static int connect6_prog_load(const struct sock_addr_test *test);
+static int sendmsg_allow_prog_load(const struct sock_addr_test *test);
 static int sendmsg_deny_prog_load(const struct sock_addr_test *test);
 static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
 static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test);
 static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
 static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test);
 static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test);
+static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test);
 
 static struct sock_addr_test tests[] = {
 	/* bind */
@@ -462,6 +465,34 @@ static struct sock_addr_test tests[] = {
 		SRC6_REWRITE_IP,
 		SYSCALL_ENOTSUPP,
 	},
+	{
+		"sendmsg6: set dst IP = [::] (BSD'ism)",
+		sendmsg6_rw_wildcard_prog_load,
+		BPF_CGROUP_UDP6_SENDMSG,
+		BPF_CGROUP_UDP6_SENDMSG,
+		AF_INET6,
+		SOCK_DGRAM,
+		SERV6_IP,
+		SERV6_PORT,
+		SERV6_REWRITE_IP,
+		SERV6_REWRITE_PORT,
+		SRC6_REWRITE_IP,
+		SUCCESS,
+	},
+	{
+		"sendmsg6: preserve dst IP = [::] (BSD'ism)",
+		sendmsg_allow_prog_load,
+		BPF_CGROUP_UDP6_SENDMSG,
+		BPF_CGROUP_UDP6_SENDMSG,
+		AF_INET6,
+		SOCK_DGRAM,
+		WILDCARD6_IP,
+		SERV6_PORT,
+		SERV6_REWRITE_IP,
+		SERV6_PORT,
+		SRC6_IP,
+		SUCCESS,
+	},
 	{
 		"sendmsg6: deny call",
 		sendmsg_deny_prog_load,
@@ -734,16 +765,27 @@ static int connect6_prog_load(const struct sock_addr_test *test)
 	return load_path(test, CONNECT6_PROG_PATH);
 }
 
-static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
+static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
+				      int32_t rc)
 {
 	struct bpf_insn insns[] = {
-		/* return 0 */
-		BPF_MOV64_IMM(BPF_REG_0, 0),
+		/* return rc */
+		BPF_MOV64_IMM(BPF_REG_0, rc),
 		BPF_EXIT_INSN(),
 	};
 	return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
 }
 
+static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
+{
+	return sendmsg_ret_only_prog_load(test, /*rc*/ 1);
+}
+
+static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
+{
+	return sendmsg_ret_only_prog_load(test, /*rc*/ 0);
+}
+
 static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
 {
 	struct sockaddr_in dst4_rw_addr;
@@ -864,6 +906,11 @@ static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test)
 	return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP);
 }
 
+static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test)
+{
+	return sendmsg6_rw_dst_asm_prog_load(test, WILDCARD6_IP);
+}
+
 static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test)
 {
 	return load_path(test, SENDMSG6_PROG_PATH);
@@ -1395,7 +1442,7 @@ int main(int argc, char **argv)
 		goto err;
 
 	cgfd = create_and_get_cgroup(CG_PATH);
-	if (!cgfd)
+	if (cgfd < 0)
 		goto err;
 
 	if (join_cgroup(CG_PATH))
diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c
index b6c2c605d8c050794cef847c73f779e40378b0ab..fc7832ee566b8e2310f323ec3f6738a67122e759 100644
--- a/tools/testing/selftests/bpf/test_socket_cookie.c
+++ b/tools/testing/selftests/bpf/test_socket_cookie.c
@@ -202,7 +202,7 @@ int main(int argc, char **argv)
 		goto err;
 
 	cgfd = create_and_get_cgroup(CG_PATH);
-	if (!cgfd)
+	if (cgfd < 0)
 		goto err;
 
 	if (join_cgroup(CG_PATH))
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c
index e6eebda7d112b771de662c5b970c0848186595ef..716b4e3be5813d5c2ab37000cb1883aa45728c45 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf_user.c
+++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c
@@ -103,7 +103,7 @@ int main(int argc, char **argv)
 		goto err;
 
 	cg_fd = create_and_get_cgroup(cg_path);
-	if (!cg_fd)
+	if (cg_fd < 0)
 		goto err;
 
 	if (join_cgroup(cg_path))
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c
index ff3c4522aed6823b2af492454b889aa79b3bbc2b..4e4353711a86b5cf9e0ad7902377e481c1d98ebd 100644
--- a/tools/testing/selftests/bpf/test_tcpnotify_user.c
+++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c
@@ -115,7 +115,7 @@ int main(int argc, char **argv)
 		goto err;
 
 	cg_fd = create_and_get_cgroup(cg_path);
-	if (!cg_fd)
+	if (cg_fd < 0)
 		goto err;
 
 	if (join_cgroup(cg_path))
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 10d44446e8013a19d8c01e5e15a94a919bae94cb..2fd90d4568926d13542783c870507d43a6d6bb64 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -6933,6 +6933,126 @@ static struct bpf_test tests[] = {
 		.result = ACCEPT,
 		.retval = 1,
 	},
+	{
+		"map access: mixing value pointer and scalar, 1",
+		.insns = {
+			// load map value pointer into r0 and r2
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+			BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+			BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			// load some number from the map into r1
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			// depending on r1, branch:
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3),
+			// branch A
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_JMP_A(2),
+			// branch B
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+			// common instruction
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+			// depending on r1, branch:
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+			// branch A
+			BPF_JMP_A(4),
+			// branch B
+			BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
+			// verifier follows fall-through
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			// fake-dead code; targeted from branch A to
+			// prevent dead code sanitization
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 1 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R2 tried to add from different pointers or scalars",
+		.retval = 0,
+	},
+	{
+		"map access: mixing value pointer and scalar, 2",
+		.insns = {
+			// load map value pointer into r0 and r2
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+			BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+			BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			// load some number from the map into r1
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			// depending on r1, branch:
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+			// branch A
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+			BPF_JMP_A(2),
+			// branch B
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			// common instruction
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+			// depending on r1, branch:
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+			// branch A
+			BPF_JMP_A(4),
+			// branch B
+			BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
+			// verifier follows fall-through
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			// fake-dead code; targeted from branch A to
+			// prevent dead code sanitization
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 1 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R2 tried to add from different maps or paths",
+		.retval = 0,
+	},
+	{
+		"sanitation: alu with different scalars",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+			BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+			BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+			BPF_JMP_A(2),
+			BPF_MOV64_IMM(BPF_REG_2, 42),
+			BPF_MOV64_IMM(BPF_REG_3, 0x100001),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 1 },
+		.result = ACCEPT,
+		.retval = 0x100000,
+	},
 	{
 		"map access: value_ptr += known scalar, upper oob arith, test 1",
 		.insns = {