blob: 65ae44c85d2793856dc28d9d4d8c803389c0ee97 [file] [log] [blame]
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001/*
2 * Testsuite for eBPF verifier
3 *
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08005 * Copyright (c) 2017 Facebook
Joe Stringerb584ab82018-10-02 13:35:38 -07006 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07007 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
11 */
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020012
Daniel Borkmann2c460622017-08-04 22:24:41 +020013#include <endian.h>
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080014#include <asm/types.h>
15#include <linux/types.h>
Mickaël Salaün702498a2017-02-10 00:21:44 +010016#include <stdint.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070017#include <stdio.h>
Mickaël Salaün702498a2017-02-10 00:21:44 +010018#include <stdlib.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070019#include <unistd.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070020#include <errno.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070021#include <string.h>
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -070022#include <stddef.h>
Alexei Starovoitovbf508872015-10-07 22:23:23 -070023#include <stdbool.h>
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020024#include <sched.h>
Daniel Borkmann21ccaf22018-01-26 23:33:48 +010025#include <limits.h>
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020026
Mickaël Salaünd02d8982017-02-10 00:21:37 +010027#include <sys/capability.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070028
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020029#include <linux/unistd.h>
30#include <linux/filter.h>
31#include <linux/bpf_perf_event.h>
32#include <linux/bpf.h>
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080033#include <linux/if_ether.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070034
Mickaël Salaün2ee89fb2017-02-10 00:21:38 +010035#include <bpf/bpf.h>
36
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020037#ifdef HAVE_GENHDR
38# include "autoconf.h"
39#else
40# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
41# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
42# endif
43#endif
Daniel Borkmannfe8d6622018-02-26 22:34:32 +010044#include "bpf_rlimit.h"
Daniel Borkmanna82d8cd2018-05-14 23:22:34 +020045#include "bpf_rand.h"
Martin KaFai Lauaa5f0c92018-08-08 01:01:27 -070046#include "bpf_util.h"
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020047#include "../../../include/linux/filter.h"
48
Daniel Borkmann93731ef2018-05-04 01:08:13 +020049#define MAX_INSNS BPF_MAXINSNS
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020050#define MAX_FIXUPS 8
Roman Gushchind4c9f572018-08-02 14:27:28 -070051#define MAX_NR_MAPS 8
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080052#define POINTER_VALUE 0xcafe4all
53#define TEST_DATA_LEN 64
Alexei Starovoitovbf508872015-10-07 22:23:23 -070054
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020055#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
Daniel Borkmann614d0d72017-05-25 01:05:09 +020056#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020057
Joe Stringer0a6748742018-02-14 13:50:36 -080058#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
59static bool unpriv_disabled = false;
60
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070061struct bpf_test {
62 const char *descr;
63 struct bpf_insn insns[MAX_INSNS];
Prashant Bhole908142e2018-10-09 10:04:53 +090064 int fixup_map_hash_8b[MAX_FIXUPS];
65 int fixup_map_hash_48b[MAX_FIXUPS];
66 int fixup_map_hash_16b[MAX_FIXUPS];
67 int fixup_map_array_48b[MAX_FIXUPS];
Daniel Borkmann06be0862018-06-02 23:06:31 +020068 int fixup_prog1[MAX_FIXUPS];
69 int fixup_prog2[MAX_FIXUPS];
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070070 int fixup_map_in_map[MAX_FIXUPS];
Roman Gushchind4c9f572018-08-02 14:27:28 -070071 int fixup_cgroup_storage[MAX_FIXUPS];
Roman Gushchina3c60542018-09-28 14:45:53 +000072 int fixup_percpu_cgroup_storage[MAX_FIXUPS];
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070073 const char *errstr;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070074 const char *errstr_unpriv;
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080075 uint32_t retval;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070076 enum {
Alexei Starovoitovbf508872015-10-07 22:23:23 -070077 UNDEF,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070078 ACCEPT,
79 REJECT
Alexei Starovoitovbf508872015-10-07 22:23:23 -070080 } result, result_unpriv;
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -070081 enum bpf_prog_type prog_type;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020082 uint8_t flags;
Daniel Borkmann93731ef2018-05-04 01:08:13 +020083 __u8 data[TEST_DATA_LEN];
84 void (*fill_helper)(struct bpf_test *self);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070085};
86
Josef Bacik48461132016-09-28 10:54:32 -040087/* Note we want this to be 64 bit aligned so that the end of our array is
88 * actually the end of the structure.
89 */
90#define MAX_ENTRIES 11
Josef Bacik48461132016-09-28 10:54:32 -040091
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020092struct test_val {
93 unsigned int index;
94 int foo[MAX_ENTRIES];
Josef Bacik48461132016-09-28 10:54:32 -040095};
96
Paul Chaignon5f90dd62018-04-24 15:08:19 +020097struct other_val {
98 long long foo;
99 long long bar;
100};
101
Daniel Borkmann93731ef2018-05-04 01:08:13 +0200102static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
103{
104 /* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
105#define PUSH_CNT 51
106 unsigned int len = BPF_MAXINSNS;
107 struct bpf_insn *insn = self->insns;
108 int i = 0, j, k = 0;
109
110 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
111loop:
112 for (j = 0; j < PUSH_CNT; j++) {
113 insn[i++] = BPF_LD_ABS(BPF_B, 0);
114 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
115 i++;
116 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
117 insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
118 insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
119 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
120 BPF_FUNC_skb_vlan_push),
121 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
122 i++;
123 }
124
125 for (j = 0; j < PUSH_CNT; j++) {
126 insn[i++] = BPF_LD_ABS(BPF_B, 0);
127 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
128 i++;
129 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
130 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
131 BPF_FUNC_skb_vlan_pop),
132 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
133 i++;
134 }
135 if (++k < 5)
136 goto loop;
137
138 for (; i < len - 1; i++)
139 insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
140 insn[len - 1] = BPF_EXIT_INSN();
141}
142
143static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
144{
145 struct bpf_insn *insn = self->insns;
146 unsigned int len = BPF_MAXINSNS;
147 int i = 0;
148
149 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
150 insn[i++] = BPF_LD_ABS(BPF_B, 0);
151 insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
152 i++;
153 while (i < len - 1)
154 insn[i++] = BPF_LD_ABS(BPF_B, 1);
155 insn[i] = BPF_EXIT_INSN();
156}
157
Daniel Borkmanna82d8cd2018-05-14 23:22:34 +0200158static void bpf_fill_rand_ld_dw(struct bpf_test *self)
159{
160 struct bpf_insn *insn = self->insns;
161 uint64_t res = 0;
162 int i = 0;
163
164 insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
165 while (i < self->retval) {
166 uint64_t val = bpf_semi_rand_get();
167 struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
168
169 res ^= val;
170 insn[i++] = tmp[0];
171 insn[i++] = tmp[1];
172 insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
173 }
174 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
175 insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
176 insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
177 insn[i] = BPF_EXIT_INSN();
178 res ^= (res >> 32);
179 self->retval = (uint32_t)res;
180}
181
Joe Stringerb584ab82018-10-02 13:35:38 -0700182/* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */
183#define BPF_SK_LOOKUP \
184 /* struct bpf_sock_tuple tuple = {} */ \
185 BPF_MOV64_IMM(BPF_REG_2, 0), \
186 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), \
187 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16), \
188 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24), \
189 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32), \
190 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40), \
191 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48), \
192 /* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */ \
193 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), \
195 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), \
196 BPF_MOV64_IMM(BPF_REG_4, 0), \
197 BPF_MOV64_IMM(BPF_REG_5, 0), \
198 BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
199
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700200static struct bpf_test tests[] = {
201 {
202 "add+sub+mul",
203 .insns = {
204 BPF_MOV64_IMM(BPF_REG_1, 1),
205 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
206 BPF_MOV64_IMM(BPF_REG_2, 3),
207 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
208 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
209 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
210 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
211 BPF_EXIT_INSN(),
212 },
213 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -0800214 .retval = -3,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700215 },
216 {
Daniel Borkmann87c17932018-01-20 01:24:32 +0100217 "DIV32 by 0, zero check 1",
218 .insns = {
219 BPF_MOV32_IMM(BPF_REG_0, 42),
220 BPF_MOV32_IMM(BPF_REG_1, 0),
221 BPF_MOV32_IMM(BPF_REG_2, 1),
222 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
223 BPF_EXIT_INSN(),
224 },
225 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100226 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100227 },
228 {
229 "DIV32 by 0, zero check 2",
230 .insns = {
231 BPF_MOV32_IMM(BPF_REG_0, 42),
232 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
233 BPF_MOV32_IMM(BPF_REG_2, 1),
234 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
235 BPF_EXIT_INSN(),
236 },
237 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100238 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100239 },
240 {
241 "DIV64 by 0, zero check",
242 .insns = {
243 BPF_MOV32_IMM(BPF_REG_0, 42),
244 BPF_MOV32_IMM(BPF_REG_1, 0),
245 BPF_MOV32_IMM(BPF_REG_2, 1),
246 BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
247 BPF_EXIT_INSN(),
248 },
249 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100250 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100251 },
252 {
253 "MOD32 by 0, zero check 1",
254 .insns = {
255 BPF_MOV32_IMM(BPF_REG_0, 42),
256 BPF_MOV32_IMM(BPF_REG_1, 0),
257 BPF_MOV32_IMM(BPF_REG_2, 1),
258 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
259 BPF_EXIT_INSN(),
260 },
261 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100262 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100263 },
264 {
265 "MOD32 by 0, zero check 2",
266 .insns = {
267 BPF_MOV32_IMM(BPF_REG_0, 42),
268 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
269 BPF_MOV32_IMM(BPF_REG_2, 1),
270 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
271 BPF_EXIT_INSN(),
272 },
273 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100274 .retval = 42,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100275 },
276 {
277 "MOD64 by 0, zero check",
278 .insns = {
279 BPF_MOV32_IMM(BPF_REG_0, 42),
280 BPF_MOV32_IMM(BPF_REG_1, 0),
281 BPF_MOV32_IMM(BPF_REG_2, 1),
282 BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
283 BPF_EXIT_INSN(),
284 },
285 .result = ACCEPT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100286 .retval = 42,
287 },
288 {
289 "DIV32 by 0, zero check ok, cls",
290 .insns = {
291 BPF_MOV32_IMM(BPF_REG_0, 42),
292 BPF_MOV32_IMM(BPF_REG_1, 2),
293 BPF_MOV32_IMM(BPF_REG_2, 16),
294 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
295 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
296 BPF_EXIT_INSN(),
297 },
298 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
299 .result = ACCEPT,
300 .retval = 8,
301 },
302 {
303 "DIV32 by 0, zero check 1, cls",
304 .insns = {
305 BPF_MOV32_IMM(BPF_REG_1, 0),
306 BPF_MOV32_IMM(BPF_REG_0, 1),
307 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
308 BPF_EXIT_INSN(),
309 },
310 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
311 .result = ACCEPT,
Daniel Borkmann87c17932018-01-20 01:24:32 +0100312 .retval = 0,
313 },
314 {
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100315 "DIV32 by 0, zero check 2, cls",
316 .insns = {
317 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
318 BPF_MOV32_IMM(BPF_REG_0, 1),
319 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
320 BPF_EXIT_INSN(),
321 },
322 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
323 .result = ACCEPT,
324 .retval = 0,
325 },
326 {
327 "DIV64 by 0, zero check, cls",
328 .insns = {
329 BPF_MOV32_IMM(BPF_REG_1, 0),
330 BPF_MOV32_IMM(BPF_REG_0, 1),
331 BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
332 BPF_EXIT_INSN(),
333 },
334 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
335 .result = ACCEPT,
336 .retval = 0,
337 },
338 {
339 "MOD32 by 0, zero check ok, cls",
340 .insns = {
341 BPF_MOV32_IMM(BPF_REG_0, 42),
342 BPF_MOV32_IMM(BPF_REG_1, 3),
343 BPF_MOV32_IMM(BPF_REG_2, 5),
344 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
345 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
346 BPF_EXIT_INSN(),
347 },
348 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
349 .result = ACCEPT,
350 .retval = 2,
351 },
352 {
353 "MOD32 by 0, zero check 1, cls",
354 .insns = {
355 BPF_MOV32_IMM(BPF_REG_1, 0),
356 BPF_MOV32_IMM(BPF_REG_0, 1),
357 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
358 BPF_EXIT_INSN(),
359 },
360 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
361 .result = ACCEPT,
362 .retval = 1,
363 },
364 {
365 "MOD32 by 0, zero check 2, cls",
366 .insns = {
367 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
368 BPF_MOV32_IMM(BPF_REG_0, 1),
369 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
370 BPF_EXIT_INSN(),
371 },
372 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
373 .result = ACCEPT,
374 .retval = 1,
375 },
376 {
377 "MOD64 by 0, zero check 1, cls",
378 .insns = {
379 BPF_MOV32_IMM(BPF_REG_1, 0),
380 BPF_MOV32_IMM(BPF_REG_0, 2),
381 BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
382 BPF_EXIT_INSN(),
383 },
384 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
385 .result = ACCEPT,
386 .retval = 2,
387 },
388 {
389 "MOD64 by 0, zero check 2, cls",
390 .insns = {
391 BPF_MOV32_IMM(BPF_REG_1, 0),
392 BPF_MOV32_IMM(BPF_REG_0, -1),
393 BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
394 BPF_EXIT_INSN(),
395 },
396 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
397 .result = ACCEPT,
398 .retval = -1,
399 },
400 /* Just make sure that JITs used udiv/umod as otherwise we get
401 * an exception from INT_MIN/-1 overflow similarly as with div
402 * by zero.
403 */
404 {
405 "DIV32 overflow, check 1",
406 .insns = {
407 BPF_MOV32_IMM(BPF_REG_1, -1),
408 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
409 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
410 BPF_EXIT_INSN(),
411 },
412 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
413 .result = ACCEPT,
414 .retval = 0,
415 },
416 {
417 "DIV32 overflow, check 2",
418 .insns = {
419 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
420 BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
421 BPF_EXIT_INSN(),
422 },
423 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
424 .result = ACCEPT,
425 .retval = 0,
426 },
427 {
428 "DIV64 overflow, check 1",
429 .insns = {
430 BPF_MOV64_IMM(BPF_REG_1, -1),
431 BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
432 BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
433 BPF_EXIT_INSN(),
434 },
435 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
436 .result = ACCEPT,
437 .retval = 0,
438 },
439 {
440 "DIV64 overflow, check 2",
441 .insns = {
442 BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
443 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
444 BPF_EXIT_INSN(),
445 },
446 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
447 .result = ACCEPT,
448 .retval = 0,
449 },
450 {
451 "MOD32 overflow, check 1",
452 .insns = {
453 BPF_MOV32_IMM(BPF_REG_1, -1),
454 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
455 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
456 BPF_EXIT_INSN(),
457 },
458 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
459 .result = ACCEPT,
460 .retval = INT_MIN,
461 },
462 {
463 "MOD32 overflow, check 2",
464 .insns = {
465 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
466 BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
467 BPF_EXIT_INSN(),
468 },
469 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
470 .result = ACCEPT,
471 .retval = INT_MIN,
472 },
473 {
474 "MOD64 overflow, check 1",
475 .insns = {
476 BPF_MOV64_IMM(BPF_REG_1, -1),
477 BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
478 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
479 BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
480 BPF_MOV32_IMM(BPF_REG_0, 0),
481 BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
482 BPF_MOV32_IMM(BPF_REG_0, 1),
483 BPF_EXIT_INSN(),
484 },
485 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
486 .result = ACCEPT,
487 .retval = 1,
488 },
489 {
490 "MOD64 overflow, check 2",
491 .insns = {
492 BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
493 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
494 BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
495 BPF_MOV32_IMM(BPF_REG_0, 0),
496 BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
497 BPF_MOV32_IMM(BPF_REG_0, 1),
498 BPF_EXIT_INSN(),
499 },
500 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
501 .result = ACCEPT,
502 .retval = 1,
503 },
504 {
505 "xor32 zero extend check",
506 .insns = {
507 BPF_MOV32_IMM(BPF_REG_2, -1),
508 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
509 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
510 BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
511 BPF_MOV32_IMM(BPF_REG_0, 2),
512 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
513 BPF_MOV32_IMM(BPF_REG_0, 1),
514 BPF_EXIT_INSN(),
515 },
516 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
517 .result = ACCEPT,
518 .retval = 1,
519 },
520 {
Daniel Borkmann87c17932018-01-20 01:24:32 +0100521 "empty prog",
522 .insns = {
523 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100524 .errstr = "unknown opcode 00",
Daniel Borkmann87c17932018-01-20 01:24:32 +0100525 .result = REJECT,
526 },
527 {
528 "only exit insn",
529 .insns = {
530 BPF_EXIT_INSN(),
531 },
532 .errstr = "R0 !read_ok",
533 .result = REJECT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700534 },
535 {
536 "unreachable",
537 .insns = {
538 BPF_EXIT_INSN(),
539 BPF_EXIT_INSN(),
540 },
541 .errstr = "unreachable",
542 .result = REJECT,
543 },
544 {
545 "unreachable2",
546 .insns = {
547 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
548 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
549 BPF_EXIT_INSN(),
550 },
551 .errstr = "unreachable",
552 .result = REJECT,
553 },
554 {
555 "out of range jump",
556 .insns = {
557 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
558 BPF_EXIT_INSN(),
559 },
560 .errstr = "jump out of range",
561 .result = REJECT,
562 },
563 {
564 "out of range jump2",
565 .insns = {
566 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
567 BPF_EXIT_INSN(),
568 },
569 .errstr = "jump out of range",
570 .result = REJECT,
571 },
572 {
573 "test1 ld_imm64",
574 .insns = {
575 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
576 BPF_LD_IMM64(BPF_REG_0, 0),
577 BPF_LD_IMM64(BPF_REG_0, 0),
578 BPF_LD_IMM64(BPF_REG_0, 1),
579 BPF_LD_IMM64(BPF_REG_0, 1),
580 BPF_MOV64_IMM(BPF_REG_0, 2),
581 BPF_EXIT_INSN(),
582 },
583 .errstr = "invalid BPF_LD_IMM insn",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700584 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700585 .result = REJECT,
586 },
587 {
588 "test2 ld_imm64",
589 .insns = {
590 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
591 BPF_LD_IMM64(BPF_REG_0, 0),
592 BPF_LD_IMM64(BPF_REG_0, 0),
593 BPF_LD_IMM64(BPF_REG_0, 1),
594 BPF_LD_IMM64(BPF_REG_0, 1),
595 BPF_EXIT_INSN(),
596 },
597 .errstr = "invalid BPF_LD_IMM insn",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700598 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700599 .result = REJECT,
600 },
601 {
602 "test3 ld_imm64",
603 .insns = {
604 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
605 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
606 BPF_LD_IMM64(BPF_REG_0, 0),
607 BPF_LD_IMM64(BPF_REG_0, 0),
608 BPF_LD_IMM64(BPF_REG_0, 1),
609 BPF_LD_IMM64(BPF_REG_0, 1),
610 BPF_EXIT_INSN(),
611 },
612 .errstr = "invalid bpf_ld_imm64 insn",
613 .result = REJECT,
614 },
615 {
616 "test4 ld_imm64",
617 .insns = {
618 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
619 BPF_EXIT_INSN(),
620 },
621 .errstr = "invalid bpf_ld_imm64 insn",
622 .result = REJECT,
623 },
624 {
625 "test5 ld_imm64",
626 .insns = {
627 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
628 },
629 .errstr = "invalid bpf_ld_imm64 insn",
630 .result = REJECT,
631 },
632 {
Daniel Borkmann728a8532017-04-27 01:39:32 +0200633 "test6 ld_imm64",
634 .insns = {
635 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
636 BPF_RAW_INSN(0, 0, 0, 0, 0),
637 BPF_EXIT_INSN(),
638 },
639 .result = ACCEPT,
640 },
641 {
642 "test7 ld_imm64",
643 .insns = {
644 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
645 BPF_RAW_INSN(0, 0, 0, 0, 1),
646 BPF_EXIT_INSN(),
647 },
648 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -0800649 .retval = 1,
Daniel Borkmann728a8532017-04-27 01:39:32 +0200650 },
651 {
652 "test8 ld_imm64",
653 .insns = {
654 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
655 BPF_RAW_INSN(0, 0, 0, 0, 1),
656 BPF_EXIT_INSN(),
657 },
658 .errstr = "uses reserved fields",
659 .result = REJECT,
660 },
661 {
662 "test9 ld_imm64",
663 .insns = {
664 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
665 BPF_RAW_INSN(0, 0, 0, 1, 1),
666 BPF_EXIT_INSN(),
667 },
668 .errstr = "invalid bpf_ld_imm64 insn",
669 .result = REJECT,
670 },
671 {
672 "test10 ld_imm64",
673 .insns = {
674 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
675 BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
676 BPF_EXIT_INSN(),
677 },
678 .errstr = "invalid bpf_ld_imm64 insn",
679 .result = REJECT,
680 },
681 {
682 "test11 ld_imm64",
683 .insns = {
684 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
685 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
686 BPF_EXIT_INSN(),
687 },
688 .errstr = "invalid bpf_ld_imm64 insn",
689 .result = REJECT,
690 },
691 {
692 "test12 ld_imm64",
693 .insns = {
694 BPF_MOV64_IMM(BPF_REG_1, 0),
695 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
696 BPF_RAW_INSN(0, 0, 0, 0, 1),
697 BPF_EXIT_INSN(),
698 },
699 .errstr = "not pointing to valid bpf_map",
700 .result = REJECT,
701 },
702 {
703 "test13 ld_imm64",
704 .insns = {
705 BPF_MOV64_IMM(BPF_REG_1, 0),
706 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
707 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
708 BPF_EXIT_INSN(),
709 },
710 .errstr = "invalid bpf_ld_imm64 insn",
711 .result = REJECT,
712 },
713 {
Daniel Borkmann7891a872018-01-10 20:04:37 +0100714 "arsh32 on imm",
715 .insns = {
716 BPF_MOV64_IMM(BPF_REG_0, 1),
717 BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
718 BPF_EXIT_INSN(),
719 },
720 .result = REJECT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100721 .errstr = "unknown opcode c4",
Daniel Borkmann7891a872018-01-10 20:04:37 +0100722 },
723 {
724 "arsh32 on reg",
725 .insns = {
726 BPF_MOV64_IMM(BPF_REG_0, 1),
727 BPF_MOV64_IMM(BPF_REG_1, 5),
728 BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
729 BPF_EXIT_INSN(),
730 },
731 .result = REJECT,
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100732 .errstr = "unknown opcode cc",
Daniel Borkmann7891a872018-01-10 20:04:37 +0100733 },
734 {
735 "arsh64 on imm",
736 .insns = {
737 BPF_MOV64_IMM(BPF_REG_0, 1),
738 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
739 BPF_EXIT_INSN(),
740 },
741 .result = ACCEPT,
742 },
743 {
744 "arsh64 on reg",
745 .insns = {
746 BPF_MOV64_IMM(BPF_REG_0, 1),
747 BPF_MOV64_IMM(BPF_REG_1, 5),
748 BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
749 BPF_EXIT_INSN(),
750 },
751 .result = ACCEPT,
752 },
753 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700754 "no bpf_exit",
755 .insns = {
756 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
757 },
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -0800758 .errstr = "not an exit",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700759 .result = REJECT,
760 },
761 {
762 "loop (back-edge)",
763 .insns = {
764 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
765 BPF_EXIT_INSN(),
766 },
767 .errstr = "back-edge",
768 .result = REJECT,
769 },
770 {
771 "loop2 (back-edge)",
772 .insns = {
773 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
774 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
775 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
776 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
777 BPF_EXIT_INSN(),
778 },
779 .errstr = "back-edge",
780 .result = REJECT,
781 },
782 {
783 "conditional loop",
784 .insns = {
785 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
786 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
787 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
788 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
789 BPF_EXIT_INSN(),
790 },
791 .errstr = "back-edge",
792 .result = REJECT,
793 },
794 {
795 "read uninitialized register",
796 .insns = {
797 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
798 BPF_EXIT_INSN(),
799 },
800 .errstr = "R2 !read_ok",
801 .result = REJECT,
802 },
803 {
804 "read invalid register",
805 .insns = {
806 BPF_MOV64_REG(BPF_REG_0, -1),
807 BPF_EXIT_INSN(),
808 },
809 .errstr = "R15 is invalid",
810 .result = REJECT,
811 },
812 {
813 "program doesn't init R0 before exit",
814 .insns = {
815 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
816 BPF_EXIT_INSN(),
817 },
818 .errstr = "R0 !read_ok",
819 .result = REJECT,
820 },
821 {
Alexei Starovoitov32bf08a2014-10-20 14:54:57 -0700822 "program doesn't init R0 before exit in all branches",
823 .insns = {
824 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
825 BPF_MOV64_IMM(BPF_REG_0, 1),
826 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
827 BPF_EXIT_INSN(),
828 },
829 .errstr = "R0 !read_ok",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700830 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov32bf08a2014-10-20 14:54:57 -0700831 .result = REJECT,
832 },
833 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700834 "stack out of bounds",
835 .insns = {
836 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
837 BPF_EXIT_INSN(),
838 },
839 .errstr = "invalid stack",
840 .result = REJECT,
841 },
842 {
843 "invalid call insn1",
844 .insns = {
845 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
846 BPF_EXIT_INSN(),
847 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100848 .errstr = "unknown opcode 8d",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700849 .result = REJECT,
850 },
851 {
852 "invalid call insn2",
853 .insns = {
854 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
855 BPF_EXIT_INSN(),
856 },
857 .errstr = "BPF_CALL uses reserved",
858 .result = REJECT,
859 },
860 {
861 "invalid function call",
862 .insns = {
863 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
864 BPF_EXIT_INSN(),
865 },
Daniel Borkmanne00c7b22016-11-26 01:28:09 +0100866 .errstr = "invalid func unknown#1234567",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700867 .result = REJECT,
868 },
869 {
870 "uninitialized stack1",
871 .insns = {
872 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
873 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
874 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200875 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
876 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700877 BPF_EXIT_INSN(),
878 },
Prashant Bhole908142e2018-10-09 10:04:53 +0900879 .fixup_map_hash_8b = { 2 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700880 .errstr = "invalid indirect read from stack",
881 .result = REJECT,
882 },
883 {
884 "uninitialized stack2",
885 .insns = {
886 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
887 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
888 BPF_EXIT_INSN(),
889 },
890 .errstr = "invalid read from stack",
891 .result = REJECT,
892 },
893 {
Daniel Borkmann728a8532017-04-27 01:39:32 +0200894 "invalid fp arithmetic",
895 /* If this gets ever changed, make sure JITs can deal with it. */
896 .insns = {
897 BPF_MOV64_IMM(BPF_REG_0, 0),
898 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
899 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
900 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
901 BPF_EXIT_INSN(),
902 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -0800903 .errstr = "R1 subtraction from stack pointer",
Daniel Borkmann728a8532017-04-27 01:39:32 +0200904 .result = REJECT,
905 },
906 {
907 "non-invalid fp arithmetic",
908 .insns = {
909 BPF_MOV64_IMM(BPF_REG_0, 0),
910 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
911 BPF_EXIT_INSN(),
912 },
913 .result = ACCEPT,
914 },
915 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200916 "invalid argument register",
917 .insns = {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200918 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
919 BPF_FUNC_get_cgroup_classid),
920 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
921 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200922 BPF_EXIT_INSN(),
923 },
924 .errstr = "R1 !read_ok",
925 .result = REJECT,
926 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
927 },
928 {
929 "non-invalid argument register",
930 .insns = {
931 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200932 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
933 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200934 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200935 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
936 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200937 BPF_EXIT_INSN(),
938 },
939 .result = ACCEPT,
940 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
941 },
942 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700943 "check valid spill/fill",
944 .insns = {
945 /* spill R1(ctx) into stack */
946 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700947 /* fill it back into R2 */
948 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700949 /* should be able to access R0 = *(R2 + 8) */
Daniel Borkmannf91fe172015-03-01 12:31:41 +0100950 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
951 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700952 BPF_EXIT_INSN(),
953 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700954 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700955 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700956 .result_unpriv = REJECT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -0800957 .retval = POINTER_VALUE,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700958 },
959 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +0200960 "check valid spill/fill, skb mark",
961 .insns = {
962 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
963 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
964 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
965 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
966 offsetof(struct __sk_buff, mark)),
967 BPF_EXIT_INSN(),
968 },
969 .result = ACCEPT,
970 .result_unpriv = ACCEPT,
971 },
972 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700973 "check corrupted spill/fill",
974 .insns = {
975 /* spill R1(ctx) into stack */
976 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700977 /* mess up with R1 pointer on stack */
978 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700979 /* fill back into R0 should fail */
980 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700981 BPF_EXIT_INSN(),
982 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700983 .errstr_unpriv = "attempt to corrupt spilled",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700984 .errstr = "corrupted spill",
985 .result = REJECT,
986 },
987 {
988 "invalid src register in STX",
989 .insns = {
990 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
991 BPF_EXIT_INSN(),
992 },
993 .errstr = "R15 is invalid",
994 .result = REJECT,
995 },
996 {
997 "invalid dst register in STX",
998 .insns = {
999 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
1000 BPF_EXIT_INSN(),
1001 },
1002 .errstr = "R14 is invalid",
1003 .result = REJECT,
1004 },
1005 {
1006 "invalid dst register in ST",
1007 .insns = {
1008 BPF_ST_MEM(BPF_B, 14, -1, -1),
1009 BPF_EXIT_INSN(),
1010 },
1011 .errstr = "R14 is invalid",
1012 .result = REJECT,
1013 },
1014 {
1015 "invalid src register in LDX",
1016 .insns = {
1017 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
1018 BPF_EXIT_INSN(),
1019 },
1020 .errstr = "R12 is invalid",
1021 .result = REJECT,
1022 },
1023 {
1024 "invalid dst register in LDX",
1025 .insns = {
1026 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
1027 BPF_EXIT_INSN(),
1028 },
1029 .errstr = "R11 is invalid",
1030 .result = REJECT,
1031 },
1032 {
1033 "junk insn",
1034 .insns = {
1035 BPF_RAW_INSN(0, 0, 0, 0, 0),
1036 BPF_EXIT_INSN(),
1037 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +01001038 .errstr = "unknown opcode 00",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001039 .result = REJECT,
1040 },
1041 {
1042 "junk insn2",
1043 .insns = {
1044 BPF_RAW_INSN(1, 0, 0, 0, 0),
1045 BPF_EXIT_INSN(),
1046 },
1047 .errstr = "BPF_LDX uses reserved fields",
1048 .result = REJECT,
1049 },
1050 {
1051 "junk insn3",
1052 .insns = {
1053 BPF_RAW_INSN(-1, 0, 0, 0, 0),
1054 BPF_EXIT_INSN(),
1055 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +01001056 .errstr = "unknown opcode ff",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001057 .result = REJECT,
1058 },
1059 {
1060 "junk insn4",
1061 .insns = {
1062 BPF_RAW_INSN(-1, -1, -1, -1, -1),
1063 BPF_EXIT_INSN(),
1064 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +01001065 .errstr = "unknown opcode ff",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001066 .result = REJECT,
1067 },
1068 {
1069 "junk insn5",
1070 .insns = {
1071 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
1072 BPF_EXIT_INSN(),
1073 },
1074 .errstr = "BPF_ALU uses reserved fields",
1075 .result = REJECT,
1076 },
1077 {
1078 "misaligned read from stack",
1079 .insns = {
1080 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1081 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
1082 BPF_EXIT_INSN(),
1083 },
Edward Creef65b1842017-08-07 15:27:12 +01001084 .errstr = "misaligned stack access",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001085 .result = REJECT,
1086 },
1087 {
1088 "invalid map_fd for function call",
1089 .insns = {
1090 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1091 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
1092 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1093 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001094 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1095 BPF_FUNC_map_delete_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001096 BPF_EXIT_INSN(),
1097 },
1098 .errstr = "fd 0 is not pointing to valid bpf_map",
1099 .result = REJECT,
1100 },
1101 {
1102 "don't check return value before access",
1103 .insns = {
1104 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1105 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1106 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1107 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001108 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1109 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001110 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1111 BPF_EXIT_INSN(),
1112 },
Prashant Bhole908142e2018-10-09 10:04:53 +09001113 .fixup_map_hash_8b = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001114 .errstr = "R0 invalid mem access 'map_value_or_null'",
1115 .result = REJECT,
1116 },
1117 {
1118 "access memory with incorrect alignment",
1119 .insns = {
1120 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1121 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1123 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001124 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1125 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001126 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1127 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
1128 BPF_EXIT_INSN(),
1129 },
Prashant Bhole908142e2018-10-09 10:04:53 +09001130 .fixup_map_hash_8b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01001131 .errstr = "misaligned value access",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001132 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001133 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001134 },
1135 {
1136 "sometimes access memory with incorrect alignment",
1137 .insns = {
1138 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1139 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1140 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1141 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001142 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1143 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001144 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1145 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1146 BPF_EXIT_INSN(),
1147 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
1148 BPF_EXIT_INSN(),
1149 },
Prashant Bhole908142e2018-10-09 10:04:53 +09001150 .fixup_map_hash_8b = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001151 .errstr = "R0 invalid mem access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001152 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001153 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001154 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001155 },
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001156 {
1157 "jump test 1",
1158 .insns = {
1159 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1160 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
1161 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1162 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1163 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
1164 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
1165 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
1166 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
1167 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
1168 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
1169 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
1170 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
1171 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1172 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
1173 BPF_MOV64_IMM(BPF_REG_0, 0),
1174 BPF_EXIT_INSN(),
1175 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001176 .errstr_unpriv = "R1 pointer comparison",
1177 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001178 .result = ACCEPT,
1179 },
1180 {
1181 "jump test 2",
1182 .insns = {
1183 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1184 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1185 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1186 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
1187 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
1188 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1189 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1190 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
1191 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1192 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
1193 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
1194 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1195 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
1196 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
1197 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1198 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1199 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1200 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1201 BPF_MOV64_IMM(BPF_REG_0, 0),
1202 BPF_EXIT_INSN(),
1203 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001204 .errstr_unpriv = "R1 pointer comparison",
1205 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001206 .result = ACCEPT,
1207 },
1208 {
1209 "jump test 3",
1210 .insns = {
1211 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1212 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1213 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1214 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1215 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
1216 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
1217 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1218 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1219 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
1220 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
1221 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1222 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
1223 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1224 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
1225 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1226 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
1227 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
1228 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
1229 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1230 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
1231 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
1232 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
1233 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1234 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
1235 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001236 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1237 BPF_FUNC_map_delete_elem),
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001238 BPF_EXIT_INSN(),
1239 },
Prashant Bhole908142e2018-10-09 10:04:53 +09001240 .fixup_map_hash_8b = { 24 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001241 .errstr_unpriv = "R1 pointer comparison",
1242 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001243 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08001244 .retval = -ENOENT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001245 },
1246 {
1247 "jump test 4",
1248 .insns = {
1249 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1250 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1251 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1252 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1253 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1254 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1255 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1256 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1257 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1258 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1259 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1260 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1261 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1262 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1263 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1264 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1265 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1266 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1267 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1268 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1269 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1270 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1271 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1272 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1273 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1274 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1275 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1276 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1277 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1278 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1279 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1280 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1281 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1282 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1283 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1284 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1285 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1286 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1287 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1288 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1289 BPF_MOV64_IMM(BPF_REG_0, 0),
1290 BPF_EXIT_INSN(),
1291 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001292 .errstr_unpriv = "R1 pointer comparison",
1293 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001294 .result = ACCEPT,
1295 },
Alexei Starovoitov342ded42014-10-28 15:11:42 -07001296 {
1297 "jump test 5",
1298 .insns = {
1299 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1300 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1301 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1302 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1303 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1304 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1305 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1306 BPF_MOV64_IMM(BPF_REG_0, 0),
1307 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1308 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1309 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1310 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1311 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1312 BPF_MOV64_IMM(BPF_REG_0, 0),
1313 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1314 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1315 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1316 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1317 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1318 BPF_MOV64_IMM(BPF_REG_0, 0),
1319 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1320 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1321 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1322 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1323 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1324 BPF_MOV64_IMM(BPF_REG_0, 0),
1325 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1326 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1327 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1328 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1329 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1330 BPF_MOV64_IMM(BPF_REG_0, 0),
1331 BPF_EXIT_INSN(),
1332 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001333 .errstr_unpriv = "R1 pointer comparison",
1334 .result_unpriv = REJECT,
Alexei Starovoitov342ded42014-10-28 15:11:42 -07001335 .result = ACCEPT,
1336 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001337 {
1338 "access skb fields ok",
1339 .insns = {
1340 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1341 offsetof(struct __sk_buff, len)),
1342 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1343 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1344 offsetof(struct __sk_buff, mark)),
1345 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1346 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1347 offsetof(struct __sk_buff, pkt_type)),
1348 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1349 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1350 offsetof(struct __sk_buff, queue_mapping)),
1351 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitovc2497392015-03-16 18:06:02 -07001352 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1353 offsetof(struct __sk_buff, protocol)),
1354 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1355 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1356 offsetof(struct __sk_buff, vlan_present)),
1357 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1358 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1359 offsetof(struct __sk_buff, vlan_tci)),
1360 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Daniel Borkmannb1d9fc42017-04-19 23:01:17 +02001361 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1362 offsetof(struct __sk_buff, napi_id)),
1363 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001364 BPF_EXIT_INSN(),
1365 },
1366 .result = ACCEPT,
1367 },
1368 {
1369 "access skb fields bad1",
1370 .insns = {
1371 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
1372 BPF_EXIT_INSN(),
1373 },
1374 .errstr = "invalid bpf_context access",
1375 .result = REJECT,
1376 },
1377 {
1378 "access skb fields bad2",
1379 .insns = {
1380 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
1381 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1382 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1383 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1384 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001385 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1386 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001387 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1388 BPF_EXIT_INSN(),
1389 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1390 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1391 offsetof(struct __sk_buff, pkt_type)),
1392 BPF_EXIT_INSN(),
1393 },
Prashant Bhole908142e2018-10-09 10:04:53 +09001394 .fixup_map_hash_8b = { 4 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001395 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001396 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001397 .result = REJECT,
1398 },
1399 {
1400 "access skb fields bad3",
1401 .insns = {
1402 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1403 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1404 offsetof(struct __sk_buff, pkt_type)),
1405 BPF_EXIT_INSN(),
1406 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1407 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1408 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1409 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001410 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1411 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001412 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1413 BPF_EXIT_INSN(),
1414 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1415 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
1416 },
Prashant Bhole908142e2018-10-09 10:04:53 +09001417 .fixup_map_hash_8b = { 6 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001418 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001419 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001420 .result = REJECT,
1421 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001422 {
1423 "access skb fields bad4",
1424 .insns = {
1425 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
1426 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1427 offsetof(struct __sk_buff, len)),
1428 BPF_MOV64_IMM(BPF_REG_0, 0),
1429 BPF_EXIT_INSN(),
1430 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1431 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1432 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1433 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001434 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1435 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001436 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1437 BPF_EXIT_INSN(),
1438 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1439 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1440 },
Prashant Bhole908142e2018-10-09 10:04:53 +09001441 .fixup_map_hash_8b = { 7 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001442 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001443 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001444 .result = REJECT,
1445 },
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001446 {
John Fastabend41bc94f2017-08-15 22:33:56 -07001447 "invalid access __sk_buff family",
1448 .insns = {
1449 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1450 offsetof(struct __sk_buff, family)),
1451 BPF_EXIT_INSN(),
1452 },
1453 .errstr = "invalid bpf_context access",
1454 .result = REJECT,
1455 },
1456 {
1457 "invalid access __sk_buff remote_ip4",
1458 .insns = {
1459 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1460 offsetof(struct __sk_buff, remote_ip4)),
1461 BPF_EXIT_INSN(),
1462 },
1463 .errstr = "invalid bpf_context access",
1464 .result = REJECT,
1465 },
1466 {
1467 "invalid access __sk_buff local_ip4",
1468 .insns = {
1469 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1470 offsetof(struct __sk_buff, local_ip4)),
1471 BPF_EXIT_INSN(),
1472 },
1473 .errstr = "invalid bpf_context access",
1474 .result = REJECT,
1475 },
1476 {
1477 "invalid access __sk_buff remote_ip6",
1478 .insns = {
1479 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1480 offsetof(struct __sk_buff, remote_ip6)),
1481 BPF_EXIT_INSN(),
1482 },
1483 .errstr = "invalid bpf_context access",
1484 .result = REJECT,
1485 },
1486 {
1487 "invalid access __sk_buff local_ip6",
1488 .insns = {
1489 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1490 offsetof(struct __sk_buff, local_ip6)),
1491 BPF_EXIT_INSN(),
1492 },
1493 .errstr = "invalid bpf_context access",
1494 .result = REJECT,
1495 },
1496 {
1497 "invalid access __sk_buff remote_port",
1498 .insns = {
1499 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1500 offsetof(struct __sk_buff, remote_port)),
1501 BPF_EXIT_INSN(),
1502 },
1503 .errstr = "invalid bpf_context access",
1504 .result = REJECT,
1505 },
1506 {
1507 "invalid access __sk_buff remote_port",
1508 .insns = {
1509 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1510 offsetof(struct __sk_buff, local_port)),
1511 BPF_EXIT_INSN(),
1512 },
1513 .errstr = "invalid bpf_context access",
1514 .result = REJECT,
1515 },
1516 {
1517 "valid access __sk_buff family",
1518 .insns = {
1519 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1520 offsetof(struct __sk_buff, family)),
1521 BPF_EXIT_INSN(),
1522 },
1523 .result = ACCEPT,
1524 .prog_type = BPF_PROG_TYPE_SK_SKB,
1525 },
1526 {
1527 "valid access __sk_buff remote_ip4",
1528 .insns = {
1529 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1530 offsetof(struct __sk_buff, remote_ip4)),
1531 BPF_EXIT_INSN(),
1532 },
1533 .result = ACCEPT,
1534 .prog_type = BPF_PROG_TYPE_SK_SKB,
1535 },
1536 {
1537 "valid access __sk_buff local_ip4",
1538 .insns = {
1539 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1540 offsetof(struct __sk_buff, local_ip4)),
1541 BPF_EXIT_INSN(),
1542 },
1543 .result = ACCEPT,
1544 .prog_type = BPF_PROG_TYPE_SK_SKB,
1545 },
1546 {
1547 "valid access __sk_buff remote_ip6",
1548 .insns = {
1549 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1550 offsetof(struct __sk_buff, remote_ip6[0])),
1551 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1552 offsetof(struct __sk_buff, remote_ip6[1])),
1553 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1554 offsetof(struct __sk_buff, remote_ip6[2])),
1555 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1556 offsetof(struct __sk_buff, remote_ip6[3])),
1557 BPF_EXIT_INSN(),
1558 },
1559 .result = ACCEPT,
1560 .prog_type = BPF_PROG_TYPE_SK_SKB,
1561 },
1562 {
1563 "valid access __sk_buff local_ip6",
1564 .insns = {
1565 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1566 offsetof(struct __sk_buff, local_ip6[0])),
1567 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1568 offsetof(struct __sk_buff, local_ip6[1])),
1569 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1570 offsetof(struct __sk_buff, local_ip6[2])),
1571 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1572 offsetof(struct __sk_buff, local_ip6[3])),
1573 BPF_EXIT_INSN(),
1574 },
1575 .result = ACCEPT,
1576 .prog_type = BPF_PROG_TYPE_SK_SKB,
1577 },
1578 {
1579 "valid access __sk_buff remote_port",
1580 .insns = {
1581 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1582 offsetof(struct __sk_buff, remote_port)),
1583 BPF_EXIT_INSN(),
1584 },
1585 .result = ACCEPT,
1586 .prog_type = BPF_PROG_TYPE_SK_SKB,
1587 },
1588 {
1589 "valid access __sk_buff remote_port",
1590 .insns = {
1591 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1592 offsetof(struct __sk_buff, local_port)),
1593 BPF_EXIT_INSN(),
1594 },
1595 .result = ACCEPT,
1596 .prog_type = BPF_PROG_TYPE_SK_SKB,
1597 },
1598 {
John Fastabended850542017-08-28 07:11:24 -07001599 "invalid access of tc_classid for SK_SKB",
1600 .insns = {
1601 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1602 offsetof(struct __sk_buff, tc_classid)),
1603 BPF_EXIT_INSN(),
1604 },
1605 .result = REJECT,
1606 .prog_type = BPF_PROG_TYPE_SK_SKB,
1607 .errstr = "invalid bpf_context access",
1608 },
1609 {
John Fastabendf7e9cb12017-10-18 07:10:58 -07001610 "invalid access of skb->mark for SK_SKB",
1611 .insns = {
1612 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1613 offsetof(struct __sk_buff, mark)),
1614 BPF_EXIT_INSN(),
1615 },
1616 .result = REJECT,
1617 .prog_type = BPF_PROG_TYPE_SK_SKB,
1618 .errstr = "invalid bpf_context access",
1619 },
1620 {
1621 "check skb->mark is not writeable by SK_SKB",
John Fastabended850542017-08-28 07:11:24 -07001622 .insns = {
1623 BPF_MOV64_IMM(BPF_REG_0, 0),
1624 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1625 offsetof(struct __sk_buff, mark)),
1626 BPF_EXIT_INSN(),
1627 },
John Fastabendf7e9cb12017-10-18 07:10:58 -07001628 .result = REJECT,
John Fastabended850542017-08-28 07:11:24 -07001629 .prog_type = BPF_PROG_TYPE_SK_SKB,
John Fastabendf7e9cb12017-10-18 07:10:58 -07001630 .errstr = "invalid bpf_context access",
John Fastabended850542017-08-28 07:11:24 -07001631 },
1632 {
1633 "check skb->tc_index is writeable by SK_SKB",
1634 .insns = {
1635 BPF_MOV64_IMM(BPF_REG_0, 0),
1636 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1637 offsetof(struct __sk_buff, tc_index)),
1638 BPF_EXIT_INSN(),
1639 },
1640 .result = ACCEPT,
1641 .prog_type = BPF_PROG_TYPE_SK_SKB,
1642 },
1643 {
1644 "check skb->priority is writeable by SK_SKB",
1645 .insns = {
1646 BPF_MOV64_IMM(BPF_REG_0, 0),
1647 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1648 offsetof(struct __sk_buff, priority)),
1649 BPF_EXIT_INSN(),
1650 },
1651 .result = ACCEPT,
1652 .prog_type = BPF_PROG_TYPE_SK_SKB,
1653 },
1654 {
1655 "direct packet read for SK_SKB",
1656 .insns = {
1657 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1658 offsetof(struct __sk_buff, data)),
1659 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1660 offsetof(struct __sk_buff, data_end)),
1661 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1662 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1663 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1664 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1665 BPF_MOV64_IMM(BPF_REG_0, 0),
1666 BPF_EXIT_INSN(),
1667 },
1668 .result = ACCEPT,
1669 .prog_type = BPF_PROG_TYPE_SK_SKB,
1670 },
1671 {
1672 "direct packet write for SK_SKB",
1673 .insns = {
1674 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1675 offsetof(struct __sk_buff, data)),
1676 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1677 offsetof(struct __sk_buff, data_end)),
1678 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1679 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1680 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1681 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1682 BPF_MOV64_IMM(BPF_REG_0, 0),
1683 BPF_EXIT_INSN(),
1684 },
1685 .result = ACCEPT,
1686 .prog_type = BPF_PROG_TYPE_SK_SKB,
1687 },
1688 {
1689 "overlapping checks for direct packet access SK_SKB",
1690 .insns = {
1691 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1692 offsetof(struct __sk_buff, data)),
1693 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1694 offsetof(struct __sk_buff, data_end)),
1695 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1696 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1697 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1698 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1699 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1700 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1701 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1702 BPF_MOV64_IMM(BPF_REG_0, 0),
1703 BPF_EXIT_INSN(),
1704 },
1705 .result = ACCEPT,
1706 .prog_type = BPF_PROG_TYPE_SK_SKB,
1707 },
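	/*
	 * SK_MSG programs operate on struct sk_msg_md rather than struct
	 * __sk_buff; the tests below check which sk_msg_md fields may be
	 * read, and at which sizes and offsets.
	 */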
1708 {
John Fastabend4da0dca2018-05-17 14:17:03 -07001709 "valid access family in SK_MSG",
1710 .insns = {
1711 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1712 offsetof(struct sk_msg_md, family)),
1713 BPF_EXIT_INSN(),
1714 },
1715 .result = ACCEPT,
1716 .prog_type = BPF_PROG_TYPE_SK_MSG,
1717 },
1718 {
1719 "valid access remote_ip4 in SK_MSG",
1720 .insns = {
1721 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1722 offsetof(struct sk_msg_md, remote_ip4)),
1723 BPF_EXIT_INSN(),
1724 },
1725 .result = ACCEPT,
1726 .prog_type = BPF_PROG_TYPE_SK_MSG,
1727 },
1728 {
1729 "valid access local_ip4 in SK_MSG",
1730 .insns = {
1731 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1732 offsetof(struct sk_msg_md, local_ip4)),
1733 BPF_EXIT_INSN(),
1734 },
1735 .result = ACCEPT,
1736 .prog_type = BPF_PROG_TYPE_SK_MSG,
1737 },
1738 {
1739 "valid access remote_port in SK_MSG",
1740 .insns = {
1741 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1742 offsetof(struct sk_msg_md, remote_port)),
1743 BPF_EXIT_INSN(),
1744 },
1745 .result = ACCEPT,
1746 .prog_type = BPF_PROG_TYPE_SK_MSG,
1747 },
1748 {
1749 "valid access local_port in SK_MSG",
1750 .insns = {
1751 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1752 offsetof(struct sk_msg_md, local_port)),
1753 BPF_EXIT_INSN(),
1754 },
1755 .result = ACCEPT,
1756 .prog_type = BPF_PROG_TYPE_SK_MSG,
1757 },
1758 {
1759 "valid access remote_ip6 in SK_MSG",
1760 .insns = {
1761 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1762 offsetof(struct sk_msg_md, remote_ip6[0])),
1763 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1764 offsetof(struct sk_msg_md, remote_ip6[1])),
1765 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1766 offsetof(struct sk_msg_md, remote_ip6[2])),
1767 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1768 offsetof(struct sk_msg_md, remote_ip6[3])),
1769 BPF_EXIT_INSN(),
1770 },
1771 .result = ACCEPT,
1772 .prog_type = BPF_PROG_TYPE_SK_MSG,
1773 },
1774 {
1775 "valid access local_ip6 in SK_MSG",
1776 .insns = {
1777 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1778 offsetof(struct sk_msg_md, local_ip6[0])),
1779 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1780 offsetof(struct sk_msg_md, local_ip6[1])),
1781 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1782 offsetof(struct sk_msg_md, local_ip6[2])),
1783 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1784 offsetof(struct sk_msg_md, local_ip6[3])),
1785 BPF_EXIT_INSN(),
1786 },
1787 .result = ACCEPT,
1788 .prog_type = BPF_PROG_TYPE_SK_MSG,
1789 },
1790 {
1791 "invalid 64B read of family in SK_MSG",
1792 .insns = {
1793 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1794 offsetof(struct sk_msg_md, family)),
1795 BPF_EXIT_INSN(),
1796 },
1797 .errstr = "invalid bpf_context access",
1798 .result = REJECT,
1799 .prog_type = BPF_PROG_TYPE_SK_MSG,
1800 },
1801 {
1802 "invalid read past end of SK_MSG",
1803 .insns = {
1804 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1805 offsetof(struct sk_msg_md, local_port) + 4),
1806 BPF_EXIT_INSN(),
1807 },
1808 .errstr = "R0 !read_ok",
1809 .result = REJECT,
1810 .prog_type = BPF_PROG_TYPE_SK_MSG,
1811 },
1812 {
1813 "invalid read offset in SK_MSG",
1814 .insns = {
1815 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1816 offsetof(struct sk_msg_md, family) + 1),
1817 BPF_EXIT_INSN(),
1818 },
1819 .errstr = "invalid bpf_context access",
1820 .result = REJECT,
1821 .prog_type = BPF_PROG_TYPE_SK_MSG,
1822 },
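	/*
	 * sk_msg_md->data and ->data_end are pointer-sized fields (unlike
	 * the 32-bit data/data_end members of __sk_buff), so the direct
	 * packet access tests below fetch them with BPF_DW rather than
	 * BPF_W.
	 */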
1823 {
John Fastabend1acc60b2018-03-18 12:57:36 -07001824 "direct packet read for SK_MSG",
1825 .insns = {
1826 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1827 offsetof(struct sk_msg_md, data)),
1828 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1829 offsetof(struct sk_msg_md, data_end)),
1830 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1831 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1832 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1833 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1834 BPF_MOV64_IMM(BPF_REG_0, 0),
1835 BPF_EXIT_INSN(),
1836 },
1837 .result = ACCEPT,
1838 .prog_type = BPF_PROG_TYPE_SK_MSG,
1839 },
1840 {
1841 "direct packet write for SK_MSG",
1842 .insns = {
1843 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1844 offsetof(struct sk_msg_md, data)),
1845 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1846 offsetof(struct sk_msg_md, data_end)),
1847 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1848 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1849 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1850 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1851 BPF_MOV64_IMM(BPF_REG_0, 0),
1852 BPF_EXIT_INSN(),
1853 },
1854 .result = ACCEPT,
1855 .prog_type = BPF_PROG_TYPE_SK_MSG,
1856 },
1857 {
1858 "overlapping checks for direct packet access SK_MSG",
1859 .insns = {
1860 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1861 offsetof(struct sk_msg_md, data)),
1862 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1863 offsetof(struct sk_msg_md, data_end)),
1864 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1865 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1866 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1867 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1868 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1869 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1870 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1871 BPF_MOV64_IMM(BPF_REG_0, 0),
1872 BPF_EXIT_INSN(),
1873 },
1874 .result = ACCEPT,
1875 .prog_type = BPF_PROG_TYPE_SK_MSG,
1876 },
1877 {
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001878 "check skb->mark is not writeable by sockets",
1879 .insns = {
1880 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1881 offsetof(struct __sk_buff, mark)),
1882 BPF_EXIT_INSN(),
1883 },
1884 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001885 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001886 .result = REJECT,
1887 },
1888 {
1889 "check skb->tc_index is not writeable by sockets",
1890 .insns = {
1891 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1892 offsetof(struct __sk_buff, tc_index)),
1893 BPF_EXIT_INSN(),
1894 },
1895 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001896 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001897 .result = REJECT,
1898 },
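	/*
	 * __sk_buff->cb[] is a 20-byte scratch area (five u32 slots) that
	 * socket filter programs may both read and write. The tests below
	 * probe it at byte, half-word, word and double-word granularity and
	 * verify that misaligned or out-of-range accesses are rejected.
	 */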
1899 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001900 "check cb access: byte",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001901 .insns = {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001902 BPF_MOV64_IMM(BPF_REG_0, 0),
1903 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1904 offsetof(struct __sk_buff, cb[0])),
1905 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1906 offsetof(struct __sk_buff, cb[0]) + 1),
1907 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1908 offsetof(struct __sk_buff, cb[0]) + 2),
1909 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1910 offsetof(struct __sk_buff, cb[0]) + 3),
1911 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1912 offsetof(struct __sk_buff, cb[1])),
1913 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1914 offsetof(struct __sk_buff, cb[1]) + 1),
1915 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1916 offsetof(struct __sk_buff, cb[1]) + 2),
1917 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1918 offsetof(struct __sk_buff, cb[1]) + 3),
1919 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1920 offsetof(struct __sk_buff, cb[2])),
1921 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1922 offsetof(struct __sk_buff, cb[2]) + 1),
1923 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1924 offsetof(struct __sk_buff, cb[2]) + 2),
1925 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1926 offsetof(struct __sk_buff, cb[2]) + 3),
1927 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1928 offsetof(struct __sk_buff, cb[3])),
1929 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1930 offsetof(struct __sk_buff, cb[3]) + 1),
1931 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1932 offsetof(struct __sk_buff, cb[3]) + 2),
1933 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1934 offsetof(struct __sk_buff, cb[3]) + 3),
1935 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1936 offsetof(struct __sk_buff, cb[4])),
1937 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1938 offsetof(struct __sk_buff, cb[4]) + 1),
1939 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1940 offsetof(struct __sk_buff, cb[4]) + 2),
1941 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1942 offsetof(struct __sk_buff, cb[4]) + 3),
1943 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1944 offsetof(struct __sk_buff, cb[0])),
1945 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1946 offsetof(struct __sk_buff, cb[0]) + 1),
1947 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1948 offsetof(struct __sk_buff, cb[0]) + 2),
1949 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1950 offsetof(struct __sk_buff, cb[0]) + 3),
1951 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1952 offsetof(struct __sk_buff, cb[1])),
1953 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1954 offsetof(struct __sk_buff, cb[1]) + 1),
1955 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1956 offsetof(struct __sk_buff, cb[1]) + 2),
1957 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1958 offsetof(struct __sk_buff, cb[1]) + 3),
1959 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1960 offsetof(struct __sk_buff, cb[2])),
1961 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1962 offsetof(struct __sk_buff, cb[2]) + 1),
1963 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1964 offsetof(struct __sk_buff, cb[2]) + 2),
1965 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1966 offsetof(struct __sk_buff, cb[2]) + 3),
1967 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1968 offsetof(struct __sk_buff, cb[3])),
1969 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1970 offsetof(struct __sk_buff, cb[3]) + 1),
1971 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1972 offsetof(struct __sk_buff, cb[3]) + 2),
1973 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1974 offsetof(struct __sk_buff, cb[3]) + 3),
1975 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1976 offsetof(struct __sk_buff, cb[4])),
1977 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1978 offsetof(struct __sk_buff, cb[4]) + 1),
1979 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1980 offsetof(struct __sk_buff, cb[4]) + 2),
1981 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1982 offsetof(struct __sk_buff, cb[4]) + 3),
1983 BPF_EXIT_INSN(),
1984 },
1985 .result = ACCEPT,
1986 },
1987 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001988 "__sk_buff->hash, offset 0, byte store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001989 .insns = {
1990 BPF_MOV64_IMM(BPF_REG_0, 0),
1991 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001992 offsetof(struct __sk_buff, hash)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001993 BPF_EXIT_INSN(),
1994 },
1995 .errstr = "invalid bpf_context access",
1996 .result = REJECT,
1997 },
1998 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001999 "__sk_buff->tc_index, offset 3, byte store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002000 .insns = {
2001 BPF_MOV64_IMM(BPF_REG_0, 0),
2002 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07002003 offsetof(struct __sk_buff, tc_index) + 3),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002004 BPF_EXIT_INSN(),
2005 },
2006 .errstr = "invalid bpf_context access",
2007 .result = REJECT,
2008 },
2009 {
Yonghong Song18f3d6b2017-06-13 15:52:14 -07002010 "check skb->hash byte load permitted",
2011 .insns = {
2012 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02002013#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07002014 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2015 offsetof(struct __sk_buff, hash)),
2016#else
2017 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2018 offsetof(struct __sk_buff, hash) + 3),
2019#endif
2020 BPF_EXIT_INSN(),
2021 },
2022 .result = ACCEPT,
2023 },
2024 {
2025 "check skb->hash byte load not permitted 1",
2026 .insns = {
2027 BPF_MOV64_IMM(BPF_REG_0, 0),
2028 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2029 offsetof(struct __sk_buff, hash) + 1),
2030 BPF_EXIT_INSN(),
2031 },
2032 .errstr = "invalid bpf_context access",
2033 .result = REJECT,
2034 },
2035 {
2036 "check skb->hash byte load not permitted 2",
2037 .insns = {
2038 BPF_MOV64_IMM(BPF_REG_0, 0),
2039 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2040 offsetof(struct __sk_buff, hash) + 2),
2041 BPF_EXIT_INSN(),
2042 },
2043 .errstr = "invalid bpf_context access",
2044 .result = REJECT,
2045 },
2046 {
2047 "check skb->hash byte load not permitted 3",
2048 .insns = {
2049 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02002050#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07002051 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2052 offsetof(struct __sk_buff, hash) + 3),
2053#else
2054 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2055 offsetof(struct __sk_buff, hash)),
2056#endif
2057 BPF_EXIT_INSN(),
2058 },
2059 .errstr = "invalid bpf_context access",
2060 .result = REJECT,
2061 },
2062 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01002063 "check cb access: byte, wrong type",
2064 .insns = {
2065 BPF_MOV64_IMM(BPF_REG_0, 0),
2066 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002067 offsetof(struct __sk_buff, cb[0])),
2068 BPF_EXIT_INSN(),
2069 },
2070 .errstr = "invalid bpf_context access",
2071 .result = REJECT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002072 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2073 },
2074 {
2075 "check cb access: half",
2076 .insns = {
2077 BPF_MOV64_IMM(BPF_REG_0, 0),
2078 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2079 offsetof(struct __sk_buff, cb[0])),
2080 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2081 offsetof(struct __sk_buff, cb[0]) + 2),
2082 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2083 offsetof(struct __sk_buff, cb[1])),
2084 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2085 offsetof(struct __sk_buff, cb[1]) + 2),
2086 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2087 offsetof(struct __sk_buff, cb[2])),
2088 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2089 offsetof(struct __sk_buff, cb[2]) + 2),
2090 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2091 offsetof(struct __sk_buff, cb[3])),
2092 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2093 offsetof(struct __sk_buff, cb[3]) + 2),
2094 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2095 offsetof(struct __sk_buff, cb[4])),
2096 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2097 offsetof(struct __sk_buff, cb[4]) + 2),
2098 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2099 offsetof(struct __sk_buff, cb[0])),
2100 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2101 offsetof(struct __sk_buff, cb[0]) + 2),
2102 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2103 offsetof(struct __sk_buff, cb[1])),
2104 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2105 offsetof(struct __sk_buff, cb[1]) + 2),
2106 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2107 offsetof(struct __sk_buff, cb[2])),
2108 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2109 offsetof(struct __sk_buff, cb[2]) + 2),
2110 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2111 offsetof(struct __sk_buff, cb[3])),
2112 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2113 offsetof(struct __sk_buff, cb[3]) + 2),
2114 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2115 offsetof(struct __sk_buff, cb[4])),
2116 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2117 offsetof(struct __sk_buff, cb[4]) + 2),
2118 BPF_EXIT_INSN(),
2119 },
2120 .result = ACCEPT,
2121 },
2122 {
2123 "check cb access: half, unaligned",
2124 .insns = {
2125 BPF_MOV64_IMM(BPF_REG_0, 0),
2126 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2127 offsetof(struct __sk_buff, cb[0]) + 1),
2128 BPF_EXIT_INSN(),
2129 },
Edward Creef65b1842017-08-07 15:27:12 +01002130 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002131 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002132 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002133 },
2134 {
Yonghong Song31fd8582017-06-13 15:52:13 -07002135 "check __sk_buff->hash, offset 0, half store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002136 .insns = {
2137 BPF_MOV64_IMM(BPF_REG_0, 0),
2138 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07002139 offsetof(struct __sk_buff, hash)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002140 BPF_EXIT_INSN(),
2141 },
2142 .errstr = "invalid bpf_context access",
2143 .result = REJECT,
2144 },
2145 {
Yonghong Song31fd8582017-06-13 15:52:13 -07002146 "check __sk_buff->tc_index, offset 2, half store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002147 .insns = {
2148 BPF_MOV64_IMM(BPF_REG_0, 0),
2149 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07002150 offsetof(struct __sk_buff, tc_index) + 2),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002151 BPF_EXIT_INSN(),
2152 },
2153 .errstr = "invalid bpf_context access",
2154 .result = REJECT,
2155 },
2156 {
Yonghong Song18f3d6b2017-06-13 15:52:14 -07002157 "check skb->hash half load permitted",
2158 .insns = {
2159 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02002160#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07002161 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2162 offsetof(struct __sk_buff, hash)),
2163#else
2164 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2165 offsetof(struct __sk_buff, hash) + 2),
2166#endif
2167 BPF_EXIT_INSN(),
2168 },
2169 .result = ACCEPT,
2170 },
2171 {
2172 "check skb->hash half load not permitted",
2173 .insns = {
2174 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02002175#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07002176 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2177 offsetof(struct __sk_buff, hash) + 2),
2178#else
2179 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2180 offsetof(struct __sk_buff, hash)),
2181#endif
2182 BPF_EXIT_INSN(),
2183 },
2184 .errstr = "invalid bpf_context access",
2185 .result = REJECT,
2186 },
2187 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01002188 "check cb access: half, wrong type",
2189 .insns = {
2190 BPF_MOV64_IMM(BPF_REG_0, 0),
2191 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2192 offsetof(struct __sk_buff, cb[0])),
2193 BPF_EXIT_INSN(),
2194 },
2195 .errstr = "invalid bpf_context access",
2196 .result = REJECT,
2197 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2198 },
2199 {
2200 "check cb access: word",
2201 .insns = {
2202 BPF_MOV64_IMM(BPF_REG_0, 0),
2203 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2204 offsetof(struct __sk_buff, cb[0])),
2205 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2206 offsetof(struct __sk_buff, cb[1])),
2207 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2208 offsetof(struct __sk_buff, cb[2])),
2209 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2210 offsetof(struct __sk_buff, cb[3])),
2211 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2212 offsetof(struct __sk_buff, cb[4])),
2213 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2214 offsetof(struct __sk_buff, cb[0])),
2215 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2216 offsetof(struct __sk_buff, cb[1])),
2217 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2218 offsetof(struct __sk_buff, cb[2])),
2219 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2220 offsetof(struct __sk_buff, cb[3])),
2221 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2222 offsetof(struct __sk_buff, cb[4])),
2223 BPF_EXIT_INSN(),
2224 },
2225 .result = ACCEPT,
2226 },
2227 {
2228 "check cb access: word, unaligned 1",
2229 .insns = {
2230 BPF_MOV64_IMM(BPF_REG_0, 0),
2231 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2232 offsetof(struct __sk_buff, cb[0]) + 2),
2233 BPF_EXIT_INSN(),
2234 },
Edward Creef65b1842017-08-07 15:27:12 +01002235 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002236 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002237 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002238 },
2239 {
2240 "check cb access: word, unaligned 2",
2241 .insns = {
2242 BPF_MOV64_IMM(BPF_REG_0, 0),
2243 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2244 offsetof(struct __sk_buff, cb[4]) + 1),
2245 BPF_EXIT_INSN(),
2246 },
Edward Creef65b1842017-08-07 15:27:12 +01002247 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002248 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002249 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002250 },
2251 {
2252 "check cb access: word, unaligned 3",
2253 .insns = {
2254 BPF_MOV64_IMM(BPF_REG_0, 0),
2255 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2256 offsetof(struct __sk_buff, cb[4]) + 2),
2257 BPF_EXIT_INSN(),
2258 },
Edward Creef65b1842017-08-07 15:27:12 +01002259 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002260 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002261 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002262 },
2263 {
2264 "check cb access: word, unaligned 4",
2265 .insns = {
2266 BPF_MOV64_IMM(BPF_REG_0, 0),
2267 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2268 offsetof(struct __sk_buff, cb[4]) + 3),
2269 BPF_EXIT_INSN(),
2270 },
Edward Creef65b1842017-08-07 15:27:12 +01002271 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002272 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002273 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002274 },
2275 {
2276 "check cb access: double",
2277 .insns = {
2278 BPF_MOV64_IMM(BPF_REG_0, 0),
2279 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2280 offsetof(struct __sk_buff, cb[0])),
2281 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2282 offsetof(struct __sk_buff, cb[2])),
2283 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2284 offsetof(struct __sk_buff, cb[0])),
2285 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2286 offsetof(struct __sk_buff, cb[2])),
2287 BPF_EXIT_INSN(),
2288 },
2289 .result = ACCEPT,
2290 },
2291 {
2292 "check cb access: double, unaligned 1",
2293 .insns = {
2294 BPF_MOV64_IMM(BPF_REG_0, 0),
2295 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2296 offsetof(struct __sk_buff, cb[1])),
2297 BPF_EXIT_INSN(),
2298 },
Edward Creef65b1842017-08-07 15:27:12 +01002299 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002300 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002301 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002302 },
2303 {
2304 "check cb access: double, unaligned 2",
2305 .insns = {
2306 BPF_MOV64_IMM(BPF_REG_0, 0),
2307 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2308 offsetof(struct __sk_buff, cb[3])),
2309 BPF_EXIT_INSN(),
2310 },
Edward Creef65b1842017-08-07 15:27:12 +01002311 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002312 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002313 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002314 },
2315 {
2316 "check cb access: double, oob 1",
2317 .insns = {
2318 BPF_MOV64_IMM(BPF_REG_0, 0),
2319 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2320 offsetof(struct __sk_buff, cb[4])),
2321 BPF_EXIT_INSN(),
2322 },
2323 .errstr = "invalid bpf_context access",
2324 .result = REJECT,
2325 },
2326 {
2327 "check cb access: double, oob 2",
2328 .insns = {
2329 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002330 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2331 offsetof(struct __sk_buff, cb[4])),
2332 BPF_EXIT_INSN(),
2333 },
2334 .errstr = "invalid bpf_context access",
2335 .result = REJECT,
2336 },
2337 {
Yonghong Song31fd8582017-06-13 15:52:13 -07002338 "check __sk_buff->ifindex dw store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002339 .insns = {
2340 BPF_MOV64_IMM(BPF_REG_0, 0),
Yonghong Song31fd8582017-06-13 15:52:13 -07002341 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2342 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002343 BPF_EXIT_INSN(),
2344 },
2345 .errstr = "invalid bpf_context access",
2346 .result = REJECT,
2347 },
2348 {
Yonghong Song31fd8582017-06-13 15:52:13 -07002349 "check __sk_buff->ifindex dw load not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002350 .insns = {
2351 BPF_MOV64_IMM(BPF_REG_0, 0),
2352 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
Yonghong Song31fd8582017-06-13 15:52:13 -07002353 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002354 BPF_EXIT_INSN(),
2355 },
2356 .errstr = "invalid bpf_context access",
2357 .result = REJECT,
2358 },
2359 {
2360 "check cb access: double, wrong type",
2361 .insns = {
2362 BPF_MOV64_IMM(BPF_REG_0, 0),
2363 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2364 offsetof(struct __sk_buff, cb[0])),
2365 BPF_EXIT_INSN(),
2366 },
2367 .errstr = "invalid bpf_context access",
2368 .result = REJECT,
2369 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002370 },
2371 {
2372 "check out of range skb->cb access",
2373 .insns = {
2374 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002375 offsetof(struct __sk_buff, cb[0]) + 256),
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002376 BPF_EXIT_INSN(),
2377 },
2378 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002379 .errstr_unpriv = "",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002380 .result = REJECT,
2381 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
2382 },
2383 {
2384 "write skb fields from socket prog",
2385 .insns = {
2386 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2387 offsetof(struct __sk_buff, cb[4])),
2388 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2389 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2390 offsetof(struct __sk_buff, mark)),
2391 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2392 offsetof(struct __sk_buff, tc_index)),
2393 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2394 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2395 offsetof(struct __sk_buff, cb[0])),
2396 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2397 offsetof(struct __sk_buff, cb[2])),
2398 BPF_EXIT_INSN(),
2399 },
2400 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002401 .errstr_unpriv = "R1 leaks addr",
2402 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002403 },
2404 {
2405 "write skb fields from tc_cls_act prog",
2406 .insns = {
2407 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2408 offsetof(struct __sk_buff, cb[0])),
2409 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2410 offsetof(struct __sk_buff, mark)),
2411 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2412 offsetof(struct __sk_buff, tc_index)),
2413 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2414 offsetof(struct __sk_buff, tc_index)),
2415 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2416 offsetof(struct __sk_buff, cb[3])),
2417 BPF_EXIT_INSN(),
2418 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002419 .errstr_unpriv = "",
2420 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002421 .result = ACCEPT,
2422 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2423 },
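	/*
	 * PTR_TO_STACK tests: a copy of the frame pointer plus a constant
	 * offset may be used for loads and stores as long as the resulting
	 * slot is properly aligned and stays within the stack frame (at
	 * most MAX_BPF_STACK bytes below the frame pointer).
	 */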
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002424 {
2425 "PTR_TO_STACK store/load",
2426 .insns = {
2427 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2428 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2429 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2430 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2431 BPF_EXIT_INSN(),
2432 },
2433 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002434 .retval = 0xfaceb00c,
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002435 },
2436 {
2437 "PTR_TO_STACK store/load - bad alignment on off",
2438 .insns = {
2439 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2440 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2441 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2442 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2443 BPF_EXIT_INSN(),
2444 },
2445 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002446 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002447 },
2448 {
2449 "PTR_TO_STACK store/load - bad alignment on reg",
2450 .insns = {
2451 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2452 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2453 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2454 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2455 BPF_EXIT_INSN(),
2456 },
2457 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002458 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002459 },
2460 {
2461 "PTR_TO_STACK store/load - out of bounds low",
2462 .insns = {
2463 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2464 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2465 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2466 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2467 BPF_EXIT_INSN(),
2468 },
2469 .result = REJECT,
2470 .errstr = "invalid stack off=-79992 size=8",
2471 },
2472 {
2473 "PTR_TO_STACK store/load - out of bounds high",
2474 .insns = {
2475 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2476 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2477 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2478 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2479 BPF_EXIT_INSN(),
2480 },
2481 .result = REJECT,
2482 .errstr = "invalid stack off=0 size=8",
2483 },
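	/*
	 * "unpriv:" tests describe both outcomes of a load: result/errstr
	 * apply when the caller holds CAP_SYS_ADMIN, result_unpriv/
	 * errstr_unpriv when it does not. Unprivileged loads additionally
	 * reject pointer leaks, pointer comparisons and most pointer
	 * arithmetic.
	 */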
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002484 {
2485 "unpriv: return pointer",
2486 .insns = {
2487 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2488 BPF_EXIT_INSN(),
2489 },
2490 .result = ACCEPT,
2491 .result_unpriv = REJECT,
2492 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002493 .retval = POINTER_VALUE,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002494 },
2495 {
2496 "unpriv: add const to pointer",
2497 .insns = {
2498 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2499 BPF_MOV64_IMM(BPF_REG_0, 0),
2500 BPF_EXIT_INSN(),
2501 },
2502 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002503 },
2504 {
2505 "unpriv: add pointer to pointer",
2506 .insns = {
2507 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2508 BPF_MOV64_IMM(BPF_REG_0, 0),
2509 BPF_EXIT_INSN(),
2510 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08002511 .result = REJECT,
2512 .errstr = "R1 pointer += pointer",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002513 },
2514 {
2515 "unpriv: neg pointer",
2516 .insns = {
2517 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2518 BPF_MOV64_IMM(BPF_REG_0, 0),
2519 BPF_EXIT_INSN(),
2520 },
2521 .result = ACCEPT,
2522 .result_unpriv = REJECT,
2523 .errstr_unpriv = "R1 pointer arithmetic",
2524 },
2525 {
2526 "unpriv: cmp pointer with const",
2527 .insns = {
2528 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2529 BPF_MOV64_IMM(BPF_REG_0, 0),
2530 BPF_EXIT_INSN(),
2531 },
2532 .result = ACCEPT,
2533 .result_unpriv = REJECT,
2534 .errstr_unpriv = "R1 pointer comparison",
2535 },
2536 {
2537 "unpriv: cmp pointer with pointer",
2538 .insns = {
2539 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2540 BPF_MOV64_IMM(BPF_REG_0, 0),
2541 BPF_EXIT_INSN(),
2542 },
2543 .result = ACCEPT,
2544 .result_unpriv = REJECT,
2545 .errstr_unpriv = "R10 pointer comparison",
2546 },
2547 {
2548 "unpriv: check that printk is disallowed",
2549 .insns = {
2550 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2551 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2552 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2553 BPF_MOV64_IMM(BPF_REG_2, 8),
2554 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002555 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2556 BPF_FUNC_trace_printk),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002557 BPF_MOV64_IMM(BPF_REG_0, 0),
2558 BPF_EXIT_INSN(),
2559 },
Daniel Borkmann0eb69842016-12-15 01:39:10 +01002560 .errstr_unpriv = "unknown func bpf_trace_printk#6",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002561 .result_unpriv = REJECT,
2562 .result = ACCEPT,
2563 },
2564 {
2565 "unpriv: pass pointer to helper function",
2566 .insns = {
2567 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2568 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2570 BPF_LD_MAP_FD(BPF_REG_1, 0),
2571 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2572 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002573 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2574 BPF_FUNC_map_update_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002575 BPF_MOV64_IMM(BPF_REG_0, 0),
2576 BPF_EXIT_INSN(),
2577 },
Prashant Bhole908142e2018-10-09 10:04:53 +09002578 .fixup_map_hash_8b = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002579 .errstr_unpriv = "R4 leaks addr",
2580 .result_unpriv = REJECT,
2581 .result = ACCEPT,
2582 },
2583 {
2584 "unpriv: indirectly pass pointer on stack to helper function",
2585 .insns = {
2586 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2587 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2588 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2589 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002590 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2591 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002592 BPF_MOV64_IMM(BPF_REG_0, 0),
2593 BPF_EXIT_INSN(),
2594 },
Prashant Bhole908142e2018-10-09 10:04:53 +09002595 .fixup_map_hash_8b = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002596 .errstr = "invalid indirect read from stack off -8+0 size 8",
2597 .result = REJECT,
2598 },
2599 {
2600 "unpriv: mangle pointer on stack 1",
2601 .insns = {
2602 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2603 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2604 BPF_MOV64_IMM(BPF_REG_0, 0),
2605 BPF_EXIT_INSN(),
2606 },
2607 .errstr_unpriv = "attempt to corrupt spilled",
2608 .result_unpriv = REJECT,
2609 .result = ACCEPT,
2610 },
2611 {
2612 "unpriv: mangle pointer on stack 2",
2613 .insns = {
2614 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2615 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2616 BPF_MOV64_IMM(BPF_REG_0, 0),
2617 BPF_EXIT_INSN(),
2618 },
2619 .errstr_unpriv = "attempt to corrupt spilled",
2620 .result_unpriv = REJECT,
2621 .result = ACCEPT,
2622 },
2623 {
2624 "unpriv: read pointer from stack in small chunks",
2625 .insns = {
2626 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2627 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2628 BPF_MOV64_IMM(BPF_REG_0, 0),
2629 BPF_EXIT_INSN(),
2630 },
2631 .errstr = "invalid size",
2632 .result = REJECT,
2633 },
2634 {
2635 "unpriv: write pointer into ctx",
2636 .insns = {
2637 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2638 BPF_MOV64_IMM(BPF_REG_0, 0),
2639 BPF_EXIT_INSN(),
2640 },
2641 .errstr_unpriv = "R1 leaks addr",
2642 .result_unpriv = REJECT,
2643 .errstr = "invalid bpf_context access",
2644 .result = REJECT,
2645 },
2646 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002647 "unpriv: spill/fill of ctx",
2648 .insns = {
2649 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2650 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2651 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2652 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2653 BPF_MOV64_IMM(BPF_REG_0, 0),
2654 BPF_EXIT_INSN(),
2655 },
2656 .result = ACCEPT,
2657 },
2658 {
2659 "unpriv: spill/fill of ctx 2",
2660 .insns = {
2661 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2662 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2663 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2664 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002665 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2666 BPF_FUNC_get_hash_recalc),
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002667 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002668 BPF_EXIT_INSN(),
2669 },
2670 .result = ACCEPT,
2671 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2672 },
2673 {
2674 "unpriv: spill/fill of ctx 3",
2675 .insns = {
2676 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2677 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2678 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2679 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2680 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002681 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2682 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002683 BPF_EXIT_INSN(),
2684 },
2685 .result = REJECT,
2686 .errstr = "R1 type=fp expected=ctx",
2687 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2688 },
2689 {
2690 "unpriv: spill/fill of ctx 4",
2691 .insns = {
2692 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2693 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2694 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2695 BPF_MOV64_IMM(BPF_REG_0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002696 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2697 BPF_REG_0, -8, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002698 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002699 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2700 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002701 BPF_EXIT_INSN(),
2702 },
2703 .result = REJECT,
2704 .errstr = "R1 type=inv expected=ctx",
2705 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2706 },
2707 {
2708 "unpriv: spill/fill of different pointers stx",
2709 .insns = {
2710 BPF_MOV64_IMM(BPF_REG_3, 42),
2711 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2712 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2713 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2714 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2715 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2716 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2717 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2718 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2719 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2720 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2721 offsetof(struct __sk_buff, mark)),
2722 BPF_MOV64_IMM(BPF_REG_0, 0),
2723 BPF_EXIT_INSN(),
2724 },
2725 .result = REJECT,
2726 .errstr = "same insn cannot be used with different pointers",
2727 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2728 },
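	/*
	 * The BPF_SK_LOOKUP macro used in the next four tests (presumably
	 * defined earlier in this file) expands to a bpf_sk_lookup_tcp()
	 * call sequence; it leaves a reference-counted socket pointer (or
	 * NULL) in R0 that must eventually be passed to bpf_sk_release().
	 */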
2729 {
Joe Stringerb584ab82018-10-02 13:35:38 -07002730 "unpriv: spill/fill of different pointers stx - ctx and sock",
2731 .insns = {
2732 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2733 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
2734 BPF_SK_LOOKUP,
2735 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2736 /* u64 foo; */
2737 /* void *target = &foo; */
2738 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2739 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2740 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2741 /* if (skb == NULL) *target = sock; */
2742 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2743 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2744 /* else *target = skb; */
2745 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2746 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2747 /* struct __sk_buff *skb = *target; */
2748 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2749 /* skb->mark = 42; */
2750 BPF_MOV64_IMM(BPF_REG_3, 42),
2751 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2752 offsetof(struct __sk_buff, mark)),
2753 /* if (sk) bpf_sk_release(sk) */
2754 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2755 BPF_EMIT_CALL(BPF_FUNC_sk_release),
2756 BPF_MOV64_IMM(BPF_REG_0, 0),
2757 BPF_EXIT_INSN(),
2758 },
2759 .result = REJECT,
2760 .errstr = "type=ctx expected=sock",
2761 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2762 },
2763 {
2764 "unpriv: spill/fill of different pointers stx - leak sock",
2765 .insns = {
2766 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2767 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
2768 BPF_SK_LOOKUP,
2769 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2770 /* u64 foo; */
2771 /* void *target = &foo; */
2772 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2773 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2774 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2775 /* if (skb == NULL) *target = sock; */
2776 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2777 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2778 /* else *target = skb; */
2779 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2780 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2781 /* struct __sk_buff *skb = *target; */
2782 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2783 /* skb->mark = 42; */
2784 BPF_MOV64_IMM(BPF_REG_3, 42),
2785 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2786 offsetof(struct __sk_buff, mark)),
2787 BPF_EXIT_INSN(),
2788 },
2789 .result = REJECT,
2790 /* Also a candidate for "same insn cannot be used with different
2790  * pointers"; the verifier reports the unreleased reference instead.
2790  */
2791 .errstr = "Unreleased reference",
2792 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2793 },
2794 {
2795 "unpriv: spill/fill of different pointers stx - sock and ctx (read)",
2796 .insns = {
2797 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2798 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
2799 BPF_SK_LOOKUP,
2800 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2801 /* u64 foo; */
2802 /* void *target = &foo; */
2803 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2804 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2805 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2806 /* if (skb) *target = skb */
2807 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2808 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2809 /* else *target = sock */
2810 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2811 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2812 /* struct bpf_sock *sk = *target; */
2813 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2814 /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
2815 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
2816 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2817 offsetof(struct bpf_sock, mark)),
2818 BPF_EMIT_CALL(BPF_FUNC_sk_release),
2819 BPF_MOV64_IMM(BPF_REG_0, 0),
2820 BPF_EXIT_INSN(),
2821 },
2822 .result = REJECT,
2823 .errstr = "same insn cannot be used with different pointers",
2824 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2825 },
2826 {
2827 "unpriv: spill/fill of different pointers stx - sock and ctx (write)",
2828 .insns = {
2829 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2830 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
2831 BPF_SK_LOOKUP,
2832 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2833 /* u64 foo; */
2834 /* void *target = &foo; */
2835 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2836 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2837 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2838 /* if (skb) *target = skb */
2839 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2840 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2841 /* else *target = sock */
2842 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2843 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2844 /* struct bpf_sock *sk = *target; */
2845 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2846 /* if (sk) sk->mark = 42; bpf_sk_release(sk); */
2847 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2848 BPF_MOV64_IMM(BPF_REG_3, 42),
2849 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2850 offsetof(struct bpf_sock, mark)),
2851 BPF_EMIT_CALL(BPF_FUNC_sk_release),
2852 BPF_MOV64_IMM(BPF_REG_0, 0),
2853 BPF_EXIT_INSN(),
2854 },
2855 .result = REJECT,
2856 /* Also a candidate for "same insn cannot be used with different
2856  * pointers"; the verifier reports the socket write violation instead.
2856  */
2857 .errstr = "cannot write into socket",
2858 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2859 },
2860 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002861 "unpriv: spill/fill of different pointers ldx",
2862 .insns = {
2863 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2864 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2865 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2866 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2867 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2868 -(__s32)offsetof(struct bpf_perf_event_data,
2869 sample_period) - 8),
2870 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2871 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2872 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2873 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2874 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2875 offsetof(struct bpf_perf_event_data,
2876 sample_period)),
2877 BPF_MOV64_IMM(BPF_REG_0, 0),
2878 BPF_EXIT_INSN(),
2879 },
2880 .result = REJECT,
2881 .errstr = "same insn cannot be used with different pointers",
2882 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
2883 },
2884 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002885 "unpriv: write pointer into map elem value",
2886 .insns = {
2887 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2888 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2889 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2890 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002891 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2892 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002893 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2894 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2895 BPF_EXIT_INSN(),
2896 },
Prashant Bhole908142e2018-10-09 10:04:53 +09002897 .fixup_map_hash_8b = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002898 .errstr_unpriv = "R0 leaks addr",
2899 .result_unpriv = REJECT,
2900 .result = ACCEPT,
2901 },
2902 {
2903 "unpriv: partial copy of pointer",
2904 .insns = {
2905 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2906 BPF_MOV64_IMM(BPF_REG_0, 0),
2907 BPF_EXIT_INSN(),
2908 },
2909 .errstr_unpriv = "R10 partial copy",
2910 .result_unpriv = REJECT,
2911 .result = ACCEPT,
2912 },
2913 {
2914 "unpriv: pass pointer to tail_call",
2915 .insns = {
2916 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2917 BPF_LD_MAP_FD(BPF_REG_2, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002918 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2919 BPF_FUNC_tail_call),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002920 BPF_MOV64_IMM(BPF_REG_0, 0),
2921 BPF_EXIT_INSN(),
2922 },
Daniel Borkmann06be0862018-06-02 23:06:31 +02002923 .fixup_prog1 = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002924 .errstr_unpriv = "R3 leaks addr into helper",
2925 .result_unpriv = REJECT,
2926 .result = ACCEPT,
2927 },
2928 {
2929 "unpriv: cmp map pointer with zero",
2930 .insns = {
2931 BPF_MOV64_IMM(BPF_REG_1, 0),
2932 BPF_LD_MAP_FD(BPF_REG_1, 0),
2933 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2934 BPF_MOV64_IMM(BPF_REG_0, 0),
2935 BPF_EXIT_INSN(),
2936 },
Prashant Bhole908142e2018-10-09 10:04:53 +09002937 .fixup_map_hash_8b = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002938 .errstr_unpriv = "R1 pointer comparison",
2939 .result_unpriv = REJECT,
2940 .result = ACCEPT,
2941 },
2942 {
2943 "unpriv: write into frame pointer",
2944 .insns = {
2945 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2946 BPF_MOV64_IMM(BPF_REG_0, 0),
2947 BPF_EXIT_INSN(),
2948 },
2949 .errstr = "frame pointer is read only",
2950 .result = REJECT,
2951 },
2952 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002953 "unpriv: spill/fill frame pointer",
2954 .insns = {
2955 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2956 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2957 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2958 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2959 BPF_MOV64_IMM(BPF_REG_0, 0),
2960 BPF_EXIT_INSN(),
2961 },
2962 .errstr = "frame pointer is read only",
2963 .result = REJECT,
2964 },
2965 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002966 "unpriv: cmp of frame pointer",
2967 .insns = {
2968 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2969 BPF_MOV64_IMM(BPF_REG_0, 0),
2970 BPF_EXIT_INSN(),
2971 },
2972 .errstr_unpriv = "R10 pointer comparison",
2973 .result_unpriv = REJECT,
2974 .result = ACCEPT,
2975 },
2976 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02002977 "unpriv: adding of fp",
2978 .insns = {
2979 BPF_MOV64_IMM(BPF_REG_0, 0),
2980 BPF_MOV64_IMM(BPF_REG_1, 0),
2981 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2982 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2983 BPF_EXIT_INSN(),
2984 },
Edward Creef65b1842017-08-07 15:27:12 +01002985 .result = ACCEPT,
Daniel Borkmann728a8532017-04-27 01:39:32 +02002986 },
2987 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002988 "unpriv: cmp of stack pointer",
2989 .insns = {
2990 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2991 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2992 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2993 BPF_MOV64_IMM(BPF_REG_0, 0),
2994 BPF_EXIT_INSN(),
2995 },
2996 .errstr_unpriv = "R2 pointer comparison",
2997 .result_unpriv = REJECT,
2998 .result = ACCEPT,
2999 },
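	/*
	 * runtime/jit tail_call tests: fixup_prog1 patches in a prog-array
	 * map populated by the test harness. An in-bounds index whose slot
	 * holds a program tail-calls into it (retval 42 or 41 below), an
	 * empty or out-of-range slot falls through to the instructions
	 * after the call, and an index wider than 32 bits is truncated to
	 * its low 32 bits before the lookup.
	 */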
3000 {
Daniel Borkmannb33eb732018-02-26 22:34:33 +01003001 "runtime/jit: tail_call within bounds, prog once",
3002 .insns = {
3003 BPF_MOV64_IMM(BPF_REG_3, 0),
3004 BPF_LD_MAP_FD(BPF_REG_2, 0),
3005 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3006 BPF_FUNC_tail_call),
3007 BPF_MOV64_IMM(BPF_REG_0, 1),
3008 BPF_EXIT_INSN(),
3009 },
Daniel Borkmann06be0862018-06-02 23:06:31 +02003010 .fixup_prog1 = { 1 },
Daniel Borkmannb33eb732018-02-26 22:34:33 +01003011 .result = ACCEPT,
3012 .retval = 42,
3013 },
3014 {
3015 "runtime/jit: tail_call within bounds, prog loop",
3016 .insns = {
3017 BPF_MOV64_IMM(BPF_REG_3, 1),
3018 BPF_LD_MAP_FD(BPF_REG_2, 0),
3019 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3020 BPF_FUNC_tail_call),
3021 BPF_MOV64_IMM(BPF_REG_0, 1),
3022 BPF_EXIT_INSN(),
3023 },
Daniel Borkmann06be0862018-06-02 23:06:31 +02003024 .fixup_prog1 = { 1 },
Daniel Borkmannb33eb732018-02-26 22:34:33 +01003025 .result = ACCEPT,
3026 .retval = 41,
3027 },
3028 {
3029 "runtime/jit: tail_call within bounds, no prog",
3030 .insns = {
3031 BPF_MOV64_IMM(BPF_REG_3, 2),
3032 BPF_LD_MAP_FD(BPF_REG_2, 0),
3033 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3034 BPF_FUNC_tail_call),
3035 BPF_MOV64_IMM(BPF_REG_0, 1),
3036 BPF_EXIT_INSN(),
3037 },
Daniel Borkmann06be0862018-06-02 23:06:31 +02003038 .fixup_prog1 = { 1 },
Daniel Borkmannb33eb732018-02-26 22:34:33 +01003039 .result = ACCEPT,
3040 .retval = 1,
3041 },
3042 {
3043 "runtime/jit: tail_call out of bounds",
3044 .insns = {
3045 BPF_MOV64_IMM(BPF_REG_3, 256),
3046 BPF_LD_MAP_FD(BPF_REG_2, 0),
3047 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3048 BPF_FUNC_tail_call),
3049 BPF_MOV64_IMM(BPF_REG_0, 2),
3050 BPF_EXIT_INSN(),
3051 },
Daniel Borkmann06be0862018-06-02 23:06:31 +02003052 .fixup_prog1 = { 1 },
Daniel Borkmannb33eb732018-02-26 22:34:33 +01003053 .result = ACCEPT,
3054 .retval = 2,
3055 },
3056 {
Daniel Borkmann16338a92018-02-23 01:03:43 +01003057 "runtime/jit: pass negative index to tail_call",
3058 .insns = {
3059 BPF_MOV64_IMM(BPF_REG_3, -1),
3060 BPF_LD_MAP_FD(BPF_REG_2, 0),
3061 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3062 BPF_FUNC_tail_call),
Daniel Borkmannb33eb732018-02-26 22:34:33 +01003063 BPF_MOV64_IMM(BPF_REG_0, 2),
Daniel Borkmann16338a92018-02-23 01:03:43 +01003064 BPF_EXIT_INSN(),
3065 },
Daniel Borkmann06be0862018-06-02 23:06:31 +02003066 .fixup_prog1 = { 1 },
Daniel Borkmann16338a92018-02-23 01:03:43 +01003067 .result = ACCEPT,
Daniel Borkmannb33eb732018-02-26 22:34:33 +01003068 .retval = 2,
Daniel Borkmann16338a92018-02-23 01:03:43 +01003069 },
3070 {
3071 "runtime/jit: pass > 32bit index to tail_call",
3072 .insns = {
3073 BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
3074 BPF_LD_MAP_FD(BPF_REG_2, 0),
3075 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3076 BPF_FUNC_tail_call),
Daniel Borkmannb33eb732018-02-26 22:34:33 +01003077 BPF_MOV64_IMM(BPF_REG_0, 2),
Daniel Borkmann16338a92018-02-23 01:03:43 +01003078 BPF_EXIT_INSN(),
3079 },
Daniel Borkmann06be0862018-06-02 23:06:31 +02003080 .fixup_prog1 = { 2 },
Daniel Borkmann16338a92018-02-23 01:03:43 +01003081 .result = ACCEPT,
Daniel Borkmannb33eb732018-02-26 22:34:33 +01003082 .retval = 42,
Daniel Borkmann16338a92018-02-23 01:03:43 +01003083 },
3084 {
Yonghong Song332270f2017-04-29 22:52:42 -07003085 "stack pointer arithmetic",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07003086 .insns = {
Yonghong Song332270f2017-04-29 22:52:42 -07003087 BPF_MOV64_IMM(BPF_REG_1, 4),
3088 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
3089 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
3090 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3091 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3092 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3093 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
3094 BPF_ST_MEM(BPF_W, BPF_REG_2, 4, 0),
3095 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3096 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3097 BPF_ST_MEM(BPF_W, BPF_REG_2, 4, 0),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07003098 BPF_MOV64_IMM(BPF_REG_0, 0),
3099 BPF_EXIT_INSN(),
3100 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07003101 .result = ACCEPT,
3102 },
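	/*
	 * raw_stack tests: bpf_skb_load_bytes() writes into a buffer on the
	 * BPF stack, so the destination does not need to be initialized
	 * first, but it must lie within the frame and the length must be a
	 * known positive value. Spilled registers inside the written window
	 * are clobbered by the call and read back as scalars ('inv'), so
	 * dereferencing them afterwards is rejected.
	 */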
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003103 {
3104 "raw_stack: no skb_load_bytes",
3105 .insns = {
3106 BPF_MOV64_IMM(BPF_REG_2, 4),
3107 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3108 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3109 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3110 BPF_MOV64_IMM(BPF_REG_4, 8),
3111 /* Call to skb_load_bytes() omitted. */
3112 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3113 BPF_EXIT_INSN(),
3114 },
3115 .result = REJECT,
3116 .errstr = "invalid read from stack off -8+0 size 8",
3117 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3118 },
3119 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003120 "raw_stack: skb_load_bytes, negative len",
3121 .insns = {
3122 BPF_MOV64_IMM(BPF_REG_2, 4),
3123 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3124 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3125 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3126 BPF_MOV64_IMM(BPF_REG_4, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003127 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3128 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003129 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3130 BPF_EXIT_INSN(),
3131 },
3132 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003133 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003134 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3135 },
3136 {
3137 "raw_stack: skb_load_bytes, negative len 2",
3138 .insns = {
3139 BPF_MOV64_IMM(BPF_REG_2, 4),
3140 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3141 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3142 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3143 BPF_MOV64_IMM(BPF_REG_4, ~0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003144 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3145 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003146 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3147 BPF_EXIT_INSN(),
3148 },
3149 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003150 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003151 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3152 },
3153 {
3154 "raw_stack: skb_load_bytes, zero len",
3155 .insns = {
3156 BPF_MOV64_IMM(BPF_REG_2, 4),
3157 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3158 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3159 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3160 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003161 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3162 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003163 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3164 BPF_EXIT_INSN(),
3165 },
3166 .result = REJECT,
3167 .errstr = "invalid stack type R3",
3168 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3169 },
3170 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003171 "raw_stack: skb_load_bytes, no init",
3172 .insns = {
3173 BPF_MOV64_IMM(BPF_REG_2, 4),
3174 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3175 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3176 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3177 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003178 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3179 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003180 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3181 BPF_EXIT_INSN(),
3182 },
3183 .result = ACCEPT,
3184 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3185 },
3186 {
3187 "raw_stack: skb_load_bytes, init",
3188 .insns = {
3189 BPF_MOV64_IMM(BPF_REG_2, 4),
3190 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3191 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3192 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
3193 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3194 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003195 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3196 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02003197 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3198 BPF_EXIT_INSN(),
3199 },
3200 .result = ACCEPT,
3201 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3202 },
3203 {
3204 "raw_stack: skb_load_bytes, spilled regs around bounds",
3205 .insns = {
3206 BPF_MOV64_IMM(BPF_REG_2, 4),
3207 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3208 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3209 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3210 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
3211 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3212 BPF_MOV64_IMM(BPF_REG_4, 8),
3213 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3214 BPF_FUNC_skb_load_bytes),
3215 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3216 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3217 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3218 offsetof(struct __sk_buff, mark)),
3219 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3220 offsetof(struct __sk_buff, priority)),
3221 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3222 BPF_EXIT_INSN(),
3223 },
3224 .result = ACCEPT,
3225 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3226 },
3227 {
3228 "raw_stack: skb_load_bytes, spilled regs corruption",
3229 .insns = {
3230 BPF_MOV64_IMM(BPF_REG_2, 4),
3231 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3232 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3233 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3234 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3235 BPF_MOV64_IMM(BPF_REG_4, 8),
3236 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3237 BPF_FUNC_skb_load_bytes),
3238 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3239 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3240 offsetof(struct __sk_buff, mark)),
3241 BPF_EXIT_INSN(),
3242 },
3243 .result = REJECT,
3244 .errstr = "R0 invalid mem access 'inv'",
3245 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3246 },
3247 {
3248 "raw_stack: skb_load_bytes, spilled regs corruption 2",
3249 .insns = {
3250 BPF_MOV64_IMM(BPF_REG_2, 4),
3251 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3252 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3253 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3254 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3255 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
3256 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3257 BPF_MOV64_IMM(BPF_REG_4, 8),
3258 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3259 BPF_FUNC_skb_load_bytes),
3260 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3261 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3262 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
3263 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3264 offsetof(struct __sk_buff, mark)),
3265 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3266 offsetof(struct __sk_buff, priority)),
3267 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3268 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
3269 offsetof(struct __sk_buff, pkt_type)),
3270 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3271 BPF_EXIT_INSN(),
3272 },
3273 .result = REJECT,
3274 .errstr = "R3 invalid mem access 'inv'",
3275 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3276 },
3277 {
3278 "raw_stack: skb_load_bytes, spilled regs + data",
3279 .insns = {
3280 BPF_MOV64_IMM(BPF_REG_2, 4),
3281 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3283 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3284 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3285 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
3286 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3287 BPF_MOV64_IMM(BPF_REG_4, 8),
3288 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3289 BPF_FUNC_skb_load_bytes),
3290 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3291 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3292 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
3293 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3294 offsetof(struct __sk_buff, mark)),
3295 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3296 offsetof(struct __sk_buff, priority)),
3297 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3298 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3299 BPF_EXIT_INSN(),
3300 },
3301 .result = ACCEPT,
3302 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3303 },
3304 {
3305 "raw_stack: skb_load_bytes, invalid access 1",
3306 .insns = {
3307 BPF_MOV64_IMM(BPF_REG_2, 4),
3308 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3309 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
3310 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3311 BPF_MOV64_IMM(BPF_REG_4, 8),
3312 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3313 BPF_FUNC_skb_load_bytes),
3314 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3315 BPF_EXIT_INSN(),
3316 },
3317 .result = REJECT,
3318 .errstr = "invalid stack type R3 off=-513 access_size=8",
3319 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3320 },
3321 {
3322 "raw_stack: skb_load_bytes, invalid access 2",
3323 .insns = {
3324 BPF_MOV64_IMM(BPF_REG_2, 4),
3325 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3326 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3327 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3328 BPF_MOV64_IMM(BPF_REG_4, 8),
3329 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3330 BPF_FUNC_skb_load_bytes),
3331 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3332 BPF_EXIT_INSN(),
3333 },
3334 .result = REJECT,
3335 .errstr = "invalid stack type R3 off=-1 access_size=8",
3336 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3337 },
3338 {
3339 "raw_stack: skb_load_bytes, invalid access 3",
3340 .insns = {
3341 BPF_MOV64_IMM(BPF_REG_2, 4),
3342 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3343 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
3344 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3345 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3346 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3347 BPF_FUNC_skb_load_bytes),
3348 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3349 BPF_EXIT_INSN(),
3350 },
3351 .result = REJECT,
3352 .errstr = "R4 min value is negative",
3353 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3354 },
3355 {
3356 "raw_stack: skb_load_bytes, invalid access 4",
3357 .insns = {
3358 BPF_MOV64_IMM(BPF_REG_2, 4),
3359 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3361 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3362 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3363 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3364 BPF_FUNC_skb_load_bytes),
3365 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3366 BPF_EXIT_INSN(),
3367 },
3368 .result = REJECT,
3369 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3370 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3371 },
3372 {
3373 "raw_stack: skb_load_bytes, invalid access 5",
3374 .insns = {
3375 BPF_MOV64_IMM(BPF_REG_2, 4),
3376 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3377 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3378 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3379 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3380 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3381 BPF_FUNC_skb_load_bytes),
3382 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3383 BPF_EXIT_INSN(),
3384 },
3385 .result = REJECT,
3386 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3387 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3388 },
3389 {
3390 "raw_stack: skb_load_bytes, invalid access 6",
3391 .insns = {
3392 BPF_MOV64_IMM(BPF_REG_2, 4),
3393 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3394 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3395 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3396 BPF_MOV64_IMM(BPF_REG_4, 0),
3397 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3398 BPF_FUNC_skb_load_bytes),
3399 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3400 BPF_EXIT_INSN(),
3401 },
3402 .result = REJECT,
3403 .errstr = "invalid stack type R3 off=-512 access_size=0",
3404 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3405 },
3406 {
3407 "raw_stack: skb_load_bytes, large access",
3408 .insns = {
3409 BPF_MOV64_IMM(BPF_REG_2, 4),
3410 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3411 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3412 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3413 BPF_MOV64_IMM(BPF_REG_4, 512),
3414 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3415 BPF_FUNC_skb_load_bytes),
3416 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3417 BPF_EXIT_INSN(),
3418 },
3419 .result = ACCEPT,
3420 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3421 },
3422 {
3423 "context stores via ST",
3424 .insns = {
3425 BPF_MOV64_IMM(BPF_REG_0, 0),
3426 BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
3427 BPF_EXIT_INSN(),
3428 },
3429 .errstr = "BPF_ST stores into R1 inv is not allowed",
3430 .result = REJECT,
3431 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3432 },
3433 {
3434 "context stores via XADD",
3435 .insns = {
3436 BPF_MOV64_IMM(BPF_REG_0, 0),
3437 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
3438 BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
3439 BPF_EXIT_INSN(),
3440 },
3441 .errstr = "BPF_XADD stores into R1 inv is not allowed",
3442 .result = REJECT,
3443 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3444 },
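	/* Direct packet access tests: the program loads skb->data and
	 * skb->data_end and must prove, via a bounds check against data_end,
	 * that every packet load or store stays inside the packet before the
	 * verifier will accept it.
	 */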
3445 {
3446 "direct packet access: test1",
3447 .insns = {
3448 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3449 offsetof(struct __sk_buff, data)),
3450 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3451 offsetof(struct __sk_buff, data_end)),
3452 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3453 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3454 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3455 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3456 BPF_MOV64_IMM(BPF_REG_0, 0),
3457 BPF_EXIT_INSN(),
3458 },
3459 .result = ACCEPT,
3460 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3461 },
3462 {
3463 "direct packet access: test2",
3464 .insns = {
3465 BPF_MOV64_IMM(BPF_REG_0, 1),
3466 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
3467 offsetof(struct __sk_buff, data_end)),
3468 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3469 offsetof(struct __sk_buff, data)),
3470 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3471 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
3472 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
3473 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
3474 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
3475 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
3476 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3477 offsetof(struct __sk_buff, data)),
3478 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
3479 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3480 offsetof(struct __sk_buff, len)),
3481 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
3482 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
3483 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
3484 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
3485 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3486 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
3487 offsetof(struct __sk_buff, data_end)),
3488 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3489 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
3490 BPF_MOV64_IMM(BPF_REG_0, 0),
3491 BPF_EXIT_INSN(),
3492 },
3493 .result = ACCEPT,
3494 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3495 },
3496 {
3497 "direct packet access: test3",
3498 .insns = {
3499 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3500 offsetof(struct __sk_buff, data)),
3501 BPF_MOV64_IMM(BPF_REG_0, 0),
3502 BPF_EXIT_INSN(),
3503 },
3504 .errstr = "invalid bpf_context access off=76",
3505 .result = REJECT,
3506 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3507 },
3508 {
3509 "direct packet access: test4 (write)",
3510 .insns = {
3511 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3512 offsetof(struct __sk_buff, data)),
3513 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3514 offsetof(struct __sk_buff, data_end)),
3515 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3516 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3517 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3518 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3519 BPF_MOV64_IMM(BPF_REG_0, 0),
3520 BPF_EXIT_INSN(),
3521 },
3522 .result = ACCEPT,
3523 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3524 },
3525 {
3526 "direct packet access: test5 (pkt_end >= reg, good access)",
3527 .insns = {
3528 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3529 offsetof(struct __sk_buff, data)),
3530 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3531 offsetof(struct __sk_buff, data_end)),
3532 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3533 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3534 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3535 BPF_MOV64_IMM(BPF_REG_0, 1),
3536 BPF_EXIT_INSN(),
3537 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3538 BPF_MOV64_IMM(BPF_REG_0, 0),
3539 BPF_EXIT_INSN(),
3540 },
3541 .result = ACCEPT,
3542 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3543 },
3544 {
3545 "direct packet access: test6 (pkt_end >= reg, bad access)",
3546 .insns = {
3547 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3548 offsetof(struct __sk_buff, data)),
3549 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3550 offsetof(struct __sk_buff, data_end)),
3551 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3552 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3553 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3554 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3555 BPF_MOV64_IMM(BPF_REG_0, 1),
3556 BPF_EXIT_INSN(),
3557 BPF_MOV64_IMM(BPF_REG_0, 0),
3558 BPF_EXIT_INSN(),
3559 },
3560 .errstr = "invalid access to packet",
3561 .result = REJECT,
3562 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3563 },
3564 {
3565 "direct packet access: test7 (pkt_end >= reg, both accesses)",
3566 .insns = {
3567 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3568 offsetof(struct __sk_buff, data)),
3569 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3570 offsetof(struct __sk_buff, data_end)),
3571 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3572 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3573 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3574 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3575 BPF_MOV64_IMM(BPF_REG_0, 1),
3576 BPF_EXIT_INSN(),
3577 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3578 BPF_MOV64_IMM(BPF_REG_0, 0),
3579 BPF_EXIT_INSN(),
3580 },
3581 .errstr = "invalid access to packet",
3582 .result = REJECT,
3583 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3584 },
3585 {
3586 "direct packet access: test8 (double test, variant 1)",
3587 .insns = {
3588 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3589 offsetof(struct __sk_buff, data)),
3590 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3591 offsetof(struct __sk_buff, data_end)),
3592 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3593 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3594 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3595 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3596 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3597 BPF_MOV64_IMM(BPF_REG_0, 1),
3598 BPF_EXIT_INSN(),
3599 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3600 BPF_MOV64_IMM(BPF_REG_0, 0),
3601 BPF_EXIT_INSN(),
3602 },
3603 .result = ACCEPT,
3604 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3605 },
3606 {
3607 "direct packet access: test9 (double test, variant 2)",
3608 .insns = {
3609 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3610 offsetof(struct __sk_buff, data)),
3611 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3612 offsetof(struct __sk_buff, data_end)),
3613 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3614 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3615 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3616 BPF_MOV64_IMM(BPF_REG_0, 1),
3617 BPF_EXIT_INSN(),
3618 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3619 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3620 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3621 BPF_MOV64_IMM(BPF_REG_0, 0),
3622 BPF_EXIT_INSN(),
3623 },
3624 .result = ACCEPT,
3625 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3626 },
3627 {
3628 "direct packet access: test10 (write invalid)",
3629 .insns = {
3630 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3631 offsetof(struct __sk_buff, data)),
3632 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3633 offsetof(struct __sk_buff, data_end)),
3634 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3635 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3636 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3637 BPF_MOV64_IMM(BPF_REG_0, 0),
3638 BPF_EXIT_INSN(),
3639 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3640 BPF_MOV64_IMM(BPF_REG_0, 0),
3641 BPF_EXIT_INSN(),
3642 },
3643 .errstr = "invalid access to packet",
3644 .result = REJECT,
3645 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3646 },
3647 {
3648 "direct packet access: test11 (shift, good access)",
3649 .insns = {
3650 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3651 offsetof(struct __sk_buff, data)),
3652 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3653 offsetof(struct __sk_buff, data_end)),
3654 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3655 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3656 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3657 BPF_MOV64_IMM(BPF_REG_3, 144),
3658 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3659 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3660 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
3661 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3662 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3663 BPF_MOV64_IMM(BPF_REG_0, 1),
3664 BPF_EXIT_INSN(),
3665 BPF_MOV64_IMM(BPF_REG_0, 0),
3666 BPF_EXIT_INSN(),
3667 },
3668 .result = ACCEPT,
3669 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3670 .retval = 1,
3671 },
3672 {
3673 "direct packet access: test12 (and, good access)",
3674 .insns = {
3675 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3676 offsetof(struct __sk_buff, data)),
3677 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3678 offsetof(struct __sk_buff, data_end)),
3679 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3680 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3681 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3682 BPF_MOV64_IMM(BPF_REG_3, 144),
3683 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3685 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3686 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3687 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3688 BPF_MOV64_IMM(BPF_REG_0, 1),
3689 BPF_EXIT_INSN(),
3690 BPF_MOV64_IMM(BPF_REG_0, 0),
3691 BPF_EXIT_INSN(),
3692 },
3693 .result = ACCEPT,
3694 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3695 .retval = 1,
3696 },
3697 {
3698 "direct packet access: test13 (branches, good access)",
3699 .insns = {
3700 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3701 offsetof(struct __sk_buff, data)),
3702 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3703 offsetof(struct __sk_buff, data_end)),
3704 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3705 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3706 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
3707 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3708 offsetof(struct __sk_buff, mark)),
3709 BPF_MOV64_IMM(BPF_REG_4, 1),
3710 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
3711 BPF_MOV64_IMM(BPF_REG_3, 14),
3712 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3713 BPF_MOV64_IMM(BPF_REG_3, 24),
3714 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3715 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3716 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3717 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3718 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3719 BPF_MOV64_IMM(BPF_REG_0, 1),
3720 BPF_EXIT_INSN(),
3721 BPF_MOV64_IMM(BPF_REG_0, 0),
3722 BPF_EXIT_INSN(),
3723 },
3724 .result = ACCEPT,
3725 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3726 .retval = 1,
3727 },
3728 {
3729 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3730 .insns = {
3731 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3732 offsetof(struct __sk_buff, data)),
3733 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3734 offsetof(struct __sk_buff, data_end)),
3735 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3736 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3737 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
3738 BPF_MOV64_IMM(BPF_REG_5, 12),
3739 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
3740 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3741 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3742 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
3743 BPF_MOV64_IMM(BPF_REG_0, 1),
3744 BPF_EXIT_INSN(),
3745 BPF_MOV64_IMM(BPF_REG_0, 0),
3746 BPF_EXIT_INSN(),
3747 },
3748 .result = ACCEPT,
3749 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3750 .retval = 1,
3751 },
3752 {
3753 "direct packet access: test15 (spill with xadd)",
3754 .insns = {
3755 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3756 offsetof(struct __sk_buff, data)),
3757 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3758 offsetof(struct __sk_buff, data_end)),
3759 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3760 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3761 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3762 BPF_MOV64_IMM(BPF_REG_5, 4096),
3763 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
3764 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
3765 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
3766 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
3767 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
3768 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
3769 BPF_MOV64_IMM(BPF_REG_0, 0),
3770 BPF_EXIT_INSN(),
3771 },
3772 .errstr = "R2 invalid mem access 'inv'",
3773 .result = REJECT,
3774 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3775 },
3776 {
3777 "direct packet access: test16 (arith on data_end)",
3778 .insns = {
3779 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3780 offsetof(struct __sk_buff, data)),
3781 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3782 offsetof(struct __sk_buff, data_end)),
3783 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3784 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3785 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
3786 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3787 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3788 BPF_MOV64_IMM(BPF_REG_0, 0),
3789 BPF_EXIT_INSN(),
3790 },
3791 .errstr = "R3 pointer arithmetic on pkt_end",
3792 .result = REJECT,
3793 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3794 },
3795 {
3796 "direct packet access: test17 (pruning, alignment)",
3797 .insns = {
3798 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3799 offsetof(struct __sk_buff, data)),
3800 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3801 offsetof(struct __sk_buff, data_end)),
3802 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3803 offsetof(struct __sk_buff, mark)),
3804 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3805 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3806 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3807 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3808 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3809 BPF_MOV64_IMM(BPF_REG_0, 0),
3810 BPF_EXIT_INSN(),
3811 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3812 BPF_JMP_A(-6),
3813 },
3814 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
3815 .result = REJECT,
3816 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3817 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3818 },
3819 {
3820 "direct packet access: test18 (imm += pkt_ptr, 1)",
3821 .insns = {
3822 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3823 offsetof(struct __sk_buff, data)),
3824 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3825 offsetof(struct __sk_buff, data_end)),
3826 BPF_MOV64_IMM(BPF_REG_0, 8),
3827 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3828 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3829 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3830 BPF_MOV64_IMM(BPF_REG_0, 0),
3831 BPF_EXIT_INSN(),
3832 },
3833 .result = ACCEPT,
3834 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3835 },
3836 {
3837 "direct packet access: test19 (imm += pkt_ptr, 2)",
3838 .insns = {
3839 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3840 offsetof(struct __sk_buff, data)),
3841 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3842 offsetof(struct __sk_buff, data_end)),
3843 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3844 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3845 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3846 BPF_MOV64_IMM(BPF_REG_4, 4),
3847 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3848 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3849 BPF_MOV64_IMM(BPF_REG_0, 0),
3850 BPF_EXIT_INSN(),
3851 },
3852 .result = ACCEPT,
3853 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3854 },
3855 {
3856 "direct packet access: test20 (x += pkt_ptr, 1)",
3857 .insns = {
3858 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3859 offsetof(struct __sk_buff, data)),
3860 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3861 offsetof(struct __sk_buff, data_end)),
3862 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3863 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3864 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3865 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
3866 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3867 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3868 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3869 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3870 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3871 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3872 BPF_MOV64_IMM(BPF_REG_0, 0),
3873 BPF_EXIT_INSN(),
3874 },
3875 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3876 .result = ACCEPT,
3877 },
3878 {
3879 "direct packet access: test21 (x += pkt_ptr, 2)",
3880 .insns = {
3881 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3882 offsetof(struct __sk_buff, data)),
3883 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3884 offsetof(struct __sk_buff, data_end)),
3885 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3886 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3887 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3888 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3889 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3890 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3891 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
3892 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3893 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3894 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3895 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3896 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3897 BPF_MOV64_IMM(BPF_REG_0, 0),
3898 BPF_EXIT_INSN(),
3899 },
3900 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3901 .result = ACCEPT,
3902 },
3903 {
3904 "direct packet access: test22 (x += pkt_ptr, 3)",
3905 .insns = {
3906 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3907 offsetof(struct __sk_buff, data)),
3908 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3909 offsetof(struct __sk_buff, data_end)),
3910 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3911 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3912 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3913 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3914 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3915 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3916 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3917 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3918 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3919 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3920 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
3921 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3922 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3923 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3924 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3925 BPF_MOV64_IMM(BPF_REG_2, 1),
3926 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3927 BPF_MOV64_IMM(BPF_REG_0, 0),
3928 BPF_EXIT_INSN(),
3929 },
3930 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3931 .result = ACCEPT,
3932 },
3933 {
3934 "direct packet access: test23 (x += pkt_ptr, 4)",
3935 .insns = {
3936 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3937 offsetof(struct __sk_buff, data)),
3938 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3939 offsetof(struct __sk_buff, data_end)),
3940 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3941 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3942 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3943 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3944 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3945 BPF_MOV64_IMM(BPF_REG_0, 31),
3946 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3947 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3948 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3949 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3950 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3951 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3952 BPF_MOV64_IMM(BPF_REG_0, 0),
3953 BPF_EXIT_INSN(),
3954 },
3955 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3956 .result = REJECT,
3957 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
3958 },
3959 {
3960 "direct packet access: test24 (x += pkt_ptr, 5)",
3961 .insns = {
3962 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3963 offsetof(struct __sk_buff, data)),
3964 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3965 offsetof(struct __sk_buff, data_end)),
3966 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3967 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3968 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3969 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3970 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3971 BPF_MOV64_IMM(BPF_REG_0, 64),
3972 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3973 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3974 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3975 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
3976 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3977 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3978 BPF_MOV64_IMM(BPF_REG_0, 0),
3979 BPF_EXIT_INSN(),
3980 },
3981 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3982 .result = ACCEPT,
3983 },
3984 {
3985 "direct packet access: test25 (marking on <, good access)",
3986 .insns = {
3987 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3988 offsetof(struct __sk_buff, data)),
3989 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3990 offsetof(struct __sk_buff, data_end)),
3991 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3992 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3993 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3994 BPF_MOV64_IMM(BPF_REG_0, 0),
3995 BPF_EXIT_INSN(),
3996 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3997 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3998 },
3999 .result = ACCEPT,
4000 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4001 },
4002 {
4003 "direct packet access: test26 (marking on <, bad access)",
4004 .insns = {
4005 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4006 offsetof(struct __sk_buff, data)),
4007 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4008 offsetof(struct __sk_buff, data_end)),
4009 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4010 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4011 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
4012 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4013 BPF_MOV64_IMM(BPF_REG_0, 0),
4014 BPF_EXIT_INSN(),
4015 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
4016 },
4017 .result = REJECT,
4018 .errstr = "invalid access to packet",
4019 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4020 },
4021 {
4022 "direct packet access: test27 (marking on <=, good access)",
4023 .insns = {
4024 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4025 offsetof(struct __sk_buff, data)),
4026 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4027 offsetof(struct __sk_buff, data_end)),
4028 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4029 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4030 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
4031 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4032 BPF_MOV64_IMM(BPF_REG_0, 1),
4033 BPF_EXIT_INSN(),
4034 },
4035 .result = ACCEPT,
4036 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4037 .retval = 1,
4038 },
4039 {
4040 "direct packet access: test28 (marking on <=, bad access)",
4041 .insns = {
4042 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4043 offsetof(struct __sk_buff, data)),
4044 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4045 offsetof(struct __sk_buff, data_end)),
4046 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4047 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4048 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
4049 BPF_MOV64_IMM(BPF_REG_0, 1),
4050 BPF_EXIT_INSN(),
4051 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4052 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4053 },
4054 .result = REJECT,
4055 .errstr = "invalid access to packet",
4056 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4057 },
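	/* Helper access to packet tests: a packet pointer may be passed to a
	 * helper only after its range has been bounds-checked against
	 * data_end, and only to helpers that are allowed to touch packet
	 * memory for the given program type.
	 */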
4058 {
4059 "helper access to packet: test1, valid packet_ptr range",
4060 .insns = {
4061 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4062 offsetof(struct xdp_md, data)),
4063 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4064 offsetof(struct xdp_md, data_end)),
4065 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4067 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4068 BPF_LD_MAP_FD(BPF_REG_1, 0),
4069 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4070 BPF_MOV64_IMM(BPF_REG_4, 0),
4071 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4072 BPF_FUNC_map_update_elem),
4073 BPF_MOV64_IMM(BPF_REG_0, 0),
4074 BPF_EXIT_INSN(),
4075 },
4076 .fixup_map_hash_8b = { 5 },
4077 .result_unpriv = ACCEPT,
4078 .result = ACCEPT,
4079 .prog_type = BPF_PROG_TYPE_XDP,
4080 },
4081 {
4082 "helper access to packet: test2, unchecked packet_ptr",
4083 .insns = {
4084 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4085 offsetof(struct xdp_md, data)),
4086 BPF_LD_MAP_FD(BPF_REG_1, 0),
4087 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4088 BPF_FUNC_map_lookup_elem),
4089 BPF_MOV64_IMM(BPF_REG_0, 0),
4090 BPF_EXIT_INSN(),
4091 },
4092 .fixup_map_hash_8b = { 1 },
4093 .result = REJECT,
4094 .errstr = "invalid access to packet",
4095 .prog_type = BPF_PROG_TYPE_XDP,
4096 },
4097 {
4098 "helper access to packet: test3, variable add",
4099 .insns = {
4100 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4101 offsetof(struct xdp_md, data)),
4102 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4103 offsetof(struct xdp_md, data_end)),
4104 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4105 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4106 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4107 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4108 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4109 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4110 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4112 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4113 BPF_LD_MAP_FD(BPF_REG_1, 0),
4114 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4115 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4116 BPF_FUNC_map_lookup_elem),
4117 BPF_MOV64_IMM(BPF_REG_0, 0),
4118 BPF_EXIT_INSN(),
4119 },
4120 .fixup_map_hash_8b = { 11 },
4121 .result = ACCEPT,
4122 .prog_type = BPF_PROG_TYPE_XDP,
4123 },
4124 {
4125 "helper access to packet: test4, packet_ptr with bad range",
4126 .insns = {
4127 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4128 offsetof(struct xdp_md, data)),
4129 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4130 offsetof(struct xdp_md, data_end)),
4131 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4133 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4134 BPF_MOV64_IMM(BPF_REG_0, 0),
4135 BPF_EXIT_INSN(),
4136 BPF_LD_MAP_FD(BPF_REG_1, 0),
4137 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4138 BPF_FUNC_map_lookup_elem),
4139 BPF_MOV64_IMM(BPF_REG_0, 0),
4140 BPF_EXIT_INSN(),
4141 },
4142 .fixup_map_hash_8b = { 7 },
4143 .result = REJECT,
4144 .errstr = "invalid access to packet",
4145 .prog_type = BPF_PROG_TYPE_XDP,
4146 },
4147 {
4148 "helper access to packet: test5, packet_ptr with too short range",
4149 .insns = {
4150 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4151 offsetof(struct xdp_md, data)),
4152 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4153 offsetof(struct xdp_md, data_end)),
4154 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4155 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4156 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4157 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4158 BPF_LD_MAP_FD(BPF_REG_1, 0),
4159 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4160 BPF_FUNC_map_lookup_elem),
4161 BPF_MOV64_IMM(BPF_REG_0, 0),
4162 BPF_EXIT_INSN(),
4163 },
4164 .fixup_map_hash_8b = { 6 },
4165 .result = REJECT,
4166 .errstr = "invalid access to packet",
4167 .prog_type = BPF_PROG_TYPE_XDP,
4168 },
4169 {
4170 "helper access to packet: test6, cls valid packet_ptr range",
4171 .insns = {
4172 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4173 offsetof(struct __sk_buff, data)),
4174 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4175 offsetof(struct __sk_buff, data_end)),
4176 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4177 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4178 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4179 BPF_LD_MAP_FD(BPF_REG_1, 0),
4180 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4181 BPF_MOV64_IMM(BPF_REG_4, 0),
4182 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4183 BPF_FUNC_map_update_elem),
4184 BPF_MOV64_IMM(BPF_REG_0, 0),
4185 BPF_EXIT_INSN(),
4186 },
4187 .fixup_map_hash_8b = { 5 },
4188 .result = ACCEPT,
4189 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4190 },
4191 {
4192 "helper access to packet: test7, cls unchecked packet_ptr",
4193 .insns = {
4194 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4195 offsetof(struct __sk_buff, data)),
4196 BPF_LD_MAP_FD(BPF_REG_1, 0),
4197 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4198 BPF_FUNC_map_lookup_elem),
4199 BPF_MOV64_IMM(BPF_REG_0, 0),
4200 BPF_EXIT_INSN(),
4201 },
4202 .fixup_map_hash_8b = { 1 },
4203 .result = REJECT,
4204 .errstr = "invalid access to packet",
4205 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4206 },
4207 {
4208 "helper access to packet: test8, cls variable add",
4209 .insns = {
4210 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4211 offsetof(struct __sk_buff, data)),
4212 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4213 offsetof(struct __sk_buff, data_end)),
4214 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4215 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4216 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4217 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4218 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4219 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4220 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4222 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4223 BPF_LD_MAP_FD(BPF_REG_1, 0),
4224 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4225 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4226 BPF_FUNC_map_lookup_elem),
4227 BPF_MOV64_IMM(BPF_REG_0, 0),
4228 BPF_EXIT_INSN(),
4229 },
4230 .fixup_map_hash_8b = { 11 },
4231 .result = ACCEPT,
4232 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4233 },
4234 {
4235 "helper access to packet: test9, cls packet_ptr with bad range",
4236 .insns = {
4237 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4238 offsetof(struct __sk_buff, data)),
4239 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4240 offsetof(struct __sk_buff, data_end)),
4241 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4242 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4243 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4244 BPF_MOV64_IMM(BPF_REG_0, 0),
4245 BPF_EXIT_INSN(),
4246 BPF_LD_MAP_FD(BPF_REG_1, 0),
4247 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4248 BPF_FUNC_map_lookup_elem),
4249 BPF_MOV64_IMM(BPF_REG_0, 0),
4250 BPF_EXIT_INSN(),
4251 },
4252 .fixup_map_hash_8b = { 7 },
4253 .result = REJECT,
4254 .errstr = "invalid access to packet",
4255 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4256 },
4257 {
4258 "helper access to packet: test10, cls packet_ptr with too short range",
4259 .insns = {
4260 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4261 offsetof(struct __sk_buff, data)),
4262 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4263 offsetof(struct __sk_buff, data_end)),
4264 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4265 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4266 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4267 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4268 BPF_LD_MAP_FD(BPF_REG_1, 0),
4269 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4270 BPF_FUNC_map_lookup_elem),
4271 BPF_MOV64_IMM(BPF_REG_0, 0),
4272 BPF_EXIT_INSN(),
4273 },
4274 .fixup_map_hash_8b = { 6 },
4275 .result = REJECT,
4276 .errstr = "invalid access to packet",
4277 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4278 },
4279 {
4280 "helper access to packet: test11, cls unsuitable helper 1",
4281 .insns = {
4282 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4283 offsetof(struct __sk_buff, data)),
4284 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4285 offsetof(struct __sk_buff, data_end)),
4286 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4287 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4288 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
4289 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
4290 BPF_MOV64_IMM(BPF_REG_2, 0),
4291 BPF_MOV64_IMM(BPF_REG_4, 42),
4292 BPF_MOV64_IMM(BPF_REG_5, 0),
4293 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4294 BPF_FUNC_skb_store_bytes),
4295 BPF_MOV64_IMM(BPF_REG_0, 0),
4296 BPF_EXIT_INSN(),
4297 },
4298 .result = REJECT,
4299 .errstr = "helper access to the packet",
4300 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4301 },
4302 {
4303 "helper access to packet: test12, cls unsuitable helper 2",
4304 .insns = {
4305 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4306 offsetof(struct __sk_buff, data)),
4307 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4308 offsetof(struct __sk_buff, data_end)),
4309 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4310 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
4311 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
4312 BPF_MOV64_IMM(BPF_REG_2, 0),
4313 BPF_MOV64_IMM(BPF_REG_4, 4),
4314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4315 BPF_FUNC_skb_load_bytes),
4316 BPF_MOV64_IMM(BPF_REG_0, 0),
4317 BPF_EXIT_INSN(),
4318 },
4319 .result = REJECT,
4320 .errstr = "helper access to the packet",
4321 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4322 },
4323 {
4324 "helper access to packet: test13, cls helper ok",
4325 .insns = {
4326 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4327 offsetof(struct __sk_buff, data)),
4328 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4329 offsetof(struct __sk_buff, data_end)),
4330 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4331 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4332 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4333 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4334 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4335 BPF_MOV64_IMM(BPF_REG_2, 4),
4336 BPF_MOV64_IMM(BPF_REG_3, 0),
4337 BPF_MOV64_IMM(BPF_REG_4, 0),
4338 BPF_MOV64_IMM(BPF_REG_5, 0),
4339 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4340 BPF_FUNC_csum_diff),
4341 BPF_MOV64_IMM(BPF_REG_0, 0),
4342 BPF_EXIT_INSN(),
4343 },
4344 .result = ACCEPT,
4345 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4346 },
4347 {
4348 "helper access to packet: test14, cls helper ok sub",
4349 .insns = {
4350 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4351 offsetof(struct __sk_buff, data)),
4352 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4353 offsetof(struct __sk_buff, data_end)),
4354 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4355 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4356 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4357 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4358 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
4359 BPF_MOV64_IMM(BPF_REG_2, 4),
4360 BPF_MOV64_IMM(BPF_REG_3, 0),
4361 BPF_MOV64_IMM(BPF_REG_4, 0),
4362 BPF_MOV64_IMM(BPF_REG_5, 0),
4363 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4364 BPF_FUNC_csum_diff),
4365 BPF_MOV64_IMM(BPF_REG_0, 0),
4366 BPF_EXIT_INSN(),
4367 },
4368 .result = ACCEPT,
4369 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4370 },
4371 {
4372 "helper access to packet: test15, cls helper fail sub",
4373 .insns = {
4374 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4375 offsetof(struct __sk_buff, data)),
4376 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4377 offsetof(struct __sk_buff, data_end)),
4378 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4379 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4380 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4381 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4382 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
4383 BPF_MOV64_IMM(BPF_REG_2, 4),
4384 BPF_MOV64_IMM(BPF_REG_3, 0),
4385 BPF_MOV64_IMM(BPF_REG_4, 0),
4386 BPF_MOV64_IMM(BPF_REG_5, 0),
4387 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4388 BPF_FUNC_csum_diff),
4389 BPF_MOV64_IMM(BPF_REG_0, 0),
4390 BPF_EXIT_INSN(),
4391 },
4392 .result = REJECT,
4393 .errstr = "invalid access to packet",
4394 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4395 },
4396 {
4397 "helper access to packet: test16, cls helper fail range 1",
4398 .insns = {
4399 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4400 offsetof(struct __sk_buff, data)),
4401 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4402 offsetof(struct __sk_buff, data_end)),
4403 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4404 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4406 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4407 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4408 BPF_MOV64_IMM(BPF_REG_2, 8),
4409 BPF_MOV64_IMM(BPF_REG_3, 0),
4410 BPF_MOV64_IMM(BPF_REG_4, 0),
4411 BPF_MOV64_IMM(BPF_REG_5, 0),
4412 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4413 BPF_FUNC_csum_diff),
4414 BPF_MOV64_IMM(BPF_REG_0, 0),
4415 BPF_EXIT_INSN(),
4416 },
4417 .result = REJECT,
4418 .errstr = "invalid access to packet",
4419 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4420 },
4421 {
4422 "helper access to packet: test17, cls helper fail range 2",
4423 .insns = {
4424 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4425 offsetof(struct __sk_buff, data)),
4426 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4427 offsetof(struct __sk_buff, data_end)),
4428 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4429 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4430 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4431 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4432 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4433 BPF_MOV64_IMM(BPF_REG_2, -9),
4434 BPF_MOV64_IMM(BPF_REG_3, 0),
4435 BPF_MOV64_IMM(BPF_REG_4, 0),
4436 BPF_MOV64_IMM(BPF_REG_5, 0),
4437 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4438 BPF_FUNC_csum_diff),
4439 BPF_MOV64_IMM(BPF_REG_0, 0),
4440 BPF_EXIT_INSN(),
4441 },
4442 .result = REJECT,
4443 .errstr = "R2 min value is negative",
4444 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4445 },
4446 {
4447 "helper access to packet: test18, cls helper fail range 3",
4448 .insns = {
4449 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4450 offsetof(struct __sk_buff, data)),
4451 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4452 offsetof(struct __sk_buff, data_end)),
4453 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4454 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4455 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4456 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4457 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4458 BPF_MOV64_IMM(BPF_REG_2, ~0),
4459 BPF_MOV64_IMM(BPF_REG_3, 0),
4460 BPF_MOV64_IMM(BPF_REG_4, 0),
4461 BPF_MOV64_IMM(BPF_REG_5, 0),
4462 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4463 BPF_FUNC_csum_diff),
4464 BPF_MOV64_IMM(BPF_REG_0, 0),
4465 BPF_EXIT_INSN(),
4466 },
4467 .result = REJECT,
4468 .errstr = "R2 min value is negative",
4469 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4470 },
4471 {
4472 "helper access to packet: test19, cls helper range zero",
4473 .insns = {
4474 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4475 offsetof(struct __sk_buff, data)),
4476 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4477 offsetof(struct __sk_buff, data_end)),
4478 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4479 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4481 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4482 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4483 BPF_MOV64_IMM(BPF_REG_2, 0),
4484 BPF_MOV64_IMM(BPF_REG_3, 0),
4485 BPF_MOV64_IMM(BPF_REG_4, 0),
4486 BPF_MOV64_IMM(BPF_REG_5, 0),
4487 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4488 BPF_FUNC_csum_diff),
4489 BPF_MOV64_IMM(BPF_REG_0, 0),
4490 BPF_EXIT_INSN(),
4491 },
4492 .result = ACCEPT,
4493 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4494 },
4495 {
4496 "helper access to packet: test20, pkt end as input",
4497 .insns = {
4498 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4499 offsetof(struct __sk_buff, data)),
4500 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4501 offsetof(struct __sk_buff, data_end)),
4502 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4503 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4504 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4505 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4506 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4507 BPF_MOV64_IMM(BPF_REG_2, 4),
4508 BPF_MOV64_IMM(BPF_REG_3, 0),
4509 BPF_MOV64_IMM(BPF_REG_4, 0),
4510 BPF_MOV64_IMM(BPF_REG_5, 0),
4511 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4512 BPF_FUNC_csum_diff),
4513 BPF_MOV64_IMM(BPF_REG_0, 0),
4514 BPF_EXIT_INSN(),
4515 },
4516 .result = REJECT,
4517 .errstr = "R1 type=pkt_end expected=fp",
4518 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4519 },
4520 {
4521 "helper access to packet: test21, wrong reg",
4522 .insns = {
4523 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4524 offsetof(struct __sk_buff, data)),
4525 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4526 offsetof(struct __sk_buff, data_end)),
4527 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4528 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4529 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4530 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4531 BPF_MOV64_IMM(BPF_REG_2, 4),
4532 BPF_MOV64_IMM(BPF_REG_3, 0),
4533 BPF_MOV64_IMM(BPF_REG_4, 0),
4534 BPF_MOV64_IMM(BPF_REG_5, 0),
4535 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4536 BPF_FUNC_csum_diff),
4537 BPF_MOV64_IMM(BPF_REG_0, 0),
4538 BPF_EXIT_INSN(),
4539 },
4540 .result = REJECT,
4541 .errstr = "invalid access to packet",
4542 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4543 },
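	/* Map value access tests: after a successful map_lookup_elem the
	 * returned pointer may be offset by constants or bounded registers,
	 * but every access must stay within the map value.
	 */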
4544 {
4545 "valid map access into an array with a constant",
4546 .insns = {
4547 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4548 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4549 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4550 BPF_LD_MAP_FD(BPF_REG_1, 0),
4551 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4552 BPF_FUNC_map_lookup_elem),
4553 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4554 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4555 offsetof(struct test_val, foo)),
4556 BPF_EXIT_INSN(),
4557 },
4558 .fixup_map_hash_48b = { 3 },
4559 .errstr_unpriv = "R0 leaks addr",
4560 .result_unpriv = REJECT,
4561 .result = ACCEPT,
4562 },
4563 {
4564 "valid map access into an array with a register",
4565 .insns = {
4566 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4567 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4569 BPF_LD_MAP_FD(BPF_REG_1, 0),
4570 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4571 BPF_FUNC_map_lookup_elem),
4572 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4573 BPF_MOV64_IMM(BPF_REG_1, 4),
4574 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4575 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004576 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4577 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004578 BPF_EXIT_INSN(),
4579 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004580 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004581 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004582 .result_unpriv = REJECT,
4583 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004584 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004585 },
4586 {
4587 "valid map access into an array with a variable",
4588 .insns = {
4589 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4590 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4591 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4592 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004593 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4594 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004595 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4596 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4597 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
4598 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4599 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004600 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4601 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004602 BPF_EXIT_INSN(),
4603 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004604 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004605 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004606 .result_unpriv = REJECT,
4607 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004608 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004609 },
4610 {
4611 "valid map access into an array with a signed variable",
4612 .insns = {
4613 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4614 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4615 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4616 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004617 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4618 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004619 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
4620 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4621 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
4622 BPF_MOV32_IMM(BPF_REG_1, 0),
4623 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4624 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4625 BPF_MOV32_IMM(BPF_REG_1, 0),
4626 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4627 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004628 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4629 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004630 BPF_EXIT_INSN(),
4631 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004632 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004633 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004634 .result_unpriv = REJECT,
4635 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004636 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004637 },
4638 {
4639 "invalid map access into an array with a constant",
4640 .insns = {
4641 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4642 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4643 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4644 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004645 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4646 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004647 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4648 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
4649 offsetof(struct test_val, foo)),
4650 BPF_EXIT_INSN(),
4651 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004652 .fixup_map_hash_48b = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004653 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
4654 .result = REJECT,
4655 },
4656 {
4657 "invalid map access into an array with a register",
4658 .insns = {
4659 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4660 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4661 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4662 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004663 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4664 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004665 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4666 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
4667 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4668 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004669 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4670 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004671 BPF_EXIT_INSN(),
4672 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004673 .fixup_map_hash_48b = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04004674 .errstr = "R0 min value is outside of the array range",
4675 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004676 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004677 },
4678 {
4679 "invalid map access into an array with a variable",
4680 .insns = {
4681 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4682 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4683 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4684 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004685 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4686 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004687 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4688 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4689 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4690 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004691 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4692 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004693 BPF_EXIT_INSN(),
4694 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004695 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004696 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
Josef Bacik48461132016-09-28 10:54:32 -04004697 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004698 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004699 },
4700 {
4701 "invalid map access into an array with no floor check",
4702 .insns = {
4703 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4704 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4705 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4706 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004707 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4708 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004709 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
Edward Creef65b1842017-08-07 15:27:12 +01004710 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
Josef Bacik48461132016-09-28 10:54:32 -04004711 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4712 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4713 BPF_MOV32_IMM(BPF_REG_1, 0),
4714 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4715 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004716 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4717 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004718 BPF_EXIT_INSN(),
4719 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004720 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004721 .errstr_unpriv = "R0 leaks addr",
4722 .errstr = "R0 unbounded memory access",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004723 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04004724 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004725 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004726 },
4727 {
4728 "invalid map access into an array with a invalid max check",
4729 .insns = {
4730 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4731 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4732 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4733 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004734 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4735 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004736 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4737 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4738 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
4739 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
4740 BPF_MOV32_IMM(BPF_REG_1, 0),
4741 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4742 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004743 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4744 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004745 BPF_EXIT_INSN(),
4746 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004747 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004748 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04004749 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004750 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04004751 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004752 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004753 },
4754 {
4755 "invalid map access into an array with a invalid max check",
4756 .insns = {
4757 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4758 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4759 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4760 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004761 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4762 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004763 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4764 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4765 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4766 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4767 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4768 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004769 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4770 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004771 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4772 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004773 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
4774 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004775 BPF_EXIT_INSN(),
4776 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004777 .fixup_map_hash_48b = { 3, 11 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004778 .errstr = "R0 pointer += pointer",
Josef Bacik48461132016-09-28 10:54:32 -04004779 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004780 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04004781 },
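	/* bpf_get_local_storage(): only BPF_MAP_TYPE_CGROUP_STORAGE maps are
	 * accepted, the flags argument must be zero, and accesses to the
	 * returned value are bounded by the map's 64-byte value size.
	 */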
Thomas Graf57a09bf2016-10-18 19:51:19 +02004782 {
Roman Gushchind4c9f572018-08-02 14:27:28 -07004783 "valid cgroup storage access",
4784 .insns = {
4785 BPF_MOV64_IMM(BPF_REG_2, 0),
4786 BPF_LD_MAP_FD(BPF_REG_1, 0),
4787 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4788 BPF_FUNC_get_local_storage),
4789 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4790 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4791 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4792 BPF_EXIT_INSN(),
4793 },
4794 .fixup_cgroup_storage = { 1 },
4795 .result = ACCEPT,
4796 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4797 },
4798 {
4799 "invalid cgroup storage access 1",
4800 .insns = {
4801 BPF_MOV64_IMM(BPF_REG_2, 0),
4802 BPF_LD_MAP_FD(BPF_REG_1, 0),
4803 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4804 BPF_FUNC_get_local_storage),
4805 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4806 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4807 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4808 BPF_EXIT_INSN(),
4809 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004810 .fixup_map_hash_8b = { 1 },
Roman Gushchind4c9f572018-08-02 14:27:28 -07004811 .result = REJECT,
4812 .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
4813 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4814 },
4815 {
4816 "invalid cgroup storage access 2",
4817 .insns = {
4818 BPF_MOV64_IMM(BPF_REG_2, 0),
4819 BPF_LD_MAP_FD(BPF_REG_1, 1),
4820 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4821 BPF_FUNC_get_local_storage),
4822 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4823 BPF_EXIT_INSN(),
4824 },
4825 .result = REJECT,
4826 .errstr = "fd 1 is not pointing to valid bpf_map",
4827 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4828 },
4829 {
Roman Gushchina3c60542018-09-28 14:45:53 +00004830 "invalid cgroup storage access 3",
Roman Gushchind4c9f572018-08-02 14:27:28 -07004831 .insns = {
4832 BPF_MOV64_IMM(BPF_REG_2, 0),
4833 BPF_LD_MAP_FD(BPF_REG_1, 0),
4834 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4835 BPF_FUNC_get_local_storage),
4836 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
4837 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4838 BPF_MOV64_IMM(BPF_REG_0, 0),
4839 BPF_EXIT_INSN(),
4840 },
4841 .fixup_cgroup_storage = { 1 },
4842 .result = REJECT,
4843 .errstr = "invalid access to map value, value_size=64 off=256 size=4",
4844 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4845 },
4846 {
4847 "invalid cgroup storage access 4",
4848 .insns = {
4849 BPF_MOV64_IMM(BPF_REG_2, 0),
4850 BPF_LD_MAP_FD(BPF_REG_1, 0),
4851 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4852 BPF_FUNC_get_local_storage),
4853 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
4854 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4855 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4856 BPF_EXIT_INSN(),
4857 },
4858 .fixup_cgroup_storage = { 1 },
4859 .result = REJECT,
4860 .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
4861 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4862 },
4863 {
4864 "invalid cgroup storage access 5",
4865 .insns = {
4866 BPF_MOV64_IMM(BPF_REG_2, 7),
4867 BPF_LD_MAP_FD(BPF_REG_1, 0),
4868 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4869 BPF_FUNC_get_local_storage),
4870 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4871 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4872 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4873 BPF_EXIT_INSN(),
4874 },
4875 .fixup_cgroup_storage = { 1 },
4876 .result = REJECT,
4877 .errstr = "get_local_storage() doesn't support non-zero flags",
4878 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4879 },
4880 {
4881 "invalid cgroup storage access 6",
4882 .insns = {
4883 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
4884 BPF_LD_MAP_FD(BPF_REG_1, 0),
4885 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4886 BPF_FUNC_get_local_storage),
4887 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4888 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4889 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4890 BPF_EXIT_INSN(),
4891 },
4892 .fixup_cgroup_storage = { 1 },
4893 .result = REJECT,
4894 .errstr = "get_local_storage() doesn't support non-zero flags",
4895 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4896 },
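	/* The same checks apply to BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE maps. */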
4897 {
Roman Gushchina3c60542018-09-28 14:45:53 +00004898 "valid per-cpu cgroup storage access",
4899 .insns = {
4900 BPF_MOV64_IMM(BPF_REG_2, 0),
4901 BPF_LD_MAP_FD(BPF_REG_1, 0),
4902 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4903 BPF_FUNC_get_local_storage),
4904 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4905 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4906 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4907 BPF_EXIT_INSN(),
4908 },
4909 .fixup_percpu_cgroup_storage = { 1 },
4910 .result = ACCEPT,
4911 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4912 },
4913 {
4914 "invalid per-cpu cgroup storage access 1",
4915 .insns = {
4916 BPF_MOV64_IMM(BPF_REG_2, 0),
4917 BPF_LD_MAP_FD(BPF_REG_1, 0),
4918 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4919 BPF_FUNC_get_local_storage),
4920 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4921 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4922 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4923 BPF_EXIT_INSN(),
4924 },
Prashant Bhole908142e2018-10-09 10:04:53 +09004925 .fixup_map_hash_8b = { 1 },
Roman Gushchina3c60542018-09-28 14:45:53 +00004926 .result = REJECT,
4927 .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
4928 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4929 },
4930 {
4931 "invalid per-cpu cgroup storage access 2",
4932 .insns = {
4933 BPF_MOV64_IMM(BPF_REG_2, 0),
4934 BPF_LD_MAP_FD(BPF_REG_1, 1),
4935 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4936 BPF_FUNC_get_local_storage),
4937 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4938 BPF_EXIT_INSN(),
4939 },
4940 .result = REJECT,
4941 .errstr = "fd 1 is not pointing to valid bpf_map",
4942 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4943 },
4944 {
4945 "invalid per-cpu cgroup storage access 3",
4946 .insns = {
4947 BPF_MOV64_IMM(BPF_REG_2, 0),
4948 BPF_LD_MAP_FD(BPF_REG_1, 0),
4949 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4950 BPF_FUNC_get_local_storage),
4951 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
4952 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4953 BPF_MOV64_IMM(BPF_REG_0, 0),
4954 BPF_EXIT_INSN(),
4955 },
4956 .fixup_percpu_cgroup_storage = { 1 },
4957 .result = REJECT,
4958 .errstr = "invalid access to map value, value_size=64 off=256 size=4",
4959 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4960 },
4961 {
4962 "invalid per-cpu cgroup storage access 4",
4963 .insns = {
4964 BPF_MOV64_IMM(BPF_REG_2, 0),
4965 BPF_LD_MAP_FD(BPF_REG_1, 0),
4966 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4967 BPF_FUNC_get_local_storage),
4968 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
4969 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4970 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4971 BPF_EXIT_INSN(),
4972 },
4973 .fixup_percpu_cgroup_storage = { 1 },
4974 .result = REJECT,
4975 .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
4976 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4977 },
4978 {
4979 "invalid per-cpu cgroup storage access 5",
4980 .insns = {
4981 BPF_MOV64_IMM(BPF_REG_2, 7),
4982 BPF_LD_MAP_FD(BPF_REG_1, 0),
4983 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4984 BPF_FUNC_get_local_storage),
4985 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4986 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4987 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4988 BPF_EXIT_INSN(),
4989 },
4990 .fixup_percpu_cgroup_storage = { 1 },
4991 .result = REJECT,
4992 .errstr = "get_local_storage() doesn't support non-zero flags",
4993 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4994 },
4995 {
4996 "invalid per-cpu cgroup storage access 6",
4997 .insns = {
4998 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
4999 BPF_LD_MAP_FD(BPF_REG_1, 0),
5000 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5001 BPF_FUNC_get_local_storage),
5002 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5003 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5004 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5005 BPF_EXIT_INSN(),
5006 },
5007 .fixup_percpu_cgroup_storage = { 1 },
5008 .result = REJECT,
5009 .errstr = "get_local_storage() doesn't support non-zero flags",
5010 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5011 },
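	/* bpf_map_lookup_elem() returns ptr_to_map_value_or_null: every copy
	 * of the result must be NULL-checked before it is dereferenced, and
	 * pointer arithmetic on the maybe-NULL value is not allowed.
	 */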
5012 {
Thomas Graf57a09bf2016-10-18 19:51:19 +02005013 "multiple registers share map_lookup_elem result",
5014 .insns = {
5015 BPF_MOV64_IMM(BPF_REG_1, 10),
5016 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5017 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5018 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5019 BPF_LD_MAP_FD(BPF_REG_1, 0),
5020 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5021 BPF_FUNC_map_lookup_elem),
5022 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5023 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5024 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5025 BPF_EXIT_INSN(),
5026 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005027 .fixup_map_hash_8b = { 4 },
Thomas Graf57a09bf2016-10-18 19:51:19 +02005028 .result = ACCEPT,
5029 .prog_type = BPF_PROG_TYPE_SCHED_CLS
5030 },
5031 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02005032 "alu ops on ptr_to_map_value_or_null, 1",
5033 .insns = {
5034 BPF_MOV64_IMM(BPF_REG_1, 10),
5035 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5036 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5037 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5038 BPF_LD_MAP_FD(BPF_REG_1, 0),
5039 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5040 BPF_FUNC_map_lookup_elem),
5041 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5042 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
5043 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
5044 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5045 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5046 BPF_EXIT_INSN(),
5047 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005048 .fixup_map_hash_8b = { 4 },
Joe Stringeraad2eea2018-10-02 13:35:30 -07005049 .errstr = "R4 pointer arithmetic on map_value_or_null",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02005050 .result = REJECT,
5051 .prog_type = BPF_PROG_TYPE_SCHED_CLS
5052 },
5053 {
5054 "alu ops on ptr_to_map_value_or_null, 2",
5055 .insns = {
5056 BPF_MOV64_IMM(BPF_REG_1, 10),
5057 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5058 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5059 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5060 BPF_LD_MAP_FD(BPF_REG_1, 0),
5061 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5062 BPF_FUNC_map_lookup_elem),
5063 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5064 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
5065 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5066 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5067 BPF_EXIT_INSN(),
5068 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005069 .fixup_map_hash_8b = { 4 },
Joe Stringeraad2eea2018-10-02 13:35:30 -07005070 .errstr = "R4 pointer arithmetic on map_value_or_null",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02005071 .result = REJECT,
5072 .prog_type = BPF_PROG_TYPE_SCHED_CLS
5073 },
5074 {
5075 "alu ops on ptr_to_map_value_or_null, 3",
5076 .insns = {
5077 BPF_MOV64_IMM(BPF_REG_1, 10),
5078 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5079 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5080 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5081 BPF_LD_MAP_FD(BPF_REG_1, 0),
5082 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5083 BPF_FUNC_map_lookup_elem),
5084 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5085 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
5086 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5087 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5088 BPF_EXIT_INSN(),
5089 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005090 .fixup_map_hash_8b = { 4 },
Joe Stringeraad2eea2018-10-02 13:35:30 -07005091 .errstr = "R4 pointer arithmetic on map_value_or_null",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02005092 .result = REJECT,
5093 .prog_type = BPF_PROG_TYPE_SCHED_CLS
5094 },
5095 {
Thomas Graf57a09bf2016-10-18 19:51:19 +02005096 "invalid memory access with multiple map_lookup_elem calls",
5097 .insns = {
5098 BPF_MOV64_IMM(BPF_REG_1, 10),
5099 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5100 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5101 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5102 BPF_LD_MAP_FD(BPF_REG_1, 0),
5103 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5104 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5105 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5106 BPF_FUNC_map_lookup_elem),
5107 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5108 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5109 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5110 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5111 BPF_FUNC_map_lookup_elem),
5112 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5113 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5114 BPF_EXIT_INSN(),
5115 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005116 .fixup_map_hash_8b = { 4 },
Thomas Graf57a09bf2016-10-18 19:51:19 +02005117 .result = REJECT,
5118 .errstr = "R4 !read_ok",
5119 .prog_type = BPF_PROG_TYPE_SCHED_CLS
5120 },
5121 {
5122 "valid indirect map_lookup_elem access with 2nd lookup in branch",
5123 .insns = {
5124 BPF_MOV64_IMM(BPF_REG_1, 10),
5125 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5126 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5127 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5128 BPF_LD_MAP_FD(BPF_REG_1, 0),
5129 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5130 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5131 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5132 BPF_FUNC_map_lookup_elem),
5133 BPF_MOV64_IMM(BPF_REG_2, 10),
5134 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
5135 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5136 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5137 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5138 BPF_FUNC_map_lookup_elem),
5139 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5140 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5141 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5142 BPF_EXIT_INSN(),
5143 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005144 .fixup_map_hash_8b = { 4 },
Thomas Graf57a09bf2016-10-18 19:51:19 +02005145 .result = ACCEPT,
5146 .prog_type = BPF_PROG_TYPE_SCHED_CLS
5147 },
Josef Bacike9548902016-11-29 12:35:19 -05005148 {
5149 "invalid map access from else condition",
5150 .insns = {
5151 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5152 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5153 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5154 BPF_LD_MAP_FD(BPF_REG_1, 0),
5155 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
5156 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5157 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5158 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
5159 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5160 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5161 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5162 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
5163 BPF_EXIT_INSN(),
5164 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005165 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005166 .errstr = "R0 unbounded memory access",
Josef Bacike9548902016-11-29 12:35:19 -05005167 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01005168 .errstr_unpriv = "R0 leaks addr",
Josef Bacike9548902016-11-29 12:35:19 -05005169 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005170 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacike9548902016-11-29 12:35:19 -05005171 },
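	/* OR-ing known constants must keep the register a known constant so
	 * the helper's access size stays checkable against the 48-byte stack
	 * buffer in R1: 34 | 13 == 47 fits, 34 | 24 == 58 does not.
	 */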
Gianluca Borello3c8397442016-12-03 12:31:33 -08005172 {
5173 "constant register |= constant should keep constant type",
5174 .insns = {
5175 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5176 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5177 BPF_MOV64_IMM(BPF_REG_2, 34),
5178 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
5179 BPF_MOV64_IMM(BPF_REG_3, 0),
5180 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5181 BPF_EXIT_INSN(),
5182 },
5183 .result = ACCEPT,
5184 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5185 },
5186 {
5187 "constant register |= constant should not bypass stack boundary checks",
5188 .insns = {
5189 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5190 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5191 BPF_MOV64_IMM(BPF_REG_2, 34),
5192 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
5193 BPF_MOV64_IMM(BPF_REG_3, 0),
5194 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5195 BPF_EXIT_INSN(),
5196 },
5197 .errstr = "invalid stack type R1 off=-48 access_size=58",
5198 .result = REJECT,
5199 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5200 },
5201 {
5202 "constant register |= constant register should keep constant type",
5203 .insns = {
5204 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5205 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5206 BPF_MOV64_IMM(BPF_REG_2, 34),
5207 BPF_MOV64_IMM(BPF_REG_4, 13),
5208 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5209 BPF_MOV64_IMM(BPF_REG_3, 0),
5210 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5211 BPF_EXIT_INSN(),
5212 },
5213 .result = ACCEPT,
5214 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5215 },
5216 {
5217 "constant register |= constant register should not bypass stack boundary checks",
5218 .insns = {
5219 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5221 BPF_MOV64_IMM(BPF_REG_2, 34),
5222 BPF_MOV64_IMM(BPF_REG_4, 24),
5223 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5224 BPF_MOV64_IMM(BPF_REG_3, 0),
5225 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5226 BPF_EXIT_INSN(),
5227 },
5228 .errstr = "invalid stack type R1 off=-48 access_size=58",
5229 .result = REJECT,
5230 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5231 },
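	/* Direct packet access per program type: LWT_IN and LWT_OUT may read
	 * packet data but not write it; LWT_XMIT may do both and may also
	 * grow headroom with bpf_skb_change_head().
	 */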
Thomas Graf3f731d82016-12-05 10:30:52 +01005232 {
5233 "invalid direct packet write for LWT_IN",
5234 .insns = {
5235 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5236 offsetof(struct __sk_buff, data)),
5237 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5238 offsetof(struct __sk_buff, data_end)),
5239 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5240 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5241 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5242 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5243 BPF_MOV64_IMM(BPF_REG_0, 0),
5244 BPF_EXIT_INSN(),
5245 },
5246 .errstr = "cannot write into packet",
5247 .result = REJECT,
5248 .prog_type = BPF_PROG_TYPE_LWT_IN,
5249 },
5250 {
5251 "invalid direct packet write for LWT_OUT",
5252 .insns = {
5253 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5254 offsetof(struct __sk_buff, data)),
5255 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5256 offsetof(struct __sk_buff, data_end)),
5257 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5258 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5259 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5260 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5261 BPF_MOV64_IMM(BPF_REG_0, 0),
5262 BPF_EXIT_INSN(),
5263 },
5264 .errstr = "cannot write into packet",
5265 .result = REJECT,
5266 .prog_type = BPF_PROG_TYPE_LWT_OUT,
5267 },
5268 {
5269 "direct packet write for LWT_XMIT",
5270 .insns = {
5271 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5272 offsetof(struct __sk_buff, data)),
5273 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5274 offsetof(struct __sk_buff, data_end)),
5275 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5276 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5277 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5278 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5279 BPF_MOV64_IMM(BPF_REG_0, 0),
5280 BPF_EXIT_INSN(),
5281 },
5282 .result = ACCEPT,
5283 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5284 },
5285 {
5286 "direct packet read for LWT_IN",
5287 .insns = {
5288 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5289 offsetof(struct __sk_buff, data)),
5290 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5291 offsetof(struct __sk_buff, data_end)),
5292 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5294 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5295 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5296 BPF_MOV64_IMM(BPF_REG_0, 0),
5297 BPF_EXIT_INSN(),
5298 },
5299 .result = ACCEPT,
5300 .prog_type = BPF_PROG_TYPE_LWT_IN,
5301 },
5302 {
5303 "direct packet read for LWT_OUT",
5304 .insns = {
5305 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5306 offsetof(struct __sk_buff, data)),
5307 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5308 offsetof(struct __sk_buff, data_end)),
5309 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5310 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5311 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5312 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5313 BPF_MOV64_IMM(BPF_REG_0, 0),
5314 BPF_EXIT_INSN(),
5315 },
5316 .result = ACCEPT,
5317 .prog_type = BPF_PROG_TYPE_LWT_OUT,
5318 },
5319 {
5320 "direct packet read for LWT_XMIT",
5321 .insns = {
5322 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5323 offsetof(struct __sk_buff, data)),
5324 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5325 offsetof(struct __sk_buff, data_end)),
5326 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5327 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5328 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5329 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5330 BPF_MOV64_IMM(BPF_REG_0, 0),
5331 BPF_EXIT_INSN(),
5332 },
5333 .result = ACCEPT,
5334 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5335 },
5336 {
Alexei Starovoitovb1977682017-03-24 15:57:33 -07005337 "overlapping checks for direct packet access",
5338 .insns = {
5339 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5340 offsetof(struct __sk_buff, data)),
5341 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5342 offsetof(struct __sk_buff, data_end)),
5343 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5344 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5345 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
5346 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
5347 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
5348 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
5349 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
5350 BPF_MOV64_IMM(BPF_REG_0, 0),
5351 BPF_EXIT_INSN(),
5352 },
5353 .result = ACCEPT,
5354 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5355 },
5356 {
Daniel Borkmann6e6fddc2018-07-11 15:30:14 +02005357 "make headroom for LWT_XMIT",
5358 .insns = {
5359 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5360 BPF_MOV64_IMM(BPF_REG_2, 34),
5361 BPF_MOV64_IMM(BPF_REG_3, 0),
5362 BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5363 /* split for s390 to succeed */
5364 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
5365 BPF_MOV64_IMM(BPF_REG_2, 42),
5366 BPF_MOV64_IMM(BPF_REG_3, 0),
5367 BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5368 BPF_MOV64_IMM(BPF_REG_0, 0),
5369 BPF_EXIT_INSN(),
5370 },
5371 .result = ACCEPT,
5372 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
5373 },
5374 {
Thomas Graf3f731d82016-12-05 10:30:52 +01005375 "invalid access of tc_classid for LWT_IN",
5376 .insns = {
5377 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5378 offsetof(struct __sk_buff, tc_classid)),
5379 BPF_EXIT_INSN(),
5380 },
5381 .result = REJECT,
5382 .errstr = "invalid bpf_context access",
5383 },
5384 {
5385 "invalid access of tc_classid for LWT_OUT",
5386 .insns = {
5387 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5388 offsetof(struct __sk_buff, tc_classid)),
5389 BPF_EXIT_INSN(),
5390 },
5391 .result = REJECT,
5392 .errstr = "invalid bpf_context access",
5393 },
5394 {
5395 "invalid access of tc_classid for LWT_XMIT",
5396 .insns = {
5397 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5398 offsetof(struct __sk_buff, tc_classid)),
5399 BPF_EXIT_INSN(),
5400 },
5401 .result = REJECT,
5402 .errstr = "invalid bpf_context access",
5403 },
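	/* Pointer leaks: unprivileged programs must not store pointers into
	 * the context or into map values, and BPF_XADD on ctx fields is
	 * rejected regardless of privilege.
	 */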
Gianluca Borello57225692017-01-09 10:19:47 -08005404 {
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02005405 "leak pointer into ctx 1",
5406 .insns = {
5407 BPF_MOV64_IMM(BPF_REG_0, 0),
5408 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5409 offsetof(struct __sk_buff, cb[0])),
5410 BPF_LD_MAP_FD(BPF_REG_2, 0),
5411 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
5412 offsetof(struct __sk_buff, cb[0])),
5413 BPF_EXIT_INSN(),
5414 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005415 .fixup_map_hash_8b = { 2 },
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02005416 .errstr_unpriv = "R2 leaks addr into mem",
5417 .result_unpriv = REJECT,
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01005418 .result = REJECT,
Joe Stringer9d2be442018-10-02 13:35:31 -07005419 .errstr = "BPF_XADD stores into R1 inv is not allowed",
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02005420 },
5421 {
5422 "leak pointer into ctx 2",
5423 .insns = {
5424 BPF_MOV64_IMM(BPF_REG_0, 0),
5425 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5426 offsetof(struct __sk_buff, cb[0])),
5427 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
5428 offsetof(struct __sk_buff, cb[0])),
5429 BPF_EXIT_INSN(),
5430 },
5431 .errstr_unpriv = "R10 leaks addr into mem",
5432 .result_unpriv = REJECT,
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01005433 .result = REJECT,
Joe Stringer9d2be442018-10-02 13:35:31 -07005434 .errstr = "BPF_XADD stores into R1 inv is not allowed",
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02005435 },
5436 {
5437 "leak pointer into ctx 3",
5438 .insns = {
5439 BPF_MOV64_IMM(BPF_REG_0, 0),
5440 BPF_LD_MAP_FD(BPF_REG_2, 0),
5441 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
5442 offsetof(struct __sk_buff, cb[0])),
5443 BPF_EXIT_INSN(),
5444 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005445 .fixup_map_hash_8b = { 1 },
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02005446 .errstr_unpriv = "R2 leaks addr into ctx",
5447 .result_unpriv = REJECT,
5448 .result = ACCEPT,
5449 },
5450 {
5451 "leak pointer into map val",
5452 .insns = {
5453 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5454 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5455 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5457 BPF_LD_MAP_FD(BPF_REG_1, 0),
5458 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5459 BPF_FUNC_map_lookup_elem),
5460 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5461 BPF_MOV64_IMM(BPF_REG_3, 0),
5462 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
5463 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
5464 BPF_MOV64_IMM(BPF_REG_0, 0),
5465 BPF_EXIT_INSN(),
5466 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005467 .fixup_map_hash_8b = { 4 },
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02005468 .errstr_unpriv = "R6 leaks addr into mem",
5469 .result_unpriv = REJECT,
5470 .result = ACCEPT,
5471 },
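	/* Passing a map value to a helper as a memory range, roughly:
	 *
	 *	val = bpf_map_lookup_elem(&map, &key);
	 *	if (val)
	 *		bpf_probe_read(val, size, NULL);
	 *
	 * size must be non-zero, non-negative, and must not extend past the
	 * 48-byte value.
	 */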
5472 {
Gianluca Borello57225692017-01-09 10:19:47 -08005473 "helper access to map: full range",
5474 .insns = {
5475 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5476 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5477 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5478 BPF_LD_MAP_FD(BPF_REG_1, 0),
5479 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5480 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5481 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5482 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5483 BPF_MOV64_IMM(BPF_REG_3, 0),
5484 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5485 BPF_EXIT_INSN(),
5486 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005487 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005488 .result = ACCEPT,
5489 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5490 },
5491 {
5492 "helper access to map: partial range",
5493 .insns = {
5494 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5495 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5496 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5497 BPF_LD_MAP_FD(BPF_REG_1, 0),
5498 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5499 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5500 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5501 BPF_MOV64_IMM(BPF_REG_2, 8),
5502 BPF_MOV64_IMM(BPF_REG_3, 0),
5503 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5504 BPF_EXIT_INSN(),
5505 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005506 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005507 .result = ACCEPT,
5508 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5509 },
5510 {
5511 "helper access to map: empty range",
5512 .insns = {
5513 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5514 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5515 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5516 BPF_LD_MAP_FD(BPF_REG_1, 0),
5517 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005518 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5519 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5520 BPF_MOV64_IMM(BPF_REG_2, 0),
5521 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08005522 BPF_EXIT_INSN(),
5523 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005524 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005525 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
5526 .result = REJECT,
5527 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5528 },
5529 {
5530 "helper access to map: out-of-bound range",
5531 .insns = {
5532 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5533 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5534 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5535 BPF_LD_MAP_FD(BPF_REG_1, 0),
5536 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5537 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5538 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5539 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
5540 BPF_MOV64_IMM(BPF_REG_3, 0),
5541 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5542 BPF_EXIT_INSN(),
5543 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005544 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005545 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
5546 .result = REJECT,
5547 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5548 },
5549 {
5550 "helper access to map: negative range",
5551 .insns = {
5552 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5553 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5554 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5555 BPF_LD_MAP_FD(BPF_REG_1, 0),
5556 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5557 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5558 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5559 BPF_MOV64_IMM(BPF_REG_2, -8),
5560 BPF_MOV64_IMM(BPF_REG_3, 0),
5561 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5562 BPF_EXIT_INSN(),
5563 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005564 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005565 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08005566 .result = REJECT,
5567 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5568 },
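	/* The same range checks after the value pointer has been advanced by
	 * offsetof(struct test_val, foo) (4 bytes), via an immediate, a
	 * constant register, or a bounds-checked variable, leaving 44 usable
	 * bytes.
	 */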
5569 {
5570 "helper access to adjusted map (via const imm): full range",
5571 .insns = {
5572 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5573 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5574 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5575 BPF_LD_MAP_FD(BPF_REG_1, 0),
5576 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5577 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5578 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5579 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5580 offsetof(struct test_val, foo)),
5581 BPF_MOV64_IMM(BPF_REG_2,
5582 sizeof(struct test_val) -
5583 offsetof(struct test_val, foo)),
5584 BPF_MOV64_IMM(BPF_REG_3, 0),
5585 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5586 BPF_EXIT_INSN(),
5587 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005588 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005589 .result = ACCEPT,
5590 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5591 },
5592 {
5593 "helper access to adjusted map (via const imm): partial range",
5594 .insns = {
5595 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5596 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5597 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5598 BPF_LD_MAP_FD(BPF_REG_1, 0),
5599 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5600 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5601 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5602 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5603 offsetof(struct test_val, foo)),
5604 BPF_MOV64_IMM(BPF_REG_2, 8),
5605 BPF_MOV64_IMM(BPF_REG_3, 0),
5606 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5607 BPF_EXIT_INSN(),
5608 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005609 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005610 .result = ACCEPT,
5611 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5612 },
5613 {
5614 "helper access to adjusted map (via const imm): empty range",
5615 .insns = {
5616 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5617 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5618 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5619 BPF_LD_MAP_FD(BPF_REG_1, 0),
5620 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005621 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
Gianluca Borello57225692017-01-09 10:19:47 -08005622 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5623 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5624 offsetof(struct test_val, foo)),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005625 BPF_MOV64_IMM(BPF_REG_2, 0),
5626 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08005627 BPF_EXIT_INSN(),
5628 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005629 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005630 .errstr = "invalid access to map value, value_size=48 off=4 size=0",
Gianluca Borello57225692017-01-09 10:19:47 -08005631 .result = REJECT,
5632 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5633 },
5634 {
5635 "helper access to adjusted map (via const imm): out-of-bound range",
5636 .insns = {
5637 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5638 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5639 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5640 BPF_LD_MAP_FD(BPF_REG_1, 0),
5641 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5642 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5643 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5644 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5645 offsetof(struct test_val, foo)),
5646 BPF_MOV64_IMM(BPF_REG_2,
5647 sizeof(struct test_val) -
5648 offsetof(struct test_val, foo) + 8),
5649 BPF_MOV64_IMM(BPF_REG_3, 0),
5650 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5651 BPF_EXIT_INSN(),
5652 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005653 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005654 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
5655 .result = REJECT,
5656 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5657 },
5658 {
5659 "helper access to adjusted map (via const imm): negative range (> adjustment)",
5660 .insns = {
5661 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5662 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5663 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5664 BPF_LD_MAP_FD(BPF_REG_1, 0),
5665 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5666 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5667 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5668 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5669 offsetof(struct test_val, foo)),
5670 BPF_MOV64_IMM(BPF_REG_2, -8),
5671 BPF_MOV64_IMM(BPF_REG_3, 0),
5672 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5673 BPF_EXIT_INSN(),
5674 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005675 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005676 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08005677 .result = REJECT,
5678 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5679 },
5680 {
5681 "helper access to adjusted map (via const imm): negative range (< adjustment)",
5682 .insns = {
5683 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5685 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5686 BPF_LD_MAP_FD(BPF_REG_1, 0),
5687 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5688 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5689 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5690 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5691 offsetof(struct test_val, foo)),
5692 BPF_MOV64_IMM(BPF_REG_2, -1),
5693 BPF_MOV64_IMM(BPF_REG_3, 0),
5694 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5695 BPF_EXIT_INSN(),
5696 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005697 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005698 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08005699 .result = REJECT,
5700 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5701 },
5702 {
5703 "helper access to adjusted map (via const reg): full range",
5704 .insns = {
5705 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5706 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5707 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5708 BPF_LD_MAP_FD(BPF_REG_1, 0),
5709 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5710 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5711 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5712 BPF_MOV64_IMM(BPF_REG_3,
5713 offsetof(struct test_val, foo)),
5714 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5715 BPF_MOV64_IMM(BPF_REG_2,
5716 sizeof(struct test_val) -
5717 offsetof(struct test_val, foo)),
5718 BPF_MOV64_IMM(BPF_REG_3, 0),
5719 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5720 BPF_EXIT_INSN(),
5721 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005722 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005723 .result = ACCEPT,
5724 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5725 },
5726 {
5727 "helper access to adjusted map (via const reg): partial range",
5728 .insns = {
5729 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5730 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5731 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5732 BPF_LD_MAP_FD(BPF_REG_1, 0),
5733 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5734 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5735 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5736 BPF_MOV64_IMM(BPF_REG_3,
5737 offsetof(struct test_val, foo)),
5738 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5739 BPF_MOV64_IMM(BPF_REG_2, 8),
5740 BPF_MOV64_IMM(BPF_REG_3, 0),
5741 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5742 BPF_EXIT_INSN(),
5743 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005744 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005745 .result = ACCEPT,
5746 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5747 },
5748 {
5749 "helper access to adjusted map (via const reg): empty range",
5750 .insns = {
5751 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5752 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5753 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5754 BPF_LD_MAP_FD(BPF_REG_1, 0),
5755 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005756 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
Gianluca Borello57225692017-01-09 10:19:47 -08005757 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5758 BPF_MOV64_IMM(BPF_REG_3, 0),
5759 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005760 BPF_MOV64_IMM(BPF_REG_2, 0),
5761 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08005762 BPF_EXIT_INSN(),
5763 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005764 .fixup_map_hash_48b = { 3 },
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005765 .errstr = "R1 min value is outside of the array range",
Gianluca Borello57225692017-01-09 10:19:47 -08005766 .result = REJECT,
5767 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5768 },
5769 {
5770 "helper access to adjusted map (via const reg): out-of-bound range",
5771 .insns = {
5772 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5773 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5774 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5775 BPF_LD_MAP_FD(BPF_REG_1, 0),
5776 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5777 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5778 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5779 BPF_MOV64_IMM(BPF_REG_3,
5780 offsetof(struct test_val, foo)),
5781 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5782 BPF_MOV64_IMM(BPF_REG_2,
5783 sizeof(struct test_val) -
5784 offsetof(struct test_val, foo) + 8),
5785 BPF_MOV64_IMM(BPF_REG_3, 0),
5786 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5787 BPF_EXIT_INSN(),
5788 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005789 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005790 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
5791 .result = REJECT,
5792 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5793 },
5794 {
5795 "helper access to adjusted map (via const reg): negative range (> adjustment)",
5796 .insns = {
5797 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5798 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5799 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5800 BPF_LD_MAP_FD(BPF_REG_1, 0),
5801 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5802 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5803 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5804 BPF_MOV64_IMM(BPF_REG_3,
5805 offsetof(struct test_val, foo)),
5806 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5807 BPF_MOV64_IMM(BPF_REG_2, -8),
5808 BPF_MOV64_IMM(BPF_REG_3, 0),
5809 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5810 BPF_EXIT_INSN(),
5811 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005812 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005813 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08005814 .result = REJECT,
5815 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5816 },
5817 {
5818 "helper access to adjusted map (via const reg): negative range (< adjustment)",
5819 .insns = {
5820 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5821 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5822 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5823 BPF_LD_MAP_FD(BPF_REG_1, 0),
5824 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5825 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5826 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5827 BPF_MOV64_IMM(BPF_REG_3,
5828 offsetof(struct test_val, foo)),
5829 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5830 BPF_MOV64_IMM(BPF_REG_2, -1),
5831 BPF_MOV64_IMM(BPF_REG_3, 0),
5832 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5833 BPF_EXIT_INSN(),
5834 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005835 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005836 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08005837 .result = REJECT,
5838 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5839 },
5840 {
5841 "helper access to adjusted map (via variable): full range",
5842 .insns = {
5843 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5844 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5845 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5846 BPF_LD_MAP_FD(BPF_REG_1, 0),
5847 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5848 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5849 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5850 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5851 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5852 offsetof(struct test_val, foo), 4),
5853 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5854 BPF_MOV64_IMM(BPF_REG_2,
5855 sizeof(struct test_val) -
5856 offsetof(struct test_val, foo)),
5857 BPF_MOV64_IMM(BPF_REG_3, 0),
5858 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5859 BPF_EXIT_INSN(),
5860 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005861 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005862 .result = ACCEPT,
5863 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5864 },
5865 {
5866 "helper access to adjusted map (via variable): partial range",
5867 .insns = {
5868 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5869 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5870 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5871 BPF_LD_MAP_FD(BPF_REG_1, 0),
5872 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5873 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5874 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5875 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5876 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5877 offsetof(struct test_val, foo), 4),
5878 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5879 BPF_MOV64_IMM(BPF_REG_2, 8),
5880 BPF_MOV64_IMM(BPF_REG_3, 0),
5881 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5882 BPF_EXIT_INSN(),
5883 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005884 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005885 .result = ACCEPT,
5886 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5887 },
5888 {
5889 "helper access to adjusted map (via variable): empty range",
5890 .insns = {
5891 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5892 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5893 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5894 BPF_LD_MAP_FD(BPF_REG_1, 0),
5895 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005896 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
Gianluca Borello57225692017-01-09 10:19:47 -08005897 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5898 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5899 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005900 offsetof(struct test_val, foo), 3),
Gianluca Borello57225692017-01-09 10:19:47 -08005901 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005902 BPF_MOV64_IMM(BPF_REG_2, 0),
5903 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08005904 BPF_EXIT_INSN(),
5905 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005906 .fixup_map_hash_48b = { 3 },
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08005907 .errstr = "R1 min value is outside of the array range",
Gianluca Borello57225692017-01-09 10:19:47 -08005908 .result = REJECT,
5909 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5910 },
5911 {
5912 "helper access to adjusted map (via variable): no max check",
5913 .insns = {
5914 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5915 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5916 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5917 BPF_LD_MAP_FD(BPF_REG_1, 0),
5918 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5919 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5920 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5921 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5922 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
Edward Creef65b1842017-08-07 15:27:12 +01005923 BPF_MOV64_IMM(BPF_REG_2, 1),
Gianluca Borello57225692017-01-09 10:19:47 -08005924 BPF_MOV64_IMM(BPF_REG_3, 0),
5925 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5926 BPF_EXIT_INSN(),
5927 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005928 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005929 .errstr = "R1 unbounded memory access",
Gianluca Borello57225692017-01-09 10:19:47 -08005930 .result = REJECT,
5931 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5932 },
5933 {
5934 "helper access to adjusted map (via variable): wrong max check",
5935 .insns = {
5936 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5937 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5938 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5939 BPF_LD_MAP_FD(BPF_REG_1, 0),
5940 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5941 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5942 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5943 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5944 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5945 offsetof(struct test_val, foo), 4),
5946 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5947 BPF_MOV64_IMM(BPF_REG_2,
5948 sizeof(struct test_val) -
5949 offsetof(struct test_val, foo) + 1),
5950 BPF_MOV64_IMM(BPF_REG_3, 0),
5951 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5952 BPF_EXIT_INSN(),
5953 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005954 .fixup_map_hash_48b = { 3 },
Gianluca Borello57225692017-01-09 10:19:47 -08005955 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
5956 .result = REJECT,
5957 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5958 },
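	/*
	 * The "bounds check using <, <=, s<, s<=" tests below verify that the
	 * verifier derives usable bounds from the JLT/JLE/JSLT/JSLE branches
	 * before a variable offset is added to a map value pointer. Roughly,
	 * each accepted program corresponds to a C pattern like this sketch
	 * (names are illustrative only):
	 *
	 *	u32 *val = bpf_map_lookup_elem(&map, &key);
	 *	if (!val)
	 *		return 0;
	 *	u32 idx = *val;
	 *	if (idx < 32)			// bound under test
	 *		*((u8 *)val + idx) = 0;	// in bounds only if the check held
	 *
	 * The "bad access" variants place the store on the branch where the
	 * index is not bounded, so the verifier must reject them.
	 */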
Gianluca Borellof0318d02017-01-09 10:19:48 -08005959 {
Daniel Borkmann31e482b2017-08-10 01:40:03 +02005960 "helper access to map: bounds check using <, good access",
5961 .insns = {
5962 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5963 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5964 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5965 BPF_LD_MAP_FD(BPF_REG_1, 0),
5966 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5967 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5968 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5969 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5970 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
5971 BPF_MOV64_IMM(BPF_REG_0, 0),
5972 BPF_EXIT_INSN(),
5973 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5974 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5975 BPF_MOV64_IMM(BPF_REG_0, 0),
5976 BPF_EXIT_INSN(),
5977 },
Prashant Bhole908142e2018-10-09 10:04:53 +09005978 .fixup_map_hash_48b = { 3 },
Daniel Borkmann31e482b2017-08-10 01:40:03 +02005979 .result = ACCEPT,
5980 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5981 },
5982 {
5983 "helper access to map: bounds check using <, bad access",
5984 .insns = {
5985 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5986 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5987 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5988 BPF_LD_MAP_FD(BPF_REG_1, 0),
5989 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5990 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5991 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5992 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5993 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
5994 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5995 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5996 BPF_MOV64_IMM(BPF_REG_0, 0),
5997 BPF_EXIT_INSN(),
5998 BPF_MOV64_IMM(BPF_REG_0, 0),
5999 BPF_EXIT_INSN(),
6000 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006001 .fixup_map_hash_48b = { 3 },
Daniel Borkmann31e482b2017-08-10 01:40:03 +02006002 .result = REJECT,
6003 .errstr = "R1 unbounded memory access",
6004 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6005 },
6006 {
6007 "helper access to map: bounds check using <=, good access",
6008 .insns = {
6009 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6010 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6011 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6012 BPF_LD_MAP_FD(BPF_REG_1, 0),
6013 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6014 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6015 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6016 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6017 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
6018 BPF_MOV64_IMM(BPF_REG_0, 0),
6019 BPF_EXIT_INSN(),
6020 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6021 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6022 BPF_MOV64_IMM(BPF_REG_0, 0),
6023 BPF_EXIT_INSN(),
6024 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006025 .fixup_map_hash_48b = { 3 },
Daniel Borkmann31e482b2017-08-10 01:40:03 +02006026 .result = ACCEPT,
6027 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6028 },
6029 {
6030 "helper access to map: bounds check using <=, bad access",
6031 .insns = {
6032 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6033 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6034 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6035 BPF_LD_MAP_FD(BPF_REG_1, 0),
6036 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6037 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6038 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6039 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6040 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
6041 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6042 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6043 BPF_MOV64_IMM(BPF_REG_0, 0),
6044 BPF_EXIT_INSN(),
6045 BPF_MOV64_IMM(BPF_REG_0, 0),
6046 BPF_EXIT_INSN(),
6047 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006048 .fixup_map_hash_48b = { 3 },
Daniel Borkmann31e482b2017-08-10 01:40:03 +02006049 .result = REJECT,
6050 .errstr = "R1 unbounded memory access",
6051 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6052 },
6053 {
6054 "helper access to map: bounds check using s<, good access",
6055 .insns = {
6056 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6057 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6058 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6059 BPF_LD_MAP_FD(BPF_REG_1, 0),
6060 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6061 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6062 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6063 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6064 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6065 BPF_MOV64_IMM(BPF_REG_0, 0),
6066 BPF_EXIT_INSN(),
6067 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
6068 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6069 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6070 BPF_MOV64_IMM(BPF_REG_0, 0),
6071 BPF_EXIT_INSN(),
6072 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006073 .fixup_map_hash_48b = { 3 },
Daniel Borkmann31e482b2017-08-10 01:40:03 +02006074 .result = ACCEPT,
6075 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6076 },
6077 {
6078 "helper access to map: bounds check using s<, good access 2",
6079 .insns = {
6080 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6081 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6082 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6083 BPF_LD_MAP_FD(BPF_REG_1, 0),
6084 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6085 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6086 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6087 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6088 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6089 BPF_MOV64_IMM(BPF_REG_0, 0),
6090 BPF_EXIT_INSN(),
6091 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6092 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6093 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6094 BPF_MOV64_IMM(BPF_REG_0, 0),
6095 BPF_EXIT_INSN(),
6096 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006097 .fixup_map_hash_48b = { 3 },
Daniel Borkmann31e482b2017-08-10 01:40:03 +02006098 .result = ACCEPT,
6099 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6100 },
6101 {
6102 "helper access to map: bounds check using s<, bad access",
6103 .insns = {
6104 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6105 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6106 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6107 BPF_LD_MAP_FD(BPF_REG_1, 0),
6108 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6109 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6110 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6111 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6112 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6113 BPF_MOV64_IMM(BPF_REG_0, 0),
6114 BPF_EXIT_INSN(),
6115 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6116 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6117 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6118 BPF_MOV64_IMM(BPF_REG_0, 0),
6119 BPF_EXIT_INSN(),
6120 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006121 .fixup_map_hash_48b = { 3 },
Daniel Borkmann31e482b2017-08-10 01:40:03 +02006122 .result = REJECT,
6123 .errstr = "R1 min value is negative",
6124 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6125 },
6126 {
6127 "helper access to map: bounds check using s<=, good access",
6128 .insns = {
6129 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6130 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6131 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6132 BPF_LD_MAP_FD(BPF_REG_1, 0),
6133 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6134 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6135 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6136 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6137 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6138 BPF_MOV64_IMM(BPF_REG_0, 0),
6139 BPF_EXIT_INSN(),
6140 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
6141 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6142 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6143 BPF_MOV64_IMM(BPF_REG_0, 0),
6144 BPF_EXIT_INSN(),
6145 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006146 .fixup_map_hash_48b = { 3 },
Daniel Borkmann31e482b2017-08-10 01:40:03 +02006147 .result = ACCEPT,
6148 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6149 },
6150 {
6151 "helper access to map: bounds check using s<=, good access 2",
6152 .insns = {
6153 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6154 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6155 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6156 BPF_LD_MAP_FD(BPF_REG_1, 0),
6157 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6158 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6159 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6160 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6161 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6162 BPF_MOV64_IMM(BPF_REG_0, 0),
6163 BPF_EXIT_INSN(),
6164 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6165 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6166 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6167 BPF_MOV64_IMM(BPF_REG_0, 0),
6168 BPF_EXIT_INSN(),
6169 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006170 .fixup_map_hash_48b = { 3 },
Daniel Borkmann31e482b2017-08-10 01:40:03 +02006171 .result = ACCEPT,
6172 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6173 },
6174 {
6175 "helper access to map: bounds check using s<=, bad access",
6176 .insns = {
6177 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6178 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6179 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6180 BPF_LD_MAP_FD(BPF_REG_1, 0),
6181 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6182 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6183 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6184 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6185 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6186 BPF_MOV64_IMM(BPF_REG_0, 0),
6187 BPF_EXIT_INSN(),
6188 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6189 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6190 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6191 BPF_MOV64_IMM(BPF_REG_0, 0),
6192 BPF_EXIT_INSN(),
6193 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006194 .fixup_map_hash_48b = { 3 },
Daniel Borkmann31e482b2017-08-10 01:40:03 +02006195 .result = REJECT,
6196 .errstr = "R1 min value is negative",
6197 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6198 },
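	/*
	 * The "map lookup/update helper access to map" tests below pass a map
	 * value pointer (possibly advanced by a constant or by a bounded
	 * variable) back into bpf_map_lookup_elem()/bpf_map_update_elem() as
	 * the key or value argument, so the helper-argument checks must
	 * enforce the same bounds that direct loads and stores would get.
	 * An approximate C equivalent (map and variable names are made up):
	 *
	 *	struct other_val *outer = bpf_map_lookup_elem(&m16, &key);
	 *	if (!outer)
	 *		return 0;
	 *	bpf_map_lookup_elem(&m16, &outer->bar);	// key taken from a map value
	 */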
6199 {
Paul Chaignon5f90dd62018-04-24 15:08:19 +02006200 "map lookup helper access to map",
6201 .insns = {
6202 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6203 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6204 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6205 BPF_LD_MAP_FD(BPF_REG_1, 0),
6206 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6207 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6208 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6209 BPF_LD_MAP_FD(BPF_REG_1, 0),
6210 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6211 BPF_EXIT_INSN(),
6212 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006213 .fixup_map_hash_16b = { 3, 8 },
Paul Chaignon5f90dd62018-04-24 15:08:19 +02006214 .result = ACCEPT,
6215 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6216 },
6217 {
6218 "map update helper access to map",
6219 .insns = {
6220 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6222 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6223 BPF_LD_MAP_FD(BPF_REG_1, 0),
6224 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6225 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6226 BPF_MOV64_IMM(BPF_REG_4, 0),
6227 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6228 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6229 BPF_LD_MAP_FD(BPF_REG_1, 0),
6230 BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6231 BPF_EXIT_INSN(),
6232 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006233 .fixup_map_hash_16b = { 3, 10 },
Paul Chaignon5f90dd62018-04-24 15:08:19 +02006234 .result = ACCEPT,
6235 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6236 },
6237 {
6238 "map update helper access to map: wrong size",
6239 .insns = {
6240 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6241 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6242 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6243 BPF_LD_MAP_FD(BPF_REG_1, 0),
6244 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6245 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6246 BPF_MOV64_IMM(BPF_REG_4, 0),
6247 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6248 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6249 BPF_LD_MAP_FD(BPF_REG_1, 0),
6250 BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6251 BPF_EXIT_INSN(),
6252 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006253 .fixup_map_hash_8b = { 3 },
6254 .fixup_map_hash_16b = { 10 },
Paul Chaignon5f90dd62018-04-24 15:08:19 +02006255 .result = REJECT,
6256 .errstr = "invalid access to map value, value_size=8 off=0 size=16",
6257 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6258 },
6259 {
6260 "map helper access to adjusted map (via const imm)",
6261 .insns = {
6262 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6263 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6264 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6265 BPF_LD_MAP_FD(BPF_REG_1, 0),
6266 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6267 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6268 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6269 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6270 offsetof(struct other_val, bar)),
6271 BPF_LD_MAP_FD(BPF_REG_1, 0),
6272 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6273 BPF_EXIT_INSN(),
6274 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006275 .fixup_map_hash_16b = { 3, 9 },
Paul Chaignon5f90dd62018-04-24 15:08:19 +02006276 .result = ACCEPT,
6277 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6278 },
6279 {
6280 "map helper access to adjusted map (via const imm): out-of-bound 1",
6281 .insns = {
6282 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6283 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6284 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6285 BPF_LD_MAP_FD(BPF_REG_1, 0),
6286 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6287 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6288 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6289 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6290 sizeof(struct other_val) - 4),
6291 BPF_LD_MAP_FD(BPF_REG_1, 0),
6292 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6293 BPF_EXIT_INSN(),
6294 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006295 .fixup_map_hash_16b = { 3, 9 },
Paul Chaignon5f90dd62018-04-24 15:08:19 +02006296 .result = REJECT,
6297 .errstr = "invalid access to map value, value_size=16 off=12 size=8",
6298 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6299 },
6300 {
6301 "map helper access to adjusted map (via const imm): out-of-bound 2",
6302 .insns = {
6303 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6304 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6305 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6306 BPF_LD_MAP_FD(BPF_REG_1, 0),
6307 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6308 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6309 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6310 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6311 BPF_LD_MAP_FD(BPF_REG_1, 0),
6312 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6313 BPF_EXIT_INSN(),
6314 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006315 .fixup_map_hash_16b = { 3, 9 },
Paul Chaignon5f90dd62018-04-24 15:08:19 +02006316 .result = REJECT,
6317 .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6318 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6319 },
6320 {
6321 "map helper access to adjusted map (via const reg)",
6322 .insns = {
6323 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6324 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6325 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6326 BPF_LD_MAP_FD(BPF_REG_1, 0),
6327 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6328 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6329 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6330 BPF_MOV64_IMM(BPF_REG_3,
6331 offsetof(struct other_val, bar)),
6332 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6333 BPF_LD_MAP_FD(BPF_REG_1, 0),
6334 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6335 BPF_EXIT_INSN(),
6336 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006337 .fixup_map_hash_16b = { 3, 10 },
Paul Chaignon5f90dd62018-04-24 15:08:19 +02006338 .result = ACCEPT,
6339 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6340 },
6341 {
6342 "map helper access to adjusted map (via const reg): out-of-bound 1",
6343 .insns = {
6344 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6345 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6346 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6347 BPF_LD_MAP_FD(BPF_REG_1, 0),
6348 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6349 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6350 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6351 BPF_MOV64_IMM(BPF_REG_3,
6352 sizeof(struct other_val) - 4),
6353 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6354 BPF_LD_MAP_FD(BPF_REG_1, 0),
6355 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6356 BPF_EXIT_INSN(),
6357 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006358 .fixup_map_hash_16b = { 3, 10 },
Paul Chaignon5f90dd62018-04-24 15:08:19 +02006359 .result = REJECT,
6360 .errstr = "invalid access to map value, value_size=16 off=12 size=8",
6361 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6362 },
6363 {
6364 "map helper access to adjusted map (via const reg): out-of-bound 2",
6365 .insns = {
6366 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6367 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6368 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6369 BPF_LD_MAP_FD(BPF_REG_1, 0),
6370 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6371 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6372 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6373 BPF_MOV64_IMM(BPF_REG_3, -4),
6374 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6375 BPF_LD_MAP_FD(BPF_REG_1, 0),
6376 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6377 BPF_EXIT_INSN(),
6378 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006379 .fixup_map_hash_16b = { 3, 10 },
Paul Chaignon5f90dd62018-04-24 15:08:19 +02006380 .result = REJECT,
6381 .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6382 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6383 },
6384 {
6385 "map helper access to adjusted map (via variable)",
6386 .insns = {
6387 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6389 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6390 BPF_LD_MAP_FD(BPF_REG_1, 0),
6391 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6392 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6393 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6394 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6395 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6396 offsetof(struct other_val, bar), 4),
6397 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6398 BPF_LD_MAP_FD(BPF_REG_1, 0),
6399 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6400 BPF_EXIT_INSN(),
6401 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006402 .fixup_map_hash_16b = { 3, 11 },
Paul Chaignon5f90dd62018-04-24 15:08:19 +02006403 .result = ACCEPT,
6404 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6405 },
6406 {
6407 "map helper access to adjusted map (via variable): no max check",
6408 .insns = {
6409 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6410 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6411 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6412 BPF_LD_MAP_FD(BPF_REG_1, 0),
6413 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6414 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6415 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6416 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6417 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6418 BPF_LD_MAP_FD(BPF_REG_1, 0),
6419 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6420 BPF_EXIT_INSN(),
6421 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006422 .fixup_map_hash_16b = { 3, 10 },
Paul Chaignon5f90dd62018-04-24 15:08:19 +02006423 .result = REJECT,
6424 .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
6425 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6426 },
6427 {
6428 "map helper access to adjusted map (via variable): wrong max check",
6429 .insns = {
6430 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6431 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6432 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6433 BPF_LD_MAP_FD(BPF_REG_1, 0),
6434 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6435 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6436 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6437 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6438 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6439 offsetof(struct other_val, bar) + 1, 4),
6440 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6441 BPF_LD_MAP_FD(BPF_REG_1, 0),
6442 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6443 BPF_EXIT_INSN(),
6444 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006445 .fixup_map_hash_16b = { 3, 11 },
Paul Chaignon5f90dd62018-04-24 15:08:19 +02006446 .result = REJECT,
6447 .errstr = "invalid access to map value, value_size=16 off=9 size=8",
6448 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6449 },
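	/*
	 * The next block covers map value pointers that take a detour through
	 * the stack or through ALU instructions: spilling and filling must
	 * preserve the pointer's type and bounds, misaligned value accesses
	 * are only exercised where unaligned access is efficient
	 * (F_NEEDS_EFFICIENT_UNALIGNED_ACCESS), and ALU ops that are illegal
	 * on pointers (&=, 32-bit add, /=, byte swap, xadd through a spill
	 * slot) are either rejected outright or leave a scalar that can no
	 * longer be dereferenced.
	 */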
6450 {
Gianluca Borellof0318d02017-01-09 10:19:48 -08006451 "map element value is preserved across register spilling",
6452 .insns = {
6453 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6454 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6455 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6456 BPF_LD_MAP_FD(BPF_REG_1, 0),
6457 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6458 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6459 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6460 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6461 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
6462 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6463 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6464 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6465 BPF_EXIT_INSN(),
6466 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006467 .fixup_map_hash_48b = { 3 },
Gianluca Borellof0318d02017-01-09 10:19:48 -08006468 .errstr_unpriv = "R0 leaks addr",
6469 .result = ACCEPT,
6470 .result_unpriv = REJECT,
6471 },
6472 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006473 "map element value or null is marked on register spilling",
6474 .insns = {
6475 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6476 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6477 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6478 BPF_LD_MAP_FD(BPF_REG_1, 0),
6479 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6480 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6481 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
6482 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6483 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6484 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6485 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6486 BPF_EXIT_INSN(),
6487 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006488 .fixup_map_hash_48b = { 3 },
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006489 .errstr_unpriv = "R0 leaks addr",
6490 .result = ACCEPT,
6491 .result_unpriv = REJECT,
6492 },
6493 {
6494 "map element value store of cleared call register",
6495 .insns = {
6496 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6497 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6498 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6499 BPF_LD_MAP_FD(BPF_REG_1, 0),
6500 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6501 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
6502 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
6503 BPF_EXIT_INSN(),
6504 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006505 .fixup_map_hash_48b = { 3 },
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006506 .errstr_unpriv = "R1 !read_ok",
6507 .errstr = "R1 !read_ok",
6508 .result = REJECT,
6509 .result_unpriv = REJECT,
6510 },
6511 {
6512 "map element value with unaligned store",
6513 .insns = {
6514 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6515 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6516 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6517 BPF_LD_MAP_FD(BPF_REG_1, 0),
6518 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6519 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
6520 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
6521 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6522 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
6523 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
6524 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
6525 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
6526 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
6527 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
6528 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
6529 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
6530 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
6531 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
6532 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
6533 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
6534 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
6535 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
6536 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
6537 BPF_EXIT_INSN(),
6538 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006539 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006540 .errstr_unpriv = "R0 leaks addr",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006541 .result = ACCEPT,
6542 .result_unpriv = REJECT,
6543 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6544 },
6545 {
6546 "map element value with unaligned load",
6547 .insns = {
6548 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6549 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6550 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6551 BPF_LD_MAP_FD(BPF_REG_1, 0),
6552 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6553 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6554 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
6555 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
6556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
6557 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
6558 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
6559 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
6560 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
6561 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
6562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
6563 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
6564 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
6565 BPF_EXIT_INSN(),
6566 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006567 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006568 .errstr_unpriv = "R0 leaks addr",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006569 .result = ACCEPT,
6570 .result_unpriv = REJECT,
6571 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6572 },
6573 {
6574 "map element value illegal alu op, 1",
6575 .insns = {
6576 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6577 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6578 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6579 BPF_LD_MAP_FD(BPF_REG_1, 0),
6580 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6581 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6582 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
6583 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6584 BPF_EXIT_INSN(),
6585 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006586 .fixup_map_hash_48b = { 3 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08006587 .errstr = "R0 bitwise operator &= on pointer",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006588 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006589 },
6590 {
6591 "map element value illegal alu op, 2",
6592 .insns = {
6593 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6594 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6595 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6596 BPF_LD_MAP_FD(BPF_REG_1, 0),
6597 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6598 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6599 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
6600 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6601 BPF_EXIT_INSN(),
6602 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006603 .fixup_map_hash_48b = { 3 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08006604 .errstr = "R0 32-bit pointer arithmetic prohibited",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006605 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006606 },
6607 {
6608 "map element value illegal alu op, 3",
6609 .insns = {
6610 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6611 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6612 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6613 BPF_LD_MAP_FD(BPF_REG_1, 0),
6614 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6615 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6616 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
6617 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6618 BPF_EXIT_INSN(),
6619 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006620 .fixup_map_hash_48b = { 3 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08006621 .errstr = "R0 pointer arithmetic with /= operator",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006622 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006623 },
6624 {
6625 "map element value illegal alu op, 4",
6626 .insns = {
6627 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6628 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6629 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6630 BPF_LD_MAP_FD(BPF_REG_1, 0),
6631 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6632 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6633 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
6634 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6635 BPF_EXIT_INSN(),
6636 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006637 .fixup_map_hash_48b = { 3 },
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006638 .errstr_unpriv = "R0 pointer arithmetic prohibited",
6639 .errstr = "invalid mem access 'inv'",
6640 .result = REJECT,
6641 .result_unpriv = REJECT,
6642 },
6643 {
6644 "map element value illegal alu op, 5",
6645 .insns = {
6646 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6647 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6648 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6649 BPF_LD_MAP_FD(BPF_REG_1, 0),
6650 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6651 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6652 BPF_MOV64_IMM(BPF_REG_3, 4096),
6653 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6654 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6655 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6656 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
6657 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
6658 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6659 BPF_EXIT_INSN(),
6660 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006661 .fixup_map_hash_48b = { 3 },
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006662 .errstr = "R0 invalid mem access 'inv'",
6663 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006664 },
6665 {
6666 "map element value is preserved across register spilling",
Gianluca Borellof0318d02017-01-09 10:19:48 -08006667 .insns = {
6668 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6669 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6670 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6671 BPF_LD_MAP_FD(BPF_REG_1, 0),
6672 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6673 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6674 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
6675 offsetof(struct test_val, foo)),
6676 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6677 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6678 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
6679 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6680 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6681 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6682 BPF_EXIT_INSN(),
6683 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006684 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006685 .errstr_unpriv = "R0 leaks addr",
Gianluca Borellof0318d02017-01-09 10:19:48 -08006686 .result = ACCEPT,
6687 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006688 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Gianluca Borellof0318d02017-01-09 10:19:48 -08006689 },
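	/*
	 * "helper access to variable memory": bpf_probe_read() is called with
	 * a length that is only known as a range at verification time. The
	 * accepted variants bound the length against the buffer with a mask
	 * or a pair of jumps and pre-initialize the stack; the rejected ones
	 * miss a min or max check or read uninitialized stack. A rough
	 * C-level sketch of the accepted shape (illustrative only):
	 *
	 *	char buf[64] = {};
	 *	u64 n = ...;		// unknown after a stack round-trip
	 *	n &= 64;		// or: if (n > 64) goto out;
	 *	bpf_probe_read(buf, n, NULL);
	 */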
Gianluca Borello06c1c042017-01-09 10:19:49 -08006690 {
6691 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
6692 .insns = {
6693 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6694 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6695 BPF_MOV64_IMM(BPF_REG_0, 0),
6696 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6697 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6698 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6699 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6700 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6701 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6702 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6703 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6704 BPF_MOV64_IMM(BPF_REG_2, 16),
6705 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6706 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6707 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6708 BPF_MOV64_IMM(BPF_REG_4, 0),
6709 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6710 BPF_MOV64_IMM(BPF_REG_3, 0),
6711 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6712 BPF_MOV64_IMM(BPF_REG_0, 0),
6713 BPF_EXIT_INSN(),
6714 },
6715 .result = ACCEPT,
6716 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6717 },
6718 {
6719 "helper access to variable memory: stack, bitwise AND, zero included",
6720 .insns = {
6721 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6722 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6723 BPF_MOV64_IMM(BPF_REG_2, 16),
6724 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6725 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6726 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6727 BPF_MOV64_IMM(BPF_REG_3, 0),
6728 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6729 BPF_EXIT_INSN(),
6730 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08006731 .errstr = "invalid indirect read from stack off -64+0 size 64",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006732 .result = REJECT,
6733 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6734 },
6735 {
6736 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
6737 .insns = {
6738 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6739 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6740 BPF_MOV64_IMM(BPF_REG_2, 16),
6741 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6742 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6743 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
6744 BPF_MOV64_IMM(BPF_REG_4, 0),
6745 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6746 BPF_MOV64_IMM(BPF_REG_3, 0),
6747 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6748 BPF_MOV64_IMM(BPF_REG_0, 0),
6749 BPF_EXIT_INSN(),
6750 },
6751 .errstr = "invalid stack type R1 off=-64 access_size=65",
6752 .result = REJECT,
6753 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6754 },
6755 {
6756 "helper access to variable memory: stack, JMP, correct bounds",
6757 .insns = {
6758 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6759 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6760 BPF_MOV64_IMM(BPF_REG_0, 0),
6761 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6762 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6763 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6764 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6765 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6766 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6767 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6768 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6769 BPF_MOV64_IMM(BPF_REG_2, 16),
6770 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6771 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6772 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
6773 BPF_MOV64_IMM(BPF_REG_4, 0),
6774 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6775 BPF_MOV64_IMM(BPF_REG_3, 0),
6776 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6777 BPF_MOV64_IMM(BPF_REG_0, 0),
6778 BPF_EXIT_INSN(),
6779 },
6780 .result = ACCEPT,
6781 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6782 },
6783 {
6784 "helper access to variable memory: stack, JMP (signed), correct bounds",
6785 .insns = {
6786 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6787 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6788 BPF_MOV64_IMM(BPF_REG_0, 0),
6789 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6790 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6791 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6792 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6793 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6794 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6795 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6796 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6797 BPF_MOV64_IMM(BPF_REG_2, 16),
6798 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6799 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6800 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
6801 BPF_MOV64_IMM(BPF_REG_4, 0),
6802 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6803 BPF_MOV64_IMM(BPF_REG_3, 0),
6804 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6805 BPF_MOV64_IMM(BPF_REG_0, 0),
6806 BPF_EXIT_INSN(),
6807 },
6808 .result = ACCEPT,
6809 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6810 },
6811 {
6812 "helper access to variable memory: stack, JMP, bounds + offset",
6813 .insns = {
6814 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6816 BPF_MOV64_IMM(BPF_REG_2, 16),
6817 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6818 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6819 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
6820 BPF_MOV64_IMM(BPF_REG_4, 0),
6821 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
6822 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
6823 BPF_MOV64_IMM(BPF_REG_3, 0),
6824 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6825 BPF_MOV64_IMM(BPF_REG_0, 0),
6826 BPF_EXIT_INSN(),
6827 },
6828 .errstr = "invalid stack type R1 off=-64 access_size=65",
6829 .result = REJECT,
6830 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6831 },
6832 {
6833 "helper access to variable memory: stack, JMP, wrong max",
6834 .insns = {
6835 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6836 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6837 BPF_MOV64_IMM(BPF_REG_2, 16),
6838 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6839 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6840 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
6841 BPF_MOV64_IMM(BPF_REG_4, 0),
6842 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6843 BPF_MOV64_IMM(BPF_REG_3, 0),
6844 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6845 BPF_MOV64_IMM(BPF_REG_0, 0),
6846 BPF_EXIT_INSN(),
6847 },
6848 .errstr = "invalid stack type R1 off=-64 access_size=65",
6849 .result = REJECT,
6850 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6851 },
6852 {
6853 "helper access to variable memory: stack, JMP, no max check",
6854 .insns = {
6855 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6856 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6857 BPF_MOV64_IMM(BPF_REG_2, 16),
6858 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6859 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6860 BPF_MOV64_IMM(BPF_REG_4, 0),
6861 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6862 BPF_MOV64_IMM(BPF_REG_3, 0),
6863 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6864 BPF_MOV64_IMM(BPF_REG_0, 0),
6865 BPF_EXIT_INSN(),
6866 },
Edward Creef65b1842017-08-07 15:27:12 +01006867 /* because max wasn't checked, signed min is negative */
6868 .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006869 .result = REJECT,
6870 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6871 },
6872 {
6873 "helper access to variable memory: stack, JMP, no min check",
6874 .insns = {
6875 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6876 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6877 BPF_MOV64_IMM(BPF_REG_2, 16),
6878 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6879 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6880 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
6881 BPF_MOV64_IMM(BPF_REG_3, 0),
6882 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6883 BPF_MOV64_IMM(BPF_REG_0, 0),
6884 BPF_EXIT_INSN(),
6885 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08006886 .errstr = "invalid indirect read from stack off -64+0 size 64",
Gianluca Borello06c1c042017-01-09 10:19:49 -08006887 .result = REJECT,
6888 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6889 },
6890 {
6891 "helper access to variable memory: stack, JMP (signed), no min check",
6892 .insns = {
6893 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6894 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6895 BPF_MOV64_IMM(BPF_REG_2, 16),
6896 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6897 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6898 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
6899 BPF_MOV64_IMM(BPF_REG_3, 0),
6900 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6901 BPF_MOV64_IMM(BPF_REG_0, 0),
6902 BPF_EXIT_INSN(),
6903 },
6904 .errstr = "R2 min value is negative",
6905 .result = REJECT,
6906 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6907 },
6908 {
6909 "helper access to variable memory: map, JMP, correct bounds",
6910 .insns = {
6911 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6912 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6913 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6914 BPF_LD_MAP_FD(BPF_REG_1, 0),
6915 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6916 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6917 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6918 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6919 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6920 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6921 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6922 sizeof(struct test_val), 4),
6923 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02006924 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006925 BPF_MOV64_IMM(BPF_REG_3, 0),
6926 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6927 BPF_MOV64_IMM(BPF_REG_0, 0),
6928 BPF_EXIT_INSN(),
6929 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006930 .fixup_map_hash_48b = { 3 },
Gianluca Borello06c1c042017-01-09 10:19:49 -08006931 .result = ACCEPT,
6932 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6933 },
6934 {
6935 "helper access to variable memory: map, JMP, wrong max",
6936 .insns = {
6937 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6938 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6939 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6940 BPF_LD_MAP_FD(BPF_REG_1, 0),
6941 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6942 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6943 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6944 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6945 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6946 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6947 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6948 sizeof(struct test_val) + 1, 4),
6949 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02006950 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006951 BPF_MOV64_IMM(BPF_REG_3, 0),
6952 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6953 BPF_MOV64_IMM(BPF_REG_0, 0),
6954 BPF_EXIT_INSN(),
6955 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006956 .fixup_map_hash_48b = { 3 },
Gianluca Borello06c1c042017-01-09 10:19:49 -08006957 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
6958 .result = REJECT,
6959 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6960 },
6961 {
6962 "helper access to variable memory: map adjusted, JMP, correct bounds",
6963 .insns = {
6964 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6965 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6966 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6967 BPF_LD_MAP_FD(BPF_REG_1, 0),
6968 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6969 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6970 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6971 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
6972 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6973 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6974 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6975 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6976 sizeof(struct test_val) - 20, 4),
6977 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02006978 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08006979 BPF_MOV64_IMM(BPF_REG_3, 0),
6980 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6981 BPF_MOV64_IMM(BPF_REG_0, 0),
6982 BPF_EXIT_INSN(),
6983 },
Prashant Bhole908142e2018-10-09 10:04:53 +09006984 .fixup_map_hash_48b = { 3 },
Gianluca Borello06c1c042017-01-09 10:19:49 -08006985 .result = ACCEPT,
6986 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6987 },
6988 {
6989 "helper access to variable memory: map adjusted, JMP, wrong max",
6990 .insns = {
6991 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6992 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6993 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6994 BPF_LD_MAP_FD(BPF_REG_1, 0),
6995 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6996 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6997 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6998 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
6999 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7000 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7001 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7002 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7003 sizeof(struct test_val) - 19, 4),
7004 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02007005 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08007006 BPF_MOV64_IMM(BPF_REG_3, 0),
7007 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7008 BPF_MOV64_IMM(BPF_REG_0, 0),
7009 BPF_EXIT_INSN(),
7010 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007011 .fixup_map_hash_48b = { 3 },
Gianluca Borello06c1c042017-01-09 10:19:49 -08007012 .errstr = "R1 min value is outside of the array range",
7013 .result = REJECT,
7014 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7015 },
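	/*
	 * The NULL/zero-size tests below contrast a helper whose memory
	 * argument is ARG_PTR_TO_MEM_OR_NULL (bpf_csum_diff) with one where
	 * it is not (bpf_probe_read): a NULL pointer is accepted only with a
	 * provably zero size and only for the _OR_NULL flavour, while a
	 * non-NULL stack or map pointer with a possibly-zero size is fine in
	 * both cases.
	 */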
7016 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00007017 "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
Edward Creef65b1842017-08-07 15:27:12 +01007018 .insns = {
7019 BPF_MOV64_IMM(BPF_REG_1, 0),
7020 BPF_MOV64_IMM(BPF_REG_2, 0),
7021 BPF_MOV64_IMM(BPF_REG_3, 0),
7022 BPF_MOV64_IMM(BPF_REG_4, 0),
7023 BPF_MOV64_IMM(BPF_REG_5, 0),
7024 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7025 BPF_EXIT_INSN(),
7026 },
7027 .result = ACCEPT,
7028 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7029 },
7030 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00007031 "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
Gianluca Borello06c1c042017-01-09 10:19:49 -08007032 .insns = {
7033 BPF_MOV64_IMM(BPF_REG_1, 0),
Alexei Starovoitovd98588c2017-12-14 17:55:09 -08007034 BPF_MOV64_IMM(BPF_REG_2, 1),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01007035 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7036 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08007037 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7038 BPF_MOV64_IMM(BPF_REG_3, 0),
7039 BPF_MOV64_IMM(BPF_REG_4, 0),
7040 BPF_MOV64_IMM(BPF_REG_5, 0),
7041 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7042 BPF_EXIT_INSN(),
7043 },
Edward Creef65b1842017-08-07 15:27:12 +01007044 .errstr = "R1 type=inv expected=fp",
Gianluca Borello06c1c042017-01-09 10:19:49 -08007045 .result = REJECT,
7046 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7047 },
7048 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00007049 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
Gianluca Borello06c1c042017-01-09 10:19:49 -08007050 .insns = {
7051 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7052 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7053 BPF_MOV64_IMM(BPF_REG_2, 0),
7054 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7055 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
7056 BPF_MOV64_IMM(BPF_REG_3, 0),
7057 BPF_MOV64_IMM(BPF_REG_4, 0),
7058 BPF_MOV64_IMM(BPF_REG_5, 0),
7059 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7060 BPF_EXIT_INSN(),
7061 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08007062 .result = ACCEPT,
7063 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7064 },
7065 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00007066 "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08007067 .insns = {
7068 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7069 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7070 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7071 BPF_LD_MAP_FD(BPF_REG_1, 0),
7072 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7073 BPF_FUNC_map_lookup_elem),
7074 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7075 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7076 BPF_MOV64_IMM(BPF_REG_2, 0),
7077 BPF_MOV64_IMM(BPF_REG_3, 0),
7078 BPF_MOV64_IMM(BPF_REG_4, 0),
7079 BPF_MOV64_IMM(BPF_REG_5, 0),
7080 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7081 BPF_EXIT_INSN(),
7082 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007083 .fixup_map_hash_8b = { 3 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08007084 .result = ACCEPT,
7085 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7086 },
7087 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00007088 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08007089 .insns = {
7090 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7091 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7092 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7093 BPF_LD_MAP_FD(BPF_REG_1, 0),
7094 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7095 BPF_FUNC_map_lookup_elem),
7096 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7097 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7098 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
7099 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7100 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7101 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7102 BPF_MOV64_IMM(BPF_REG_3, 0),
7103 BPF_MOV64_IMM(BPF_REG_4, 0),
7104 BPF_MOV64_IMM(BPF_REG_5, 0),
7105 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7106 BPF_EXIT_INSN(),
7107 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007108 .fixup_map_hash_8b = { 3 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08007109 .result = ACCEPT,
7110 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7111 },
7112 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00007113 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08007114 .insns = {
7115 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7116 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7117 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7118 BPF_LD_MAP_FD(BPF_REG_1, 0),
7119 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7120 BPF_FUNC_map_lookup_elem),
7121 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7122 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7123 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7124 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7125 BPF_MOV64_IMM(BPF_REG_3, 0),
7126 BPF_MOV64_IMM(BPF_REG_4, 0),
7127 BPF_MOV64_IMM(BPF_REG_5, 0),
7128 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7129 BPF_EXIT_INSN(),
7130 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007131 .fixup_map_hash_8b = { 3 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08007132 .result = ACCEPT,
7133 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7134 },
7135 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00007136 "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08007137 .insns = {
7138 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7139 offsetof(struct __sk_buff, data)),
7140 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7141 offsetof(struct __sk_buff, data_end)),
7142 BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
7143 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7144 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
7145 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
7146 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
7147 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7148 BPF_MOV64_IMM(BPF_REG_3, 0),
7149 BPF_MOV64_IMM(BPF_REG_4, 0),
7150 BPF_MOV64_IMM(BPF_REG_5, 0),
7151 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7152 BPF_EXIT_INSN(),
7153 },
7154 .result = ACCEPT,
Gianluca Borello06c1c042017-01-09 10:19:49 -08007155 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08007156 .retval = 0 /* csum_diff of 64-byte packet */,
Gianluca Borello06c1c042017-01-09 10:19:49 -08007157 },
7158 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00007159 "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7160 .insns = {
7161 BPF_MOV64_IMM(BPF_REG_1, 0),
7162 BPF_MOV64_IMM(BPF_REG_2, 0),
7163 BPF_MOV64_IMM(BPF_REG_3, 0),
7164 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7165 BPF_EXIT_INSN(),
7166 },
7167 .errstr = "R1 type=inv expected=fp",
7168 .result = REJECT,
7169 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7170 },
7171 {
7172 "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7173 .insns = {
7174 BPF_MOV64_IMM(BPF_REG_1, 0),
7175 BPF_MOV64_IMM(BPF_REG_2, 1),
7176 BPF_MOV64_IMM(BPF_REG_3, 0),
7177 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7178 BPF_EXIT_INSN(),
7179 },
7180 .errstr = "R1 type=inv expected=fp",
7181 .result = REJECT,
7182 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7183 },
7184 {
7185 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7186 .insns = {
7187 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7188 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7189 BPF_MOV64_IMM(BPF_REG_2, 0),
7190 BPF_MOV64_IMM(BPF_REG_3, 0),
7191 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7192 BPF_EXIT_INSN(),
7193 },
7194 .result = ACCEPT,
7195 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7196 },
7197 {
7198 "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7199 .insns = {
7200 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7201 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7202 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7203 BPF_LD_MAP_FD(BPF_REG_1, 0),
7204 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7205 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7206 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7207 BPF_MOV64_IMM(BPF_REG_2, 0),
7208 BPF_MOV64_IMM(BPF_REG_3, 0),
7209 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7210 BPF_EXIT_INSN(),
7211 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007212 .fixup_map_hash_8b = { 3 },
Gianluca Borellodb1ac492017-11-22 18:32:53 +00007213 .result = ACCEPT,
7214 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7215 },
7216 {
7217 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7218 .insns = {
7219 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7220 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7222 BPF_LD_MAP_FD(BPF_REG_1, 0),
7223 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7224 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7225 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7226 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7227 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7228 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7229 BPF_MOV64_IMM(BPF_REG_3, 0),
7230 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7231 BPF_EXIT_INSN(),
7232 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007233 .fixup_map_hash_8b = { 3 },
Gianluca Borellodb1ac492017-11-22 18:32:53 +00007234 .result = ACCEPT,
7235 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7236 },
7237 {
7238 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7239 .insns = {
7240 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7241 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7242 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7243 BPF_LD_MAP_FD(BPF_REG_1, 0),
7244 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7245 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7246 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7247 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7248 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
7249 BPF_MOV64_IMM(BPF_REG_3, 0),
7250 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7251 BPF_EXIT_INSN(),
7252 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007253 .fixup_map_hash_8b = { 3 },
Gianluca Borellodb1ac492017-11-22 18:32:53 +00007254 .result = ACCEPT,
7255 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7256 },
7257 {
Gianluca Borello06c1c042017-01-09 10:19:49 -08007258 "helper access to variable memory: 8 bytes leak",
7259 .insns = {
7260 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7261 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7262 BPF_MOV64_IMM(BPF_REG_0, 0),
7263 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7264 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7265 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7266 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7267 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7268 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7269 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
Alexei Starovoitovd98588c2017-12-14 17:55:09 -08007270 BPF_MOV64_IMM(BPF_REG_2, 1),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01007271 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7272 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08007273 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
7274 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7275 BPF_MOV64_IMM(BPF_REG_3, 0),
7276 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7277 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7278 BPF_EXIT_INSN(),
7279 },
7280 .errstr = "invalid indirect read from stack off -64+32 size 64",
7281 .result = REJECT,
7282 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7283 },
7284 {
7285 "helper access to variable memory: 8 bytes no leak (init memory)",
7286 .insns = {
7287 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7288 BPF_MOV64_IMM(BPF_REG_0, 0),
7289 BPF_MOV64_IMM(BPF_REG_0, 0),
7290 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7291 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7292 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7293 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7294 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7295 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7296 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7297 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7298 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7299 BPF_MOV64_IMM(BPF_REG_2, 0),
7300 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
7301 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
7302 BPF_MOV64_IMM(BPF_REG_3, 0),
7303 BPF_EMIT_CALL(BPF_FUNC_probe_read),
7304 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7305 BPF_EXIT_INSN(),
7306 },
7307 .result = ACCEPT,
7308 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7309 },
Josef Bacik29200c12017-02-03 16:25:23 -05007310 {
7311 "invalid and of negative number",
7312 .insns = {
7313 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7314 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7315 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7316 BPF_LD_MAP_FD(BPF_REG_1, 0),
7317 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7318 BPF_FUNC_map_lookup_elem),
7319 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
Edward Creef65b1842017-08-07 15:27:12 +01007320 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
Josef Bacik29200c12017-02-03 16:25:23 -05007321 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
7322 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
7323 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7324 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7325 offsetof(struct test_val, foo)),
7326 BPF_EXIT_INSN(),
7327 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007328 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01007329 .errstr = "R0 max value is outside of the array range",
Josef Bacik29200c12017-02-03 16:25:23 -05007330 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02007331 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik29200c12017-02-03 16:25:23 -05007332 },
7333 {
7334 "invalid range check",
7335 .insns = {
7336 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7337 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7339 BPF_LD_MAP_FD(BPF_REG_1, 0),
7340 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7341 BPF_FUNC_map_lookup_elem),
7342 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
7343 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7344 BPF_MOV64_IMM(BPF_REG_9, 1),
7345 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
7346 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
7347 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
7348 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
7349 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
7350 BPF_MOV32_IMM(BPF_REG_3, 1),
7351 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
7352 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
7353 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
7354 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
7355 BPF_MOV64_REG(BPF_REG_0, 0),
7356 BPF_EXIT_INSN(),
7357 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007358 .fixup_map_hash_48b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01007359 .errstr = "R0 max value is outside of the array range",
Josef Bacik29200c12017-02-03 16:25:23 -05007360 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02007361 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07007362 },
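/* Map-in-map tests: a lookup in the outer map yields the inner map
 * pointer. It must be NULL-checked before it can be passed as the map
 * argument of the second lookup, and pointer arithmetic on it is
 * rejected ("pointer arithmetic on map_ptr prohibited").
 */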
7363 {
7364 "map in map access",
7365 .insns = {
7366 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7367 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7368 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7369 BPF_LD_MAP_FD(BPF_REG_1, 0),
7370 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7371 BPF_FUNC_map_lookup_elem),
7372 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7373 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7374 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7375 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7376 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7377 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7378 BPF_FUNC_map_lookup_elem),
Roman Gushchin0069fb82018-08-02 15:47:10 -07007379 BPF_MOV64_IMM(BPF_REG_0, 0),
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07007380 BPF_EXIT_INSN(),
7381 },
7382 .fixup_map_in_map = { 3 },
7383 .result = ACCEPT,
7384 },
7385 {
7386 "invalid inner map pointer",
7387 .insns = {
7388 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7389 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7390 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7391 BPF_LD_MAP_FD(BPF_REG_1, 0),
7392 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7393 BPF_FUNC_map_lookup_elem),
7394 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7395 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7396 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7397 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7398 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7399 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7400 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7401 BPF_FUNC_map_lookup_elem),
Roman Gushchin0069fb82018-08-02 15:47:10 -07007402 BPF_MOV64_IMM(BPF_REG_0, 0),
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07007403 BPF_EXIT_INSN(),
7404 },
7405 .fixup_map_in_map = { 3 },
Joe Stringeraad2eea2018-10-02 13:35:30 -07007406 .errstr = "R1 pointer arithmetic on map_ptr prohibited",
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07007407 .result = REJECT,
7408 },
7409 {
7410 "forgot null checking on the inner map pointer",
7411 .insns = {
7412 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7413 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7414 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7415 BPF_LD_MAP_FD(BPF_REG_1, 0),
7416 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7417 BPF_FUNC_map_lookup_elem),
7418 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7419 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7420 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7421 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7422 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7423 BPF_FUNC_map_lookup_elem),
Roman Gushchin0069fb82018-08-02 15:47:10 -07007424 BPF_MOV64_IMM(BPF_REG_0, 0),
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07007425 BPF_EXIT_INSN(),
7426 },
7427 .fixup_map_in_map = { 3 },
7428 .errstr = "R1 type=map_value_or_null expected=map_ptr",
7429 .result = REJECT,
Daniel Borkmann614d0d72017-05-25 01:05:09 +02007430 },
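/* LD_ABS/LD_IND use an implicit calling convention: the skb context
 * lives in R6 and the instruction clobbers R1-R5, so reading any of
 * those registers afterwards must fail with "!read_ok". Callee-saved
 * registers such as R7 survive, hence the r7 variants are accepted.
 */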
7431 {
7432 "ld_abs: check calling conv, r1",
7433 .insns = {
7434 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7435 BPF_MOV64_IMM(BPF_REG_1, 0),
7436 BPF_LD_ABS(BPF_W, -0x200000),
7437 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
7438 BPF_EXIT_INSN(),
7439 },
7440 .errstr = "R1 !read_ok",
7441 .result = REJECT,
7442 },
7443 {
7444 "ld_abs: check calling conv, r2",
7445 .insns = {
7446 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7447 BPF_MOV64_IMM(BPF_REG_2, 0),
7448 BPF_LD_ABS(BPF_W, -0x200000),
7449 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7450 BPF_EXIT_INSN(),
7451 },
7452 .errstr = "R2 !read_ok",
7453 .result = REJECT,
7454 },
7455 {
7456 "ld_abs: check calling conv, r3",
7457 .insns = {
7458 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7459 BPF_MOV64_IMM(BPF_REG_3, 0),
7460 BPF_LD_ABS(BPF_W, -0x200000),
7461 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7462 BPF_EXIT_INSN(),
7463 },
7464 .errstr = "R3 !read_ok",
7465 .result = REJECT,
7466 },
7467 {
7468 "ld_abs: check calling conv, r4",
7469 .insns = {
7470 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7471 BPF_MOV64_IMM(BPF_REG_4, 0),
7472 BPF_LD_ABS(BPF_W, -0x200000),
7473 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7474 BPF_EXIT_INSN(),
7475 },
7476 .errstr = "R4 !read_ok",
7477 .result = REJECT,
7478 },
7479 {
7480 "ld_abs: check calling conv, r5",
7481 .insns = {
7482 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7483 BPF_MOV64_IMM(BPF_REG_5, 0),
7484 BPF_LD_ABS(BPF_W, -0x200000),
7485 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
7486 BPF_EXIT_INSN(),
7487 },
7488 .errstr = "R5 !read_ok",
7489 .result = REJECT,
7490 },
7491 {
7492 "ld_abs: check calling conv, r7",
7493 .insns = {
7494 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7495 BPF_MOV64_IMM(BPF_REG_7, 0),
7496 BPF_LD_ABS(BPF_W, -0x200000),
7497 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
7498 BPF_EXIT_INSN(),
7499 },
7500 .result = ACCEPT,
7501 },
7502 {
Daniel Borkmann87ab8192017-12-14 21:07:27 +01007503 "ld_abs: tests on r6 and skb data reload helper",
7504 .insns = {
7505 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7506 BPF_LD_ABS(BPF_B, 0),
7507 BPF_LD_ABS(BPF_H, 0),
7508 BPF_LD_ABS(BPF_W, 0),
7509 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
7510 BPF_MOV64_IMM(BPF_REG_6, 0),
7511 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
7512 BPF_MOV64_IMM(BPF_REG_2, 1),
7513 BPF_MOV64_IMM(BPF_REG_3, 2),
7514 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7515 BPF_FUNC_skb_vlan_push),
7516 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
7517 BPF_LD_ABS(BPF_B, 0),
7518 BPF_LD_ABS(BPF_H, 0),
7519 BPF_LD_ABS(BPF_W, 0),
7520 BPF_MOV64_IMM(BPF_REG_0, 42),
7521 BPF_EXIT_INSN(),
7522 },
7523 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7524 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08007525 .retval = 42 /* ultimate return value */,
Daniel Borkmann87ab8192017-12-14 21:07:27 +01007526 },
7527 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02007528 "ld_ind: check calling conv, r1",
7529 .insns = {
7530 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7531 BPF_MOV64_IMM(BPF_REG_1, 1),
7532 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
7533 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
7534 BPF_EXIT_INSN(),
7535 },
7536 .errstr = "R1 !read_ok",
7537 .result = REJECT,
7538 },
7539 {
7540 "ld_ind: check calling conv, r2",
7541 .insns = {
7542 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7543 BPF_MOV64_IMM(BPF_REG_2, 1),
7544 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
7545 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7546 BPF_EXIT_INSN(),
7547 },
7548 .errstr = "R2 !read_ok",
7549 .result = REJECT,
7550 },
7551 {
7552 "ld_ind: check calling conv, r3",
7553 .insns = {
7554 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7555 BPF_MOV64_IMM(BPF_REG_3, 1),
7556 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
7557 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7558 BPF_EXIT_INSN(),
7559 },
7560 .errstr = "R3 !read_ok",
7561 .result = REJECT,
7562 },
7563 {
7564 "ld_ind: check calling conv, r4",
7565 .insns = {
7566 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7567 BPF_MOV64_IMM(BPF_REG_4, 1),
7568 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
7569 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7570 BPF_EXIT_INSN(),
7571 },
7572 .errstr = "R4 !read_ok",
7573 .result = REJECT,
7574 },
7575 {
7576 "ld_ind: check calling conv, r5",
7577 .insns = {
7578 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7579 BPF_MOV64_IMM(BPF_REG_5, 1),
7580 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
7581 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
7582 BPF_EXIT_INSN(),
7583 },
7584 .errstr = "R5 !read_ok",
7585 .result = REJECT,
7586 },
7587 {
7588 "ld_ind: check calling conv, r7",
7589 .insns = {
7590 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7591 BPF_MOV64_IMM(BPF_REG_7, 1),
7592 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
7593 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
7594 BPF_EXIT_INSN(),
7595 },
7596 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08007597 .retval = 1,
Daniel Borkmann614d0d72017-05-25 01:05:09 +02007598 },
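/* Narrow (byte/half/word) loads from bpf_perf_event_data->sample_period
 * are allowed for BPF_PROG_TYPE_PERF_EVENT programs. sample_period is a
 * u64, so on big-endian hosts the equivalent narrow access sits at the
 * opposite end of the field, hence the __BYTE_ORDER offsets below.
 */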
Yonghong Song18f3d6b2017-06-13 15:52:14 -07007599 {
7600 "check bpf_perf_event_data->sample_period byte load permitted",
7601 .insns = {
7602 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02007603#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07007604 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
7605 offsetof(struct bpf_perf_event_data, sample_period)),
7606#else
7607 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
7608 offsetof(struct bpf_perf_event_data, sample_period) + 7),
7609#endif
7610 BPF_EXIT_INSN(),
7611 },
7612 .result = ACCEPT,
7613 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7614 },
7615 {
7616 "check bpf_perf_event_data->sample_period half load permitted",
7617 .insns = {
7618 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02007619#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07007620 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7621 offsetof(struct bpf_perf_event_data, sample_period)),
7622#else
7623 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7624 offsetof(struct bpf_perf_event_data, sample_period) + 6),
7625#endif
7626 BPF_EXIT_INSN(),
7627 },
7628 .result = ACCEPT,
7629 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7630 },
7631 {
7632 "check bpf_perf_event_data->sample_period word load permitted",
7633 .insns = {
7634 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02007635#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07007636 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7637 offsetof(struct bpf_perf_event_data, sample_period)),
7638#else
7639 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7640 offsetof(struct bpf_perf_event_data, sample_period) + 4),
7641#endif
7642 BPF_EXIT_INSN(),
7643 },
7644 .result = ACCEPT,
7645 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7646 },
7647 {
7648 "check bpf_perf_event_data->sample_period dword load permitted",
7649 .insns = {
7650 BPF_MOV64_IMM(BPF_REG_0, 0),
7651 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
7652 offsetof(struct bpf_perf_event_data, sample_period)),
7653 BPF_EXIT_INSN(),
7654 },
7655 .result = ACCEPT,
7656 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
7657 },
7658 {
7659 "check skb->data half load not permitted",
7660 .insns = {
7661 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02007662#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07007663 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7664 offsetof(struct __sk_buff, data)),
7665#else
7666 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7667 offsetof(struct __sk_buff, data) + 2),
7668#endif
7669 BPF_EXIT_INSN(),
7670 },
7671 .result = REJECT,
7672 .errstr = "invalid bpf_context access",
7673 },
7674 {
7675 "check skb->tc_classid half load not permitted for lwt prog",
7676 .insns = {
7677 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02007678#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07007679 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7680 offsetof(struct __sk_buff, tc_classid)),
7681#else
7682 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7683 offsetof(struct __sk_buff, tc_classid) + 2),
7684#endif
7685 BPF_EXIT_INSN(),
7686 },
7687 .result = REJECT,
7688 .errstr = "invalid bpf_context access",
7689 .prog_type = BPF_PROG_TYPE_LWT_IN,
7690 },
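/* The "mixing signed and unsigned" series loads a spilled value from the
 * stack, bounds it with a mix of unsigned (JGT/JGE) and signed (JSGT)
 * comparisons and then uses it as a map-value offset. Checks that never
 * pin the minimum (e.g. an unsigned compare against -1) leave the value
 * potentially negative and are rejected with "unbounded min value";
 * variants whose checks genuinely bound the value are accepted.
 */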
Edward Creeb712296a2017-07-21 00:00:24 +02007691 {
7692 "bounds checks mixing signed and unsigned, positive bounds",
7693 .insns = {
7694 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7695 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7696 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7697 BPF_LD_MAP_FD(BPF_REG_1, 0),
7698 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7699 BPF_FUNC_map_lookup_elem),
7700 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7701 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7702 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7703 BPF_MOV64_IMM(BPF_REG_2, 2),
7704 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
7705 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
7706 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7707 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7708 BPF_MOV64_IMM(BPF_REG_0, 0),
7709 BPF_EXIT_INSN(),
7710 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007711 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007712 .errstr = "unbounded min value",
Edward Creeb712296a2017-07-21 00:00:24 +02007713 .result = REJECT,
Edward Creeb712296a2017-07-21 00:00:24 +02007714 },
7715 {
7716 "bounds checks mixing signed and unsigned",
7717 .insns = {
7718 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7719 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7720 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7721 BPF_LD_MAP_FD(BPF_REG_1, 0),
7722 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7723 BPF_FUNC_map_lookup_elem),
7724 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7725 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7726 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7727 BPF_MOV64_IMM(BPF_REG_2, -1),
7728 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
7729 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7730 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7731 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7732 BPF_MOV64_IMM(BPF_REG_0, 0),
7733 BPF_EXIT_INSN(),
7734 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007735 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007736 .errstr = "unbounded min value",
Edward Creeb712296a2017-07-21 00:00:24 +02007737 .result = REJECT,
Edward Creeb712296a2017-07-21 00:00:24 +02007738 },
Daniel Borkmann86412502017-07-21 00:00:25 +02007739 {
7740 "bounds checks mixing signed and unsigned, variant 2",
7741 .insns = {
7742 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7743 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7744 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7745 BPF_LD_MAP_FD(BPF_REG_1, 0),
7746 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7747 BPF_FUNC_map_lookup_elem),
7748 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7749 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7750 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7751 BPF_MOV64_IMM(BPF_REG_2, -1),
7752 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
7753 BPF_MOV64_IMM(BPF_REG_8, 0),
7754 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
7755 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
7756 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
7757 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
7758 BPF_MOV64_IMM(BPF_REG_0, 0),
7759 BPF_EXIT_INSN(),
7760 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007761 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007762 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007763 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007764 },
7765 {
7766 "bounds checks mixing signed and unsigned, variant 3",
7767 .insns = {
7768 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7769 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7770 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7771 BPF_LD_MAP_FD(BPF_REG_1, 0),
7772 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7773 BPF_FUNC_map_lookup_elem),
7774 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7775 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7776 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7777 BPF_MOV64_IMM(BPF_REG_2, -1),
7778 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
7779 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
7780 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
7781 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
7782 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
7783 BPF_MOV64_IMM(BPF_REG_0, 0),
7784 BPF_EXIT_INSN(),
7785 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007786 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007787 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007788 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007789 },
7790 {
7791 "bounds checks mixing signed and unsigned, variant 4",
7792 .insns = {
7793 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7794 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7795 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7796 BPF_LD_MAP_FD(BPF_REG_1, 0),
7797 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7798 BPF_FUNC_map_lookup_elem),
7799 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7800 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7801 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7802 BPF_MOV64_IMM(BPF_REG_2, 1),
7803 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
7804 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7805 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7806 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7807 BPF_MOV64_IMM(BPF_REG_0, 0),
7808 BPF_EXIT_INSN(),
7809 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007810 .fixup_map_hash_8b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01007811 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007812 },
7813 {
7814 "bounds checks mixing signed and unsigned, variant 5",
7815 .insns = {
7816 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7817 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7818 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7819 BPF_LD_MAP_FD(BPF_REG_1, 0),
7820 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7821 BPF_FUNC_map_lookup_elem),
7822 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7823 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7824 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7825 BPF_MOV64_IMM(BPF_REG_2, -1),
7826 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
7827 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
7828 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
7829 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7830 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7831 BPF_MOV64_IMM(BPF_REG_0, 0),
7832 BPF_EXIT_INSN(),
7833 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007834 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007835 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007836 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007837 },
7838 {
7839 "bounds checks mixing signed and unsigned, variant 6",
7840 .insns = {
7841 BPF_MOV64_IMM(BPF_REG_2, 0),
7842 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
7843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
7844 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7845 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
7846 BPF_MOV64_IMM(BPF_REG_6, -1),
7847 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
7848 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
7849 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
7850 BPF_MOV64_IMM(BPF_REG_5, 0),
7851 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
7852 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7853 BPF_FUNC_skb_load_bytes),
7854 BPF_MOV64_IMM(BPF_REG_0, 0),
7855 BPF_EXIT_INSN(),
7856 },
Daniel Borkmann86412502017-07-21 00:00:25 +02007857 .errstr = "R4 min value is negative, either use unsigned",
7858 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007859 },
7860 {
7861 "bounds checks mixing signed and unsigned, variant 7",
7862 .insns = {
7863 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7864 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7865 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7866 BPF_LD_MAP_FD(BPF_REG_1, 0),
7867 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7868 BPF_FUNC_map_lookup_elem),
7869 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7870 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7871 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7872 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
7873 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
7874 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7875 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7876 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7877 BPF_MOV64_IMM(BPF_REG_0, 0),
7878 BPF_EXIT_INSN(),
7879 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007880 .fixup_map_hash_8b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01007881 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007882 },
7883 {
7884 "bounds checks mixing signed and unsigned, variant 8",
7885 .insns = {
7886 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7887 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7888 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7889 BPF_LD_MAP_FD(BPF_REG_1, 0),
7890 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7891 BPF_FUNC_map_lookup_elem),
Daniel Borkmann86412502017-07-21 00:00:25 +02007892 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7893 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7894 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7895 BPF_MOV64_IMM(BPF_REG_2, -1),
7896 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
7897 BPF_MOV64_IMM(BPF_REG_0, 0),
7898 BPF_EXIT_INSN(),
7899 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7900 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7901 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7902 BPF_MOV64_IMM(BPF_REG_0, 0),
7903 BPF_EXIT_INSN(),
7904 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007905 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007906 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007907 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007908 },
7909 {
Edward Creef65b1842017-08-07 15:27:12 +01007910 "bounds checks mixing signed and unsigned, variant 9",
Daniel Borkmann86412502017-07-21 00:00:25 +02007911 .insns = {
7912 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7913 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7914 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7915 BPF_LD_MAP_FD(BPF_REG_1, 0),
7916 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7917 BPF_FUNC_map_lookup_elem),
7918 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7919 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7920 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7921 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
7922 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
7923 BPF_MOV64_IMM(BPF_REG_0, 0),
7924 BPF_EXIT_INSN(),
7925 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7926 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7927 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7928 BPF_MOV64_IMM(BPF_REG_0, 0),
7929 BPF_EXIT_INSN(),
7930 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007931 .fixup_map_hash_8b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01007932 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007933 },
7934 {
Edward Creef65b1842017-08-07 15:27:12 +01007935 "bounds checks mixing signed and unsigned, variant 10",
Daniel Borkmann86412502017-07-21 00:00:25 +02007936 .insns = {
7937 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7938 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7939 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7940 BPF_LD_MAP_FD(BPF_REG_1, 0),
7941 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7942 BPF_FUNC_map_lookup_elem),
7943 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7944 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7945 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7946 BPF_MOV64_IMM(BPF_REG_2, 0),
7947 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
7948 BPF_MOV64_IMM(BPF_REG_0, 0),
7949 BPF_EXIT_INSN(),
7950 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7951 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7952 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7953 BPF_MOV64_IMM(BPF_REG_0, 0),
7954 BPF_EXIT_INSN(),
7955 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007956 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007957 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007958 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007959 },
7960 {
Edward Creef65b1842017-08-07 15:27:12 +01007961 "bounds checks mixing signed and unsigned, variant 11",
Daniel Borkmann86412502017-07-21 00:00:25 +02007962 .insns = {
7963 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7964 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7965 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7966 BPF_LD_MAP_FD(BPF_REG_1, 0),
7967 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7968 BPF_FUNC_map_lookup_elem),
7969 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7970 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7971 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7972 BPF_MOV64_IMM(BPF_REG_2, -1),
7973 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7974 /* Dead branch. */
7975 BPF_MOV64_IMM(BPF_REG_0, 0),
7976 BPF_EXIT_INSN(),
7977 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7978 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7979 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7980 BPF_MOV64_IMM(BPF_REG_0, 0),
7981 BPF_EXIT_INSN(),
7982 },
Prashant Bhole908142e2018-10-09 10:04:53 +09007983 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08007984 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02007985 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02007986 },
7987 {
Edward Creef65b1842017-08-07 15:27:12 +01007988 "bounds checks mixing signed and unsigned, variant 12",
Daniel Borkmann86412502017-07-21 00:00:25 +02007989 .insns = {
7990 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7991 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7992 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7993 BPF_LD_MAP_FD(BPF_REG_1, 0),
7994 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7995 BPF_FUNC_map_lookup_elem),
7996 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7997 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7998 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7999 BPF_MOV64_IMM(BPF_REG_2, -6),
8000 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8001 BPF_MOV64_IMM(BPF_REG_0, 0),
8002 BPF_EXIT_INSN(),
8003 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8004 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8005 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8006 BPF_MOV64_IMM(BPF_REG_0, 0),
8007 BPF_EXIT_INSN(),
8008 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008009 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008010 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02008011 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02008012 },
8013 {
Edward Creef65b1842017-08-07 15:27:12 +01008014 "bounds checks mixing signed and unsigned, variant 13",
Daniel Borkmann86412502017-07-21 00:00:25 +02008015 .insns = {
8016 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8017 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8018 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8019 BPF_LD_MAP_FD(BPF_REG_1, 0),
8020 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8021 BPF_FUNC_map_lookup_elem),
8022 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8023 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8024 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8025 BPF_MOV64_IMM(BPF_REG_2, 2),
8026 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8027 BPF_MOV64_IMM(BPF_REG_7, 1),
8028 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
8029 BPF_MOV64_IMM(BPF_REG_0, 0),
8030 BPF_EXIT_INSN(),
8031 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
8032 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
8033 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
8034 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8035 BPF_MOV64_IMM(BPF_REG_0, 0),
8036 BPF_EXIT_INSN(),
8037 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008038 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008039 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02008040 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02008041 },
8042 {
Edward Creef65b1842017-08-07 15:27:12 +01008043 "bounds checks mixing signed and unsigned, variant 14",
Daniel Borkmann86412502017-07-21 00:00:25 +02008044 .insns = {
8045 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
8046 offsetof(struct __sk_buff, mark)),
8047 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8048 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8049 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8050 BPF_LD_MAP_FD(BPF_REG_1, 0),
8051 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8052 BPF_FUNC_map_lookup_elem),
8053 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8054 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8055 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8056 BPF_MOV64_IMM(BPF_REG_2, -1),
8057 BPF_MOV64_IMM(BPF_REG_8, 2),
8058 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
8059 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
8060 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8061 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8062 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8063 BPF_MOV64_IMM(BPF_REG_0, 0),
8064 BPF_EXIT_INSN(),
8065 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
8066 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
8067 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008068 .fixup_map_hash_8b = { 4 },
Daniel Borkmann6f161012018-01-18 01:15:21 +01008069 .errstr = "R0 invalid mem access 'inv'",
Daniel Borkmann86412502017-07-21 00:00:25 +02008070 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02008071 },
8072 {
Edward Creef65b1842017-08-07 15:27:12 +01008073 "bounds checks mixing signed and unsigned, variant 15",
Daniel Borkmann86412502017-07-21 00:00:25 +02008074 .insns = {
8075 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8076 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8077 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8078 BPF_LD_MAP_FD(BPF_REG_1, 0),
8079 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8080 BPF_FUNC_map_lookup_elem),
8081 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8082 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8083 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8084 BPF_MOV64_IMM(BPF_REG_2, -6),
8085 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8086 BPF_MOV64_IMM(BPF_REG_0, 0),
8087 BPF_EXIT_INSN(),
8088 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8089 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
8090 BPF_MOV64_IMM(BPF_REG_0, 0),
8091 BPF_EXIT_INSN(),
8092 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8093 BPF_MOV64_IMM(BPF_REG_0, 0),
8094 BPF_EXIT_INSN(),
8095 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008096 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008097 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02008098 .result = REJECT,
8099 .result_unpriv = REJECT,
8100 },
Edward Cree545722c2017-07-21 14:36:57 +01008101 {
Edward Creef65b1842017-08-07 15:27:12 +01008102 "subtraction bounds (map value) variant 1",
Edward Cree545722c2017-07-21 14:36:57 +01008103 .insns = {
8104 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8105 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8106 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8107 BPF_LD_MAP_FD(BPF_REG_1, 0),
8108 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8109 BPF_FUNC_map_lookup_elem),
8110 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8111 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8112 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
8113 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8114 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
8115 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8116 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
8117 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8118 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8119 BPF_EXIT_INSN(),
8120 BPF_MOV64_IMM(BPF_REG_0, 0),
8121 BPF_EXIT_INSN(),
8122 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008123 .fixup_map_hash_8b = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01008124 .errstr = "R0 max value is outside of the array range",
8125 .result = REJECT,
8126 },
8127 {
8128 "subtraction bounds (map value) variant 2",
8129 .insns = {
8130 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8131 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8133 BPF_LD_MAP_FD(BPF_REG_1, 0),
8134 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8135 BPF_FUNC_map_lookup_elem),
8136 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8137 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8138 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
8139 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8140 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
8141 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8142 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8143 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8144 BPF_EXIT_INSN(),
8145 BPF_MOV64_IMM(BPF_REG_0, 0),
8146 BPF_EXIT_INSN(),
8147 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008148 .fixup_map_hash_8b = { 3 },
Edward Cree545722c2017-07-21 14:36:57 +01008149 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
8150 .result = REJECT,
Edward Cree545722c2017-07-21 14:36:57 +01008151 },
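/* MOV32 zero-extends its 32-bit result, while BPF_MOV64_IMM sign-extends
 * a negative 32-bit immediate to 64 bits. The following tests make sure
 * the verifier tracks both ranges: the zero-extended value collapses to 0
 * after the shift, the sign-extended ones leave an out-of-bounds offset.
 */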
Edward Cree69c4e8a2017-08-07 15:29:51 +01008152 {
Jann Horn2255f8d2017-12-18 20:12:01 -08008153 "bounds check based on zero-extended MOV",
8154 .insns = {
8155 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8156 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8157 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8158 BPF_LD_MAP_FD(BPF_REG_1, 0),
8159 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8160 BPF_FUNC_map_lookup_elem),
8161 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8162 /* r2 = 0x0000'0000'ffff'ffff */
8163 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
8164 /* r2 = 0 */
8165 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8166 /* no-op */
8167 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8168 /* access at offset 0 */
8169 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8170 /* exit */
8171 BPF_MOV64_IMM(BPF_REG_0, 0),
8172 BPF_EXIT_INSN(),
8173 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008174 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008175 .result = ACCEPT
8176 },
8177 {
8178 "bounds check based on sign-extended MOV. test1",
8179 .insns = {
8180 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8181 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8182 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8183 BPF_LD_MAP_FD(BPF_REG_1, 0),
8184 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8185 BPF_FUNC_map_lookup_elem),
8186 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8187 /* r2 = 0xffff'ffff'ffff'ffff */
8188 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8189 /* r2 = 0xffff'ffff */
8190 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8191 /* r0 = <oob pointer> */
8192 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8193 /* access to OOB pointer */
8194 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8195 /* exit */
8196 BPF_MOV64_IMM(BPF_REG_0, 0),
8197 BPF_EXIT_INSN(),
8198 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008199 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008200 .errstr = "map_value pointer and 4294967295",
8201 .result = REJECT
8202 },
8203 {
8204 "bounds check based on sign-extended MOV. test2",
8205 .insns = {
8206 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8207 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8208 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8209 BPF_LD_MAP_FD(BPF_REG_1, 0),
8210 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8211 BPF_FUNC_map_lookup_elem),
8212 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8213 /* r2 = 0xffff'ffff'ffff'ffff */
8214 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8215 /* r2 = 0xfff'ffff */
8216 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
8217 /* r0 = <oob pointer> */
8218 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8219 /* access to OOB pointer */
8220 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8221 /* exit */
8222 BPF_MOV64_IMM(BPF_REG_0, 0),
8223 BPF_EXIT_INSN(),
8224 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008225 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008226 .errstr = "R0 min value is outside of the array range",
8227 .result = REJECT
8228 },
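/* For the reg_off + var_off + insn_off tests the verifier must add up the
 * pointer's fixed offset, its variable offset and the displacement of the
 * load itself. The (1 << 29) - 1 style constants push that sum far outside
 * the 8-byte map value (and, in test2, seemingly past the verifier's
 * internal variable-offset limit), so both tests are rejected.
 */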
8229 {
8230 "bounds check based on reg_off + var_off + insn_off. test1",
8231 .insns = {
8232 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8233 offsetof(struct __sk_buff, mark)),
8234 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8235 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8236 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8237 BPF_LD_MAP_FD(BPF_REG_1, 0),
8238 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8239 BPF_FUNC_map_lookup_elem),
8240 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8241 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8242 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
8243 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8244 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8245 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8246 BPF_MOV64_IMM(BPF_REG_0, 0),
8247 BPF_EXIT_INSN(),
8248 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008249 .fixup_map_hash_8b = { 4 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008250 .errstr = "value_size=8 off=1073741825",
8251 .result = REJECT,
8252 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8253 },
8254 {
8255 "bounds check based on reg_off + var_off + insn_off. test2",
8256 .insns = {
8257 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8258 offsetof(struct __sk_buff, mark)),
8259 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8260 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8261 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8262 BPF_LD_MAP_FD(BPF_REG_1, 0),
8263 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8264 BPF_FUNC_map_lookup_elem),
8265 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8266 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8267 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
8268 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8269 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8270 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8271 BPF_MOV64_IMM(BPF_REG_0, 0),
8272 BPF_EXIT_INSN(),
8273 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008274 .fixup_map_hash_8b = { 4 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008275 .errstr = "value 1073741823",
8276 .result = REJECT,
8277 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8278 },
8279 {
8280 "bounds check after truncation of non-boundary-crossing range",
8281 .insns = {
8282 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8283 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8284 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8285 BPF_LD_MAP_FD(BPF_REG_1, 0),
8286 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8287 BPF_FUNC_map_lookup_elem),
8288 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8289 /* r1 = [0x00, 0xff] */
8290 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8291 BPF_MOV64_IMM(BPF_REG_2, 1),
8292 /* r2 = 0x10'0000'0000 */
8293 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
8294 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
8295 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8296 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
8297 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8298 /* r1 = [0x00, 0xff] */
8299 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
8300 /* r1 = 0 */
8301 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8302 /* no-op */
8303 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8304 /* access at offset 0 */
8305 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8306 /* exit */
8307 BPF_MOV64_IMM(BPF_REG_0, 0),
8308 BPF_EXIT_INSN(),
8309 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008310 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008311 .result = ACCEPT
8312 },
8313 {
8314 "bounds check after truncation of boundary-crossing range (1)",
8315 .insns = {
8316 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8317 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8318 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8319 BPF_LD_MAP_FD(BPF_REG_1, 0),
8320 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8321 BPF_FUNC_map_lookup_elem),
8322 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8323 /* r1 = [0x00, 0xff] */
8324 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8325 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8326 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
8327 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8328 /* r1 = [0xffff'ff80, 0xffff'ffff] or
8329 * [0x0000'0000, 0x0000'007f]
8330 */
8331 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
8332 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8333 /* r1 = [0x00, 0xff] or
8334 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8335 */
8336 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8337 /* r1 = 0 or
8338 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8339 */
8340 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8341 /* no-op or OOB pointer computation */
8342 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8343 /* potentially OOB access */
8344 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8345 /* exit */
8346 BPF_MOV64_IMM(BPF_REG_0, 0),
8347 BPF_EXIT_INSN(),
8348 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008349 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008350 /* not actually fully unbounded, but the bound is very high */
8351 .errstr = "R0 unbounded memory access",
8352 .result = REJECT
8353 },
8354 {
8355 "bounds check after truncation of boundary-crossing range (2)",
8356 .insns = {
8357 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8358 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8359 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8360 BPF_LD_MAP_FD(BPF_REG_1, 0),
8361 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8362 BPF_FUNC_map_lookup_elem),
8363 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8364 /* r1 = [0x00, 0xff] */
8365 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8366 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8367 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
8368 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8369 /* r1 = [0xffff'ff80, 0xffff'ffff] or
8370 * [0x0000'0000, 0x0000'007f]
8371 * difference to previous test: truncation via MOV32
8372 * instead of ALU32.
8373 */
8374 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
8375 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8376 /* r1 = [0x00, 0xff] or
8377 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8378 */
8379 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8380 /* r1 = 0 or
8381 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8382 */
8383 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8384 /* no-op or OOB pointer computation */
8385 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8386 /* potentially OOB access */
8387 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8388 /* exit */
8389 BPF_MOV64_IMM(BPF_REG_0, 0),
8390 BPF_EXIT_INSN(),
8391 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008392 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008393 /* not actually fully unbounded, but the bound is very high */
8394 .errstr = "R0 unbounded memory access",
8395 .result = REJECT
8396 },
8397 {
8398 "bounds check after wrapping 32-bit addition",
8399 .insns = {
8400 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8401 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8403 BPF_LD_MAP_FD(BPF_REG_1, 0),
8404 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8405 BPF_FUNC_map_lookup_elem),
8406 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
8407 /* r1 = 0x7fff'ffff */
8408 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
8409 /* r1 = 0xffff'fffe */
8410 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8411 /* r1 = 0 */
8412 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
8413 /* no-op */
8414 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8415 /* access at offset 0 */
8416 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8417 /* exit */
8418 BPF_MOV64_IMM(BPF_REG_0, 0),
8419 BPF_EXIT_INSN(),
8420 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008421 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008422 .result = ACCEPT
8423 },
8424 {
8425 "bounds check after shift with oversized count operand",
8426 .insns = {
8427 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8428 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8429 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8430 BPF_LD_MAP_FD(BPF_REG_1, 0),
8431 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8432 BPF_FUNC_map_lookup_elem),
8433 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8434 BPF_MOV64_IMM(BPF_REG_2, 32),
8435 BPF_MOV64_IMM(BPF_REG_1, 1),
8436 /* r1 = (u32)1 << (u32)32 = ? */
8437 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
8438 /* r1 = [0x0000, 0xffff] */
8439 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
8440 /* computes unknown pointer, potentially OOB */
8441 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8442 /* potentially OOB access */
8443 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8444 /* exit */
8445 BPF_MOV64_IMM(BPF_REG_0, 0),
8446 BPF_EXIT_INSN(),
8447 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008448 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008449 .errstr = "R0 max value is outside of the array range",
8450 .result = REJECT
8451 },
8452 {
8453 "bounds check after right shift of maybe-negative number",
8454 .insns = {
8455 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8456 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8457 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8458 BPF_LD_MAP_FD(BPF_REG_1, 0),
8459 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8460 BPF_FUNC_map_lookup_elem),
8461 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8462 /* r1 = [0x00, 0xff] */
8463 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8464 /* r1 = [-0x01, 0xfe] */
8465 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
8466 /* r1 = 0 or 0xff'ffff'ffff'ffff */
8467 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8468 /* r1 = 0 or 0xffff'ffff'ffff */
8469 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8470 /* computes unknown pointer, potentially OOB */
8471 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8472 /* potentially OOB access */
8473 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8474 /* exit */
8475 BPF_MOV64_IMM(BPF_REG_0, 0),
8476 BPF_EXIT_INSN(),
8477 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008478 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008479 .errstr = "R0 unbounded memory access",
8480 .result = REJECT
8481 },
8482 {
8483 "bounds check map access with off+size signed 32bit overflow. test1",
8484 .insns = {
8485 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8486 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8487 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8488 BPF_LD_MAP_FD(BPF_REG_1, 0),
8489 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8490 BPF_FUNC_map_lookup_elem),
8491 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8492 BPF_EXIT_INSN(),
8493 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
8494 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8495 BPF_JMP_A(0),
8496 BPF_EXIT_INSN(),
8497 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008498 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008499 .errstr = "map_value pointer and 2147483646",
8500 .result = REJECT
8501 },
8502 {
8503 "bounds check map access with off+size signed 32bit overflow. test2",
8504 .insns = {
8505 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8506 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8507 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8508 BPF_LD_MAP_FD(BPF_REG_1, 0),
8509 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8510 BPF_FUNC_map_lookup_elem),
8511 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8512 BPF_EXIT_INSN(),
8513 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8514 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8515 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8516 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8517 BPF_JMP_A(0),
8518 BPF_EXIT_INSN(),
8519 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008520 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008521 .errstr = "pointer offset 1073741822",
8522 .result = REJECT
8523 },
8524 {
8525 "bounds check map access with off+size signed 32bit overflow. test3",
8526 .insns = {
8527 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8528 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8529 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8530 BPF_LD_MAP_FD(BPF_REG_1, 0),
8531 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8532 BPF_FUNC_map_lookup_elem),
8533 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8534 BPF_EXIT_INSN(),
8535 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
8536 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
8537 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
8538 BPF_JMP_A(0),
8539 BPF_EXIT_INSN(),
8540 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008541 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008542 .errstr = "pointer offset -1073741822",
8543 .result = REJECT
8544 },
8545 {
8546 "bounds check map access with off+size signed 32bit overflow. test4",
8547 .insns = {
8548 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8549 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8550 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8551 BPF_LD_MAP_FD(BPF_REG_1, 0),
8552 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8553 BPF_FUNC_map_lookup_elem),
8554 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8555 BPF_EXIT_INSN(),
8556 BPF_MOV64_IMM(BPF_REG_1, 1000000),
8557 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
8558 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8559 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
8560 BPF_JMP_A(0),
8561 BPF_EXIT_INSN(),
8562 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008563 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008564 .errstr = "map_value pointer and 1000000000000",
8565 .result = REJECT
8566 },
8567 {
8568 "pointer/scalar confusion in state equality check (way 1)",
8569 .insns = {
8570 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8571 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8572 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8573 BPF_LD_MAP_FD(BPF_REG_1, 0),
8574 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8575 BPF_FUNC_map_lookup_elem),
8576 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
8577 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8578 BPF_JMP_A(1),
8579 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
8580 BPF_JMP_A(0),
8581 BPF_EXIT_INSN(),
8582 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008583 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008584 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08008585 .retval = POINTER_VALUE,
Jann Horn2255f8d2017-12-18 20:12:01 -08008586 .result_unpriv = REJECT,
8587 .errstr_unpriv = "R0 leaks addr as return value"
8588 },
8589 {
8590 "pointer/scalar confusion in state equality check (way 2)",
8591 .insns = {
8592 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8593 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8594 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8595 BPF_LD_MAP_FD(BPF_REG_1, 0),
8596 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8597 BPF_FUNC_map_lookup_elem),
8598 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
8599 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
8600 BPF_JMP_A(1),
8601 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8602 BPF_EXIT_INSN(),
8603 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008604 .fixup_map_hash_8b = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008605 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08008606 .retval = POINTER_VALUE,
Jann Horn2255f8d2017-12-18 20:12:01 -08008607 .result_unpriv = REJECT,
8608 .errstr_unpriv = "R0 leaks addr as return value"
8609 },
8610 {
Edward Cree69c4e8a2017-08-07 15:29:51 +01008611 "variable-offset ctx access",
8612 .insns = {
8613 /* Get an unknown value */
8614 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8615 /* Make it small and 4-byte aligned */
8616 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8617 /* add it to skb. We now have either &skb->len or
8618 * &skb->pkt_type, but we don't know which
8619 */
8620 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8621 /* dereference it */
8622 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8623 BPF_EXIT_INSN(),
8624 },
8625 .errstr = "variable ctx access var_off=(0x0; 0x4)",
8626 .result = REJECT,
8627 .prog_type = BPF_PROG_TYPE_LWT_IN,
8628 },
8629 {
8630 "variable-offset stack access",
8631 .insns = {
8632 /* Fill the top 8 bytes of the stack */
8633 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8634 /* Get an unknown value */
8635 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8636 /* Make it small and 4-byte aligned */
8637 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8638 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
8639 /* add it to fp. We now have either fp-4 or fp-8, but
8640 * we don't know which
8641 */
8642 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8643 /* dereference it */
8644 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
8645 BPF_EXIT_INSN(),
8646 },
8647 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
8648 .result = REJECT,
8649 .prog_type = BPF_PROG_TYPE_LWT_IN,
8650 },
Edward Creed893dc22017-08-23 15:09:46 +01008651 {
Jann Horn2255f8d2017-12-18 20:12:01 -08008652 "indirect variable-offset stack access",
8653 .insns = {
8654 /* Fill the top 8 bytes of the stack */
8655 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8656 /* Get an unknown value */
8657 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8658 /* Make it small and 4-byte aligned */
8659 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8660 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
8661 /* add it to fp. We now have either fp-4 or fp-8, but
8662 * we don't know which
8663 */
8664 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8665 /* dereference it indirectly */
8666 BPF_LD_MAP_FD(BPF_REG_1, 0),
8667 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8668 BPF_FUNC_map_lookup_elem),
8669 BPF_MOV64_IMM(BPF_REG_0, 0),
8670 BPF_EXIT_INSN(),
8671 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008672 .fixup_map_hash_8b = { 5 },
Jann Horn2255f8d2017-12-18 20:12:01 -08008673 .errstr = "variable stack read R2",
8674 .result = REJECT,
8675 .prog_type = BPF_PROG_TYPE_LWT_IN,
8676 },
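/* The direct stack access tests below add immediates around 2^31, 2^30 and
 * 2^29 so that the fp-based pointer wraps or exceeds the allowed pointer
 * offset; all three are rejected, with the reported error depending on how
 * far the accumulated offset gets.
 */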
8677 {
8678 "direct stack access with 32-bit wraparound. test1",
8679 .insns = {
8680 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8681 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8682 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8683 BPF_MOV32_IMM(BPF_REG_0, 0),
8684 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8685 BPF_EXIT_INSN()
8686 },
8687 .errstr = "fp pointer and 2147483647",
8688 .result = REJECT
8689 },
8690 {
8691 "direct stack access with 32-bit wraparound. test2",
8692 .insns = {
8693 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8694 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
8695 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
8696 BPF_MOV32_IMM(BPF_REG_0, 0),
8697 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8698 BPF_EXIT_INSN()
8699 },
8700 .errstr = "fp pointer and 1073741823",
8701 .result = REJECT
8702 },
8703 {
8704 "direct stack access with 32-bit wraparound. test3",
8705 .insns = {
8706 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8707 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
8708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
8709 BPF_MOV32_IMM(BPF_REG_0, 0),
8710 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8711 BPF_EXIT_INSN()
8712 },
8713 .errstr = "fp pointer offset 1073741822",
8714 .result = REJECT
8715 },
8716 {
Edward Creed893dc22017-08-23 15:09:46 +01008717 "liveness pruning and write screening",
8718 .insns = {
8719 /* Get an unknown value */
8720 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8721 /* branch conditions teach us nothing about R2 */
8722 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
8723 BPF_MOV64_IMM(BPF_REG_0, 0),
8724 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
8725 BPF_MOV64_IMM(BPF_REG_0, 0),
8726 BPF_EXIT_INSN(),
8727 },
8728 .errstr = "R0 !read_ok",
8729 .result = REJECT,
8730 .prog_type = BPF_PROG_TYPE_LWT_IN,
8731 },
Alexei Starovoitovdf20cb72017-08-23 15:10:26 +01008732 {
8733 "varlen_map_value_access pruning",
8734 .insns = {
8735 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8736 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8737 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8738 BPF_LD_MAP_FD(BPF_REG_1, 0),
8739 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8740 BPF_FUNC_map_lookup_elem),
8741 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8742 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
8743 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
8744 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
8745 BPF_MOV32_IMM(BPF_REG_1, 0),
8746 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
8747 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8748 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
8749 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
8750 offsetof(struct test_val, foo)),
8751 BPF_EXIT_INSN(),
8752 },
Prashant Bhole908142e2018-10-09 10:04:53 +09008753 .fixup_map_hash_48b = { 3 },
Alexei Starovoitovdf20cb72017-08-23 15:10:26 +01008754 .errstr_unpriv = "R0 leaks addr",
8755 .errstr = "R0 unbounded memory access",
8756 .result_unpriv = REJECT,
8757 .result = REJECT,
8758 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8759 },
Edward Creee67b8a62017-09-15 14:37:38 +01008760 {
8761 "invalid 64-bit BPF_END",
8762 .insns = {
8763 BPF_MOV32_IMM(BPF_REG_0, 0),
8764 {
8765 .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
8766 .dst_reg = BPF_REG_0,
8767 .src_reg = 0,
8768 .off = 0,
8769 .imm = 32,
8770 },
8771 BPF_EXIT_INSN(),
8772 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +01008773 .errstr = "unknown opcode d7",
Edward Creee67b8a62017-09-15 14:37:38 +01008774 .result = REJECT,
8775 },
Daniel Borkmann22c88522017-09-25 02:25:53 +02008776 {
Daniel Borkmann65073a62018-01-31 12:58:56 +01008777 "XDP, using ifindex from netdev",
8778 .insns = {
8779 BPF_MOV64_IMM(BPF_REG_0, 0),
8780 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8781 offsetof(struct xdp_md, ingress_ifindex)),
8782 BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
8783 BPF_MOV64_IMM(BPF_REG_0, 1),
8784 BPF_EXIT_INSN(),
8785 },
8786 .result = ACCEPT,
8787 .prog_type = BPF_PROG_TYPE_XDP,
8788 .retval = 1,
8789 },
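/* xdp_md->data_meta points at the metadata area in front of data. The meta
 * access tests check that loads through it are only allowed once the
 * pointer has been bounds-checked against data (not data_end), and that
 * bpf_xdp_adjust_meta() invalidates previously derived packet pointers.
 */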
8790 {
Daniel Borkmann22c88522017-09-25 02:25:53 +02008791 "meta access, test1",
8792 .insns = {
8793 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8794 offsetof(struct xdp_md, data_meta)),
8795 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8796 offsetof(struct xdp_md, data)),
8797 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8798 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8799 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
8800 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8801 BPF_MOV64_IMM(BPF_REG_0, 0),
8802 BPF_EXIT_INSN(),
8803 },
8804 .result = ACCEPT,
8805 .prog_type = BPF_PROG_TYPE_XDP,
8806 },
8807 {
8808 "meta access, test2",
8809 .insns = {
8810 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8811 offsetof(struct xdp_md, data_meta)),
8812 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8813 offsetof(struct xdp_md, data)),
8814 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8815 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
8816 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8817 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
8818 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8819 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8820 BPF_MOV64_IMM(BPF_REG_0, 0),
8821 BPF_EXIT_INSN(),
8822 },
8823 .result = REJECT,
8824 .errstr = "invalid access to packet, off=-8",
8825 .prog_type = BPF_PROG_TYPE_XDP,
8826 },
8827 {
8828 "meta access, test3",
8829 .insns = {
8830 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8831 offsetof(struct xdp_md, data_meta)),
8832 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8833 offsetof(struct xdp_md, data_end)),
8834 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8835 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8836 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
8837 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8838 BPF_MOV64_IMM(BPF_REG_0, 0),
8839 BPF_EXIT_INSN(),
8840 },
8841 .result = REJECT,
8842 .errstr = "invalid access to packet",
8843 .prog_type = BPF_PROG_TYPE_XDP,
8844 },
8845 {
8846 "meta access, test4",
8847 .insns = {
8848 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8849 offsetof(struct xdp_md, data_meta)),
8850 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8851 offsetof(struct xdp_md, data_end)),
8852 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8853 offsetof(struct xdp_md, data)),
8854 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8855 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8856 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
8857 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8858 BPF_MOV64_IMM(BPF_REG_0, 0),
8859 BPF_EXIT_INSN(),
8860 },
8861 .result = REJECT,
8862 .errstr = "invalid access to packet",
8863 .prog_type = BPF_PROG_TYPE_XDP,
8864 },
8865 {
8866 "meta access, test5",
8867 .insns = {
8868 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8869 offsetof(struct xdp_md, data_meta)),
8870 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8871 offsetof(struct xdp_md, data)),
8872 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8873 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8874 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
8875 BPF_MOV64_IMM(BPF_REG_2, -8),
8876 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8877 BPF_FUNC_xdp_adjust_meta),
8878 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
8879 BPF_MOV64_IMM(BPF_REG_0, 0),
8880 BPF_EXIT_INSN(),
8881 },
8882 .result = REJECT,
8883 .errstr = "R3 !read_ok",
8884 .prog_type = BPF_PROG_TYPE_XDP,
8885 },
8886 {
8887 "meta access, test6",
8888 .insns = {
8889 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8890 offsetof(struct xdp_md, data_meta)),
8891 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8892 offsetof(struct xdp_md, data)),
8893 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8894 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8895 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8896 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
8897 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
8898 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8899 BPF_MOV64_IMM(BPF_REG_0, 0),
8900 BPF_EXIT_INSN(),
8901 },
8902 .result = REJECT,
8903 .errstr = "invalid access to packet",
8904 .prog_type = BPF_PROG_TYPE_XDP,
8905 },
8906 {
8907 "meta access, test7",
8908 .insns = {
8909 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8910 offsetof(struct xdp_md, data_meta)),
8911 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8912 offsetof(struct xdp_md, data)),
8913 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8914 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8915 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8916 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
8917 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8918 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8919 BPF_MOV64_IMM(BPF_REG_0, 0),
8920 BPF_EXIT_INSN(),
8921 },
8922 .result = ACCEPT,
8923 .prog_type = BPF_PROG_TYPE_XDP,
8924 },
8925 {
8926 "meta access, test8",
8927 .insns = {
8928 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8929 offsetof(struct xdp_md, data_meta)),
8930 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8931 offsetof(struct xdp_md, data)),
8932 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8933 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
8934 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8935 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8936 BPF_MOV64_IMM(BPF_REG_0, 0),
8937 BPF_EXIT_INSN(),
8938 },
8939 .result = ACCEPT,
8940 .prog_type = BPF_PROG_TYPE_XDP,
8941 },
8942 {
8943 "meta access, test9",
8944 .insns = {
8945 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8946 offsetof(struct xdp_md, data_meta)),
8947 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8948 offsetof(struct xdp_md, data)),
8949 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8950 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
8951 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
8952 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8953 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8954 BPF_MOV64_IMM(BPF_REG_0, 0),
8955 BPF_EXIT_INSN(),
8956 },
8957 .result = REJECT,
8958 .errstr = "invalid access to packet",
8959 .prog_type = BPF_PROG_TYPE_XDP,
8960 },
8961 {
8962 "meta access, test10",
8963 .insns = {
8964 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8965 offsetof(struct xdp_md, data_meta)),
8966 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8967 offsetof(struct xdp_md, data)),
8968 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8969 offsetof(struct xdp_md, data_end)),
8970 BPF_MOV64_IMM(BPF_REG_5, 42),
8971 BPF_MOV64_IMM(BPF_REG_6, 24),
8972 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
8973 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
8974 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
8975 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
8976 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
8977 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
8978 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
8979 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
8980 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
8981 BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
8982 BPF_MOV64_IMM(BPF_REG_0, 0),
8983 BPF_EXIT_INSN(),
8984 },
8985 .result = REJECT,
8986 .errstr = "invalid access to packet",
8987 .prog_type = BPF_PROG_TYPE_XDP,
8988 },
8989 {
8990 "meta access, test11",
8991 .insns = {
8992 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8993 offsetof(struct xdp_md, data_meta)),
8994 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8995 offsetof(struct xdp_md, data)),
8996 BPF_MOV64_IMM(BPF_REG_5, 42),
8997 BPF_MOV64_IMM(BPF_REG_6, 24),
8998 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
8999 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
9000 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
9001 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
9002 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
9003 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9004 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9005 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
9006 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
9007 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
9008 BPF_MOV64_IMM(BPF_REG_0, 0),
9009 BPF_EXIT_INSN(),
9010 },
9011 .result = ACCEPT,
9012 .prog_type = BPF_PROG_TYPE_XDP,
9013 },
9014 {
9015 "meta access, test12",
9016 .insns = {
9017 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9018 offsetof(struct xdp_md, data_meta)),
9019 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9020 offsetof(struct xdp_md, data)),
9021 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9022 offsetof(struct xdp_md, data_end)),
9023 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
9024 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9025 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
9026 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
9027 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9028 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9029 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
9030 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9031 BPF_MOV64_IMM(BPF_REG_0, 0),
9032 BPF_EXIT_INSN(),
9033 },
9034 .result = ACCEPT,
9035 .prog_type = BPF_PROG_TYPE_XDP,
9036 },
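	/* Adding an offset to the ctx register turns it into a modified ctx
	 * pointer; any load through it must be rejected.
	 */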
9037 {
9038 "arithmetic ops make PTR_TO_CTX unusable",
9039 .insns = {
9040 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
9041 offsetof(struct __sk_buff, data) -
9042 offsetof(struct __sk_buff, mark)),
9043 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9044 offsetof(struct __sk_buff, mark)),
9045 BPF_EXIT_INSN(),
9046 },
9047 .errstr = "dereference of modified ctx ptr",
9048 .result = REJECT,
9049 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9050 },
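	/* Subtracting the packet start from pkt_end yields a plain scalar
	 * (the packet length), which is fine to compute and return; the test
	 * expects TEST_DATA_LEN back.
	 */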
9051 {
9052 "pkt_end - pkt_start is allowed",
9053 .insns = {
9054 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9055 offsetof(struct __sk_buff, data_end)),
9056 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9057 offsetof(struct __sk_buff, data)),
9058 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
9059 BPF_EXIT_INSN(),
9060 },
9061 .result = ACCEPT,
9062 .retval = TEST_DATA_LEN,
9063 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9064 },
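	/* The long "XDP pkt read" series below follows one pattern: load
	 * data/data_end, advance a copy of the data pointer by a constant,
	 * compare it against data_end, then access the packet.  "good access"
	 * variants stay within the range proven by the branch; "bad access"
	 * variants either read past it or rely on a branch that proves
	 * nothing, and must be rejected with "R1 offset is outside of the
	 * packet".  The first two tests additionally check that pkt_end
	 * itself may not be modified.
	 */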
9065 {
9066 "XDP pkt read, pkt_end mangling, bad access 1",
9067 .insns = {
9068 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9069 offsetof(struct xdp_md, data)),
9070 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9071 offsetof(struct xdp_md, data_end)),
9072 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9073 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9074 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
9075 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9076 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9077 BPF_MOV64_IMM(BPF_REG_0, 0),
9078 BPF_EXIT_INSN(),
9079 },
9080 .errstr = "R3 pointer arithmetic on pkt_end",
9081 .result = REJECT,
9082 .prog_type = BPF_PROG_TYPE_XDP,
9083 },
9084 {
9085 "XDP pkt read, pkt_end mangling, bad access 2",
9086 .insns = {
9087 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9088 offsetof(struct xdp_md, data)),
9089 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9090 offsetof(struct xdp_md, data_end)),
9091 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9092 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9093 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
9094 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9095 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9096 BPF_MOV64_IMM(BPF_REG_0, 0),
9097 BPF_EXIT_INSN(),
9098 },
9099 .errstr = "R3 pointer arithmetic on pkt_end",
9100 .result = REJECT,
9101 .prog_type = BPF_PROG_TYPE_XDP,
9102 },
9103 {
9104 "XDP pkt read, pkt_data' > pkt_end, good access",
9105 .insns = {
9106 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9107 offsetof(struct xdp_md, data)),
9108 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9109 offsetof(struct xdp_md, data_end)),
9110 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9112 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9113 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9114 BPF_MOV64_IMM(BPF_REG_0, 0),
9115 BPF_EXIT_INSN(),
9116 },
9117 .result = ACCEPT,
9118 .prog_type = BPF_PROG_TYPE_XDP,
9119 },
9120 {
9121 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
9122 .insns = {
9123 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9124 offsetof(struct xdp_md, data)),
9125 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9126 offsetof(struct xdp_md, data_end)),
9127 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9128 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9129 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9130 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9131 BPF_MOV64_IMM(BPF_REG_0, 0),
9132 BPF_EXIT_INSN(),
9133 },
9134 .errstr = "R1 offset is outside of the packet",
9135 .result = REJECT,
9136 .prog_type = BPF_PROG_TYPE_XDP,
9137 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9138 },
9139 {
9140 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
9141 .insns = {
9142 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9143 offsetof(struct xdp_md, data)),
9144 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9145 offsetof(struct xdp_md, data_end)),
9146 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9147 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9148 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
9149 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9150 BPF_MOV64_IMM(BPF_REG_0, 0),
9151 BPF_EXIT_INSN(),
9152 },
9153 .errstr = "R1 offset is outside of the packet",
9154 .result = REJECT,
9155 .prog_type = BPF_PROG_TYPE_XDP,
9156 },
9157 {
9158 "XDP pkt read, pkt_end > pkt_data', good access",
9159 .insns = {
9160 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9161 offsetof(struct xdp_md, data)),
9162 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9163 offsetof(struct xdp_md, data_end)),
9164 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9165 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9166 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9167 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9168 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9169 BPF_MOV64_IMM(BPF_REG_0, 0),
9170 BPF_EXIT_INSN(),
9171 },
9172 .result = ACCEPT,
9173 .prog_type = BPF_PROG_TYPE_XDP,
9174 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9175 },
9176 {
9177 "XDP pkt read, pkt_end > pkt_data', bad access 1",
9178 .insns = {
9179 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9180 offsetof(struct xdp_md, data)),
9181 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9182 offsetof(struct xdp_md, data_end)),
9183 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9184 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9185 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9186 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9187 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9188 BPF_MOV64_IMM(BPF_REG_0, 0),
9189 BPF_EXIT_INSN(),
9190 },
9191 .errstr = "R1 offset is outside of the packet",
9192 .result = REJECT,
9193 .prog_type = BPF_PROG_TYPE_XDP,
9194 },
9195 {
9196 "XDP pkt read, pkt_end > pkt_data', bad access 2",
9197 .insns = {
9198 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9199 offsetof(struct xdp_md, data)),
9200 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9201 offsetof(struct xdp_md, data_end)),
9202 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9203 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9204 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9205 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9206 BPF_MOV64_IMM(BPF_REG_0, 0),
9207 BPF_EXIT_INSN(),
9208 },
9209 .errstr = "R1 offset is outside of the packet",
9210 .result = REJECT,
9211 .prog_type = BPF_PROG_TYPE_XDP,
9212 },
9213 {
9214 "XDP pkt read, pkt_data' < pkt_end, good access",
9215 .insns = {
9216 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9217 offsetof(struct xdp_md, data)),
9218 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9219 offsetof(struct xdp_md, data_end)),
9220 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9222 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9223 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9224 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9225 BPF_MOV64_IMM(BPF_REG_0, 0),
9226 BPF_EXIT_INSN(),
9227 },
9228 .result = ACCEPT,
9229 .prog_type = BPF_PROG_TYPE_XDP,
9230 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9231 },
9232 {
9233 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
9234 .insns = {
9235 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9236 offsetof(struct xdp_md, data)),
9237 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9238 offsetof(struct xdp_md, data_end)),
9239 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9240 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9241 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9242 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9243 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9244 BPF_MOV64_IMM(BPF_REG_0, 0),
9245 BPF_EXIT_INSN(),
9246 },
9247 .errstr = "R1 offset is outside of the packet",
9248 .result = REJECT,
9249 .prog_type = BPF_PROG_TYPE_XDP,
9250 },
9251 {
9252 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
9253 .insns = {
9254 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9255 offsetof(struct xdp_md, data)),
9256 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9257 offsetof(struct xdp_md, data_end)),
9258 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9259 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9260 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9261 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9262 BPF_MOV64_IMM(BPF_REG_0, 0),
9263 BPF_EXIT_INSN(),
9264 },
9265 .errstr = "R1 offset is outside of the packet",
9266 .result = REJECT,
9267 .prog_type = BPF_PROG_TYPE_XDP,
9268 },
9269 {
9270 "XDP pkt read, pkt_end < pkt_data', good access",
9271 .insns = {
9272 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9273 offsetof(struct xdp_md, data)),
9274 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9275 offsetof(struct xdp_md, data_end)),
9276 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9277 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9278 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9279 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9280 BPF_MOV64_IMM(BPF_REG_0, 0),
9281 BPF_EXIT_INSN(),
9282 },
9283 .result = ACCEPT,
9284 .prog_type = BPF_PROG_TYPE_XDP,
9285 },
9286 {
9287 "XDP pkt read, pkt_end < pkt_data', bad access 1",
9288 .insns = {
9289 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9290 offsetof(struct xdp_md, data)),
9291 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9292 offsetof(struct xdp_md, data_end)),
9293 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9294 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9295 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9296 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9297 BPF_MOV64_IMM(BPF_REG_0, 0),
9298 BPF_EXIT_INSN(),
9299 },
9300 .errstr = "R1 offset is outside of the packet",
9301 .result = REJECT,
9302 .prog_type = BPF_PROG_TYPE_XDP,
9303 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9304 },
9305 {
9306 "XDP pkt read, pkt_end < pkt_data', bad access 2",
9307 .insns = {
9308 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9309 offsetof(struct xdp_md, data)),
9310 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9311 offsetof(struct xdp_md, data_end)),
9312 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9313 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9314 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
9315 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9316 BPF_MOV64_IMM(BPF_REG_0, 0),
9317 BPF_EXIT_INSN(),
9318 },
9319 .errstr = "R1 offset is outside of the packet",
9320 .result = REJECT,
9321 .prog_type = BPF_PROG_TYPE_XDP,
9322 },
9323 {
9324 "XDP pkt read, pkt_data' >= pkt_end, good access",
9325 .insns = {
9326 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9327 offsetof(struct xdp_md, data)),
9328 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9329 offsetof(struct xdp_md, data_end)),
9330 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9331 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9332 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9333 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9334 BPF_MOV64_IMM(BPF_REG_0, 0),
9335 BPF_EXIT_INSN(),
9336 },
9337 .result = ACCEPT,
9338 .prog_type = BPF_PROG_TYPE_XDP,
9339 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9340 },
9341 {
9342 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
9343 .insns = {
9344 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9345 offsetof(struct xdp_md, data)),
9346 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9347 offsetof(struct xdp_md, data_end)),
9348 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9349 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9350 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9351 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9352 BPF_MOV64_IMM(BPF_REG_0, 0),
9353 BPF_EXIT_INSN(),
9354 },
9355 .errstr = "R1 offset is outside of the packet",
9356 .result = REJECT,
9357 .prog_type = BPF_PROG_TYPE_XDP,
9358 },
9359 {
9360 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
9361 .insns = {
9362 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9363 offsetof(struct xdp_md, data)),
9364 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9365 offsetof(struct xdp_md, data_end)),
9366 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9367 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9368 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
9369 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9370 BPF_MOV64_IMM(BPF_REG_0, 0),
9371 BPF_EXIT_INSN(),
9372 },
9373 .errstr = "R1 offset is outside of the packet",
9374 .result = REJECT,
9375 .prog_type = BPF_PROG_TYPE_XDP,
9376 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9377 },
9378 {
9379 "XDP pkt read, pkt_end >= pkt_data', good access",
9380 .insns = {
9381 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9382 offsetof(struct xdp_md, data)),
9383 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9384 offsetof(struct xdp_md, data_end)),
9385 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9386 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9387 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9388 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9389 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9390 BPF_MOV64_IMM(BPF_REG_0, 0),
9391 BPF_EXIT_INSN(),
9392 },
9393 .result = ACCEPT,
9394 .prog_type = BPF_PROG_TYPE_XDP,
9395 },
9396 {
9397 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
9398 .insns = {
9399 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9400 offsetof(struct xdp_md, data)),
9401 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9402 offsetof(struct xdp_md, data_end)),
9403 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9404 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9405 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9406 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9407 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9408 BPF_MOV64_IMM(BPF_REG_0, 0),
9409 BPF_EXIT_INSN(),
9410 },
9411 .errstr = "R1 offset is outside of the packet",
9412 .result = REJECT,
9413 .prog_type = BPF_PROG_TYPE_XDP,
9414 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9415 },
9416 {
9417 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
9418 .insns = {
9419 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9420 offsetof(struct xdp_md, data)),
9421 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9422 offsetof(struct xdp_md, data_end)),
9423 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9424 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9425 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9426 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9427 BPF_MOV64_IMM(BPF_REG_0, 0),
9428 BPF_EXIT_INSN(),
9429 },
9430 .errstr = "R1 offset is outside of the packet",
9431 .result = REJECT,
9432 .prog_type = BPF_PROG_TYPE_XDP,
9433 },
9434 {
9435 "XDP pkt read, pkt_data' <= pkt_end, good access",
9436 .insns = {
9437 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9438 offsetof(struct xdp_md, data)),
9439 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9440 offsetof(struct xdp_md, data_end)),
9441 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9442 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9443 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9444 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9445 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9446 BPF_MOV64_IMM(BPF_REG_0, 0),
9447 BPF_EXIT_INSN(),
9448 },
9449 .result = ACCEPT,
9450 .prog_type = BPF_PROG_TYPE_XDP,
9451 },
9452 {
9453 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
9454 .insns = {
9455 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9456 offsetof(struct xdp_md, data)),
9457 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9458 offsetof(struct xdp_md, data_end)),
9459 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9460 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9461 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9462 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9463 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9464 BPF_MOV64_IMM(BPF_REG_0, 0),
9465 BPF_EXIT_INSN(),
9466 },
9467 .errstr = "R1 offset is outside of the packet",
9468 .result = REJECT,
9469 .prog_type = BPF_PROG_TYPE_XDP,
9470 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9471 },
9472 {
9473 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
9474 .insns = {
9475 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9476 offsetof(struct xdp_md, data)),
9477 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9478 offsetof(struct xdp_md, data_end)),
9479 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9481 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9482 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9483 BPF_MOV64_IMM(BPF_REG_0, 0),
9484 BPF_EXIT_INSN(),
9485 },
9486 .errstr = "R1 offset is outside of the packet",
9487 .result = REJECT,
9488 .prog_type = BPF_PROG_TYPE_XDP,
9489 },
9490 {
9491 "XDP pkt read, pkt_end <= pkt_data', good access",
9492 .insns = {
9493 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9494 offsetof(struct xdp_md, data)),
9495 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9496 offsetof(struct xdp_md, data_end)),
9497 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9498 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9499 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9500 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9501 BPF_MOV64_IMM(BPF_REG_0, 0),
9502 BPF_EXIT_INSN(),
9503 },
9504 .result = ACCEPT,
9505 .prog_type = BPF_PROG_TYPE_XDP,
9506 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9507 },
9508 {
9509 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
9510 .insns = {
9511 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9512 offsetof(struct xdp_md, data)),
9513 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9514 offsetof(struct xdp_md, data_end)),
9515 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9516 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9517 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9518 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9519 BPF_MOV64_IMM(BPF_REG_0, 0),
9520 BPF_EXIT_INSN(),
9521 },
9522 .errstr = "R1 offset is outside of the packet",
9523 .result = REJECT,
9524 .prog_type = BPF_PROG_TYPE_XDP,
9525 },
9526 {
9527 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
9528 .insns = {
9529 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9530 offsetof(struct xdp_md, data)),
9531 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9532 offsetof(struct xdp_md, data_end)),
9533 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9534 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9535 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
9536 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9537 BPF_MOV64_IMM(BPF_REG_0, 0),
9538 BPF_EXIT_INSN(),
9539 },
9540 .errstr = "R1 offset is outside of the packet",
9541 .result = REJECT,
9542 .prog_type = BPF_PROG_TYPE_XDP,
9543 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9544 },
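	/* Same pattern as above, now bounding a copy of data_meta against
	 * data before the access instead of data against data_end.
	 */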
9545 {
9546 "XDP pkt read, pkt_meta' > pkt_data, good access",
9547 .insns = {
9548 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9549 offsetof(struct xdp_md, data_meta)),
9550 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9551 offsetof(struct xdp_md, data)),
9552 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9553 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9554 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9555 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9556 BPF_MOV64_IMM(BPF_REG_0, 0),
9557 BPF_EXIT_INSN(),
9558 },
9559 .result = ACCEPT,
9560 .prog_type = BPF_PROG_TYPE_XDP,
9561 },
9562 {
9563 "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
9564 .insns = {
9565 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9566 offsetof(struct xdp_md, data_meta)),
9567 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9568 offsetof(struct xdp_md, data)),
9569 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9570 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9571 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9572 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9573 BPF_MOV64_IMM(BPF_REG_0, 0),
9574 BPF_EXIT_INSN(),
9575 },
9576 .errstr = "R1 offset is outside of the packet",
9577 .result = REJECT,
9578 .prog_type = BPF_PROG_TYPE_XDP,
9579 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9580 },
9581 {
9582 "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
9583 .insns = {
9584 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9585 offsetof(struct xdp_md, data_meta)),
9586 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9587 offsetof(struct xdp_md, data)),
9588 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9589 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9590 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
9591 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9592 BPF_MOV64_IMM(BPF_REG_0, 0),
9593 BPF_EXIT_INSN(),
9594 },
9595 .errstr = "R1 offset is outside of the packet",
9596 .result = REJECT,
9597 .prog_type = BPF_PROG_TYPE_XDP,
9598 },
9599 {
9600 "XDP pkt read, pkt_data > pkt_meta', good access",
9601 .insns = {
9602 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9603 offsetof(struct xdp_md, data_meta)),
9604 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9605 offsetof(struct xdp_md, data)),
9606 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9607 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9608 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9609 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9610 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9611 BPF_MOV64_IMM(BPF_REG_0, 0),
9612 BPF_EXIT_INSN(),
9613 },
9614 .result = ACCEPT,
9615 .prog_type = BPF_PROG_TYPE_XDP,
9616 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9617 },
9618 {
9619 "XDP pkt read, pkt_data > pkt_meta', bad access 1",
9620 .insns = {
9621 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9622 offsetof(struct xdp_md, data_meta)),
9623 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9624 offsetof(struct xdp_md, data)),
9625 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9626 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9627 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9628 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9629 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9630 BPF_MOV64_IMM(BPF_REG_0, 0),
9631 BPF_EXIT_INSN(),
9632 },
9633 .errstr = "R1 offset is outside of the packet",
9634 .result = REJECT,
9635 .prog_type = BPF_PROG_TYPE_XDP,
9636 },
9637 {
9638 "XDP pkt read, pkt_data > pkt_meta', bad access 2",
9639 .insns = {
9640 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9641 offsetof(struct xdp_md, data_meta)),
9642 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9643 offsetof(struct xdp_md, data)),
9644 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9645 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9646 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9647 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9648 BPF_MOV64_IMM(BPF_REG_0, 0),
9649 BPF_EXIT_INSN(),
9650 },
9651 .errstr = "R1 offset is outside of the packet",
9652 .result = REJECT,
9653 .prog_type = BPF_PROG_TYPE_XDP,
9654 },
9655 {
9656 "XDP pkt read, pkt_meta' < pkt_data, good access",
9657 .insns = {
9658 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9659 offsetof(struct xdp_md, data_meta)),
9660 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9661 offsetof(struct xdp_md, data)),
9662 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9663 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9664 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9665 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9666 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9667 BPF_MOV64_IMM(BPF_REG_0, 0),
9668 BPF_EXIT_INSN(),
9669 },
9670 .result = ACCEPT,
9671 .prog_type = BPF_PROG_TYPE_XDP,
9672 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9673 },
9674 {
9675 "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
9676 .insns = {
9677 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9678 offsetof(struct xdp_md, data_meta)),
9679 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9680 offsetof(struct xdp_md, data)),
9681 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9682 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9683 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9684 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9685 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9686 BPF_MOV64_IMM(BPF_REG_0, 0),
9687 BPF_EXIT_INSN(),
9688 },
9689 .errstr = "R1 offset is outside of the packet",
9690 .result = REJECT,
9691 .prog_type = BPF_PROG_TYPE_XDP,
9692 },
9693 {
9694 "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
9695 .insns = {
9696 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9697 offsetof(struct xdp_md, data_meta)),
9698 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9699 offsetof(struct xdp_md, data)),
9700 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9701 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9702 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9703 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9704 BPF_MOV64_IMM(BPF_REG_0, 0),
9705 BPF_EXIT_INSN(),
9706 },
9707 .errstr = "R1 offset is outside of the packet",
9708 .result = REJECT,
9709 .prog_type = BPF_PROG_TYPE_XDP,
9710 },
9711 {
9712 "XDP pkt read, pkt_data < pkt_meta', good access",
9713 .insns = {
9714 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9715 offsetof(struct xdp_md, data_meta)),
9716 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9717 offsetof(struct xdp_md, data)),
9718 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9719 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9720 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9721 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9722 BPF_MOV64_IMM(BPF_REG_0, 0),
9723 BPF_EXIT_INSN(),
9724 },
9725 .result = ACCEPT,
9726 .prog_type = BPF_PROG_TYPE_XDP,
9727 },
9728 {
9729 "XDP pkt read, pkt_data < pkt_meta', bad access 1",
9730 .insns = {
9731 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9732 offsetof(struct xdp_md, data_meta)),
9733 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9734 offsetof(struct xdp_md, data)),
9735 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9736 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9737 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9738 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9739 BPF_MOV64_IMM(BPF_REG_0, 0),
9740 BPF_EXIT_INSN(),
9741 },
9742 .errstr = "R1 offset is outside of the packet",
9743 .result = REJECT,
9744 .prog_type = BPF_PROG_TYPE_XDP,
9745 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9746 },
9747 {
9748 "XDP pkt read, pkt_data < pkt_meta', bad access 2",
9749 .insns = {
9750 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9751 offsetof(struct xdp_md, data_meta)),
9752 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9753 offsetof(struct xdp_md, data)),
9754 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9755 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9756 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
9757 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9758 BPF_MOV64_IMM(BPF_REG_0, 0),
9759 BPF_EXIT_INSN(),
9760 },
9761 .errstr = "R1 offset is outside of the packet",
9762 .result = REJECT,
9763 .prog_type = BPF_PROG_TYPE_XDP,
9764 },
9765 {
9766 "XDP pkt read, pkt_meta' >= pkt_data, good access",
9767 .insns = {
9768 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9769 offsetof(struct xdp_md, data_meta)),
9770 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9771 offsetof(struct xdp_md, data)),
9772 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9773 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9774 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9775 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9776 BPF_MOV64_IMM(BPF_REG_0, 0),
9777 BPF_EXIT_INSN(),
9778 },
9779 .result = ACCEPT,
9780 .prog_type = BPF_PROG_TYPE_XDP,
9781 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9782 },
9783 {
9784 "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
9785 .insns = {
9786 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9787 offsetof(struct xdp_md, data_meta)),
9788 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9789 offsetof(struct xdp_md, data)),
9790 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9791 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9792 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9793 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9794 BPF_MOV64_IMM(BPF_REG_0, 0),
9795 BPF_EXIT_INSN(),
9796 },
9797 .errstr = "R1 offset is outside of the packet",
9798 .result = REJECT,
9799 .prog_type = BPF_PROG_TYPE_XDP,
9800 },
9801 {
9802 "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
9803 .insns = {
9804 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9805 offsetof(struct xdp_md, data_meta)),
9806 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9807 offsetof(struct xdp_md, data)),
9808 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9809 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9810 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
9811 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9812 BPF_MOV64_IMM(BPF_REG_0, 0),
9813 BPF_EXIT_INSN(),
9814 },
9815 .errstr = "R1 offset is outside of the packet",
9816 .result = REJECT,
9817 .prog_type = BPF_PROG_TYPE_XDP,
9818 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9819 },
9820 {
9821 "XDP pkt read, pkt_data >= pkt_meta', good access",
9822 .insns = {
9823 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9824 offsetof(struct xdp_md, data_meta)),
9825 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9826 offsetof(struct xdp_md, data)),
9827 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9828 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9829 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9830 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9831 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9832 BPF_MOV64_IMM(BPF_REG_0, 0),
9833 BPF_EXIT_INSN(),
9834 },
9835 .result = ACCEPT,
9836 .prog_type = BPF_PROG_TYPE_XDP,
9837 },
9838 {
9839 "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
9840 .insns = {
9841 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9842 offsetof(struct xdp_md, data_meta)),
9843 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9844 offsetof(struct xdp_md, data)),
9845 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9846 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9847 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9848 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9849 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9850 BPF_MOV64_IMM(BPF_REG_0, 0),
9851 BPF_EXIT_INSN(),
9852 },
9853 .errstr = "R1 offset is outside of the packet",
9854 .result = REJECT,
9855 .prog_type = BPF_PROG_TYPE_XDP,
9856 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9857 },
9858 {
9859 "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
9860 .insns = {
9861 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9862 offsetof(struct xdp_md, data_meta)),
9863 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9864 offsetof(struct xdp_md, data)),
9865 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9866 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9867 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9868 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9869 BPF_MOV64_IMM(BPF_REG_0, 0),
9870 BPF_EXIT_INSN(),
9871 },
9872 .errstr = "R1 offset is outside of the packet",
9873 .result = REJECT,
9874 .prog_type = BPF_PROG_TYPE_XDP,
9875 },
9876 {
9877 "XDP pkt read, pkt_meta' <= pkt_data, good access",
9878 .insns = {
9879 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9880 offsetof(struct xdp_md, data_meta)),
9881 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9882 offsetof(struct xdp_md, data)),
9883 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9884 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9885 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9886 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9887 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9888 BPF_MOV64_IMM(BPF_REG_0, 0),
9889 BPF_EXIT_INSN(),
9890 },
9891 .result = ACCEPT,
9892 .prog_type = BPF_PROG_TYPE_XDP,
9893 },
9894 {
9895 "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
9896 .insns = {
9897 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9898 offsetof(struct xdp_md, data_meta)),
9899 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9900 offsetof(struct xdp_md, data)),
9901 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9902 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9903 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9904 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9905 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9906 BPF_MOV64_IMM(BPF_REG_0, 0),
9907 BPF_EXIT_INSN(),
9908 },
9909 .errstr = "R1 offset is outside of the packet",
9910 .result = REJECT,
9911 .prog_type = BPF_PROG_TYPE_XDP,
9912 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9913 },
9914 {
9915 "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
9916 .insns = {
9917 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9918 offsetof(struct xdp_md, data_meta)),
9919 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9920 offsetof(struct xdp_md, data)),
9921 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9923 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9924 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9925 BPF_MOV64_IMM(BPF_REG_0, 0),
9926 BPF_EXIT_INSN(),
9927 },
9928 .errstr = "R1 offset is outside of the packet",
9929 .result = REJECT,
9930 .prog_type = BPF_PROG_TYPE_XDP,
9931 },
9932 {
9933 "XDP pkt read, pkt_data <= pkt_meta', good access",
9934 .insns = {
9935 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9936 offsetof(struct xdp_md, data_meta)),
9937 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9938 offsetof(struct xdp_md, data)),
9939 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9940 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9941 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9942 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9943 BPF_MOV64_IMM(BPF_REG_0, 0),
9944 BPF_EXIT_INSN(),
9945 },
9946 .result = ACCEPT,
9947 .prog_type = BPF_PROG_TYPE_XDP,
9948 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9949 },
9950 {
9951 "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
9952 .insns = {
9953 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9954 offsetof(struct xdp_md, data_meta)),
9955 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9956 offsetof(struct xdp_md, data)),
9957 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9958 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9959 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9960 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9961 BPF_MOV64_IMM(BPF_REG_0, 0),
9962 BPF_EXIT_INSN(),
9963 },
9964 .errstr = "R1 offset is outside of the packet",
9965 .result = REJECT,
9966 .prog_type = BPF_PROG_TYPE_XDP,
9967 },
9968 {
9969 "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
9970 .insns = {
9971 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9972 offsetof(struct xdp_md, data_meta)),
9973 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9974 offsetof(struct xdp_md, data)),
9975 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9976 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9977 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
9978 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9979 BPF_MOV64_IMM(BPF_REG_0, 0),
9980 BPF_EXIT_INSN(),
9981 },
9982 .errstr = "R1 offset is outside of the packet",
9983 .result = REJECT,
9984 .prog_type = BPF_PROG_TYPE_XDP,
9985 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9986 },
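	/* The "check deducing bounds from const" group verifies what the
	 * verifier learns from comparisons against immediates: subtracting a
	 * pointer from a scalar is rejected outright, and a ctx pointer that
	 * has been modified by a possibly-unbounded scalar may no longer be
	 * dereferenced.
	 */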
9987 {
9988 "check deducing bounds from const, 1",
9989 .insns = {
9990 BPF_MOV64_IMM(BPF_REG_0, 1),
9991 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
9992 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9993 BPF_EXIT_INSN(),
9994 },
9995 .result = REJECT,
9996 .errstr = "R0 tried to subtract pointer from scalar",
9997 },
9998 {
9999 "check deducing bounds from const, 2",
10000 .insns = {
10001 BPF_MOV64_IMM(BPF_REG_0, 1),
10002 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
10003 BPF_EXIT_INSN(),
10004 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
10005 BPF_EXIT_INSN(),
10006 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10007 BPF_EXIT_INSN(),
10008 },
10009 .result = ACCEPT,
10010 .retval = 1,
10011 },
10012 {
10013 "check deducing bounds from const, 3",
10014 .insns = {
10015 BPF_MOV64_IMM(BPF_REG_0, 0),
10016 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10017 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10018 BPF_EXIT_INSN(),
10019 },
10020 .result = REJECT,
10021 .errstr = "R0 tried to subtract pointer from scalar",
10022 },
10023 {
10024 "check deducing bounds from const, 4",
10025 .insns = {
10026 BPF_MOV64_IMM(BPF_REG_0, 0),
10027 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
10028 BPF_EXIT_INSN(),
10029 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10030 BPF_EXIT_INSN(),
10031 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10032 BPF_EXIT_INSN(),
10033 },
10034 .result = ACCEPT,
10035 },
10036 {
10037 "check deducing bounds from const, 5",
10038 .insns = {
10039 BPF_MOV64_IMM(BPF_REG_0, 0),
10040 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10041 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10042 BPF_EXIT_INSN(),
10043 },
10044 .result = REJECT,
10045 .errstr = "R0 tried to subtract pointer from scalar",
10046 },
10047 {
10048 "check deducing bounds from const, 6",
10049 .insns = {
10050 BPF_MOV64_IMM(BPF_REG_0, 0),
10051 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10052 BPF_EXIT_INSN(),
10053 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10054 BPF_EXIT_INSN(),
10055 },
10056 .result = REJECT,
10057 .errstr = "R0 tried to subtract pointer from scalar",
10058 },
10059 {
10060 "check deducing bounds from const, 7",
10061 .insns = {
10062 BPF_MOV64_IMM(BPF_REG_0, ~0),
10063 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10064 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10065 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10066 offsetof(struct __sk_buff, mark)),
10067 BPF_EXIT_INSN(),
10068 },
10069 .result = REJECT,
10070 .errstr = "dereference of modified ctx ptr",
10071 },
10072 {
10073 "check deducing bounds from const, 8",
10074 .insns = {
10075 BPF_MOV64_IMM(BPF_REG_0, ~0),
10076 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10077 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
10078 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10079 offsetof(struct __sk_buff, mark)),
10080 BPF_EXIT_INSN(),
10081 },
10082 .result = REJECT,
10083 .errstr = "dereference of modified ctx ptr",
10084 },
10085 {
10086 "check deducing bounds from const, 9",
10087 .insns = {
10088 BPF_MOV64_IMM(BPF_REG_0, 0),
10089 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10090 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10091 BPF_EXIT_INSN(),
10092 },
10093 .result = REJECT,
10094 .errstr = "R0 tried to subtract pointer from scalar",
10095 },
10096 {
10097 "check deducing bounds from const, 10",
10098 .insns = {
10099 BPF_MOV64_IMM(BPF_REG_0, 0),
10100 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10101 /* Marks reg as unknown. */
10102 BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
10103 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10104 BPF_EXIT_INSN(),
10105 },
10106 .result = REJECT,
10107 .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
10108 },
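	/* BPF_PROG_TYPE_CGROUP_SOCK programs must return 0 or 1, so bpf_exit
	 * is only accepted when the verifier can prove R0 stays within that
	 * range.
	 */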
10109 {
10110 "bpf_exit with invalid return code. test1",
10111 .insns = {
10112 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10113 BPF_EXIT_INSN(),
10114 },
10115 .errstr = "R0 has value (0x0; 0xffffffff)",
10116 .result = REJECT,
10117 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10118 },
10119 {
10120 "bpf_exit with invalid return code. test2",
10121 .insns = {
10122 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10123 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
10124 BPF_EXIT_INSN(),
10125 },
10126 .result = ACCEPT,
10127 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10128 },
10129 {
10130 "bpf_exit with invalid return code. test3",
10131 .insns = {
10132 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10133 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
10134 BPF_EXIT_INSN(),
10135 },
10136 .errstr = "R0 has value (0x0; 0x3)",
10137 .result = REJECT,
10138 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10139 },
10140 {
10141 "bpf_exit with invalid return code. test4",
10142 .insns = {
10143 BPF_MOV64_IMM(BPF_REG_0, 1),
10144 BPF_EXIT_INSN(),
10145 },
10146 .result = ACCEPT,
10147 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10148 },
10149 {
10150 "bpf_exit with invalid return code. test5",
10151 .insns = {
10152 BPF_MOV64_IMM(BPF_REG_0, 2),
10153 BPF_EXIT_INSN(),
10154 },
10155 .errstr = "R0 has value (0x2; 0x0)",
10156 .result = REJECT,
10157 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10158 },
10159 {
10160 "bpf_exit with invalid return code. test6",
10161 .insns = {
10162 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10163 BPF_EXIT_INSN(),
10164 },
10165 .errstr = "R0 is not a known value (ctx)",
10166 .result = REJECT,
10167 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10168 },
10169 {
10170 "bpf_exit with invalid return code. test7",
10171 .insns = {
10172 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10173 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
10174 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
10175 BPF_EXIT_INSN(),
10176 },
10177 .errstr = "R0 has unknown scalar value",
10178 .result = REJECT,
10179 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10180 },
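	/* The "calls:" series exercises BPF-to-BPF calls: BPF_CALL insns with
	 * src_reg == 1 (BPF_PSEUDO_CALL), where imm is the offset of the
	 * callee relative to the instruction following the call.
	 */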
10181 {
10182 "calls: basic sanity",
10183 .insns = {
10184 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10185 BPF_MOV64_IMM(BPF_REG_0, 1),
10186 BPF_EXIT_INSN(),
10187 BPF_MOV64_IMM(BPF_REG_0, 2),
10188 BPF_EXIT_INSN(),
10189 },
10190 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10191 .result = ACCEPT,
10192 },
10193 {
10194 "calls: not on unprivileged",
10195 .insns = {
10196 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10197 BPF_MOV64_IMM(BPF_REG_0, 1),
10198 BPF_EXIT_INSN(),
10199 BPF_MOV64_IMM(BPF_REG_0, 2),
10200 BPF_EXIT_INSN(),
10201 },
10202 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
10203 .result_unpriv = REJECT,
10204 .result = ACCEPT,
10205 .retval = 1,
10206 },
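	/* A division by zero inside the subprog is not a verifier error: at
	 * run time BPF_DIV by zero simply yields zero in the destination, so
	 * the program is accepted and the caller still returns 1.
	 */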
10207 {
10208 "calls: div by 0 in subprog",
10209 .insns = {
10210 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10211 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10212 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10213 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10214 offsetof(struct __sk_buff, data_end)),
10215 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10216 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10217 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10218 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10219 BPF_MOV64_IMM(BPF_REG_0, 1),
10220 BPF_EXIT_INSN(),
10221 BPF_MOV32_IMM(BPF_REG_2, 0),
10222 BPF_MOV32_IMM(BPF_REG_3, 1),
10223 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
10224 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10225 offsetof(struct __sk_buff, data)),
10226 BPF_EXIT_INSN(),
10227 },
10228 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10229 .result = ACCEPT,
10230 .retval = 1,
10231 },
10232 {
10233 "calls: multiple ret types in subprog 1",
10234 .insns = {
10235 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10236 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10237 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10238 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10239 offsetof(struct __sk_buff, data_end)),
10240 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10241 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10242 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10243 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10244 BPF_MOV64_IMM(BPF_REG_0, 1),
10245 BPF_EXIT_INSN(),
10246 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10247 offsetof(struct __sk_buff, data)),
10248 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10249 BPF_MOV32_IMM(BPF_REG_0, 42),
10250 BPF_EXIT_INSN(),
10251 },
10252 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10253 .result = REJECT,
10254 .errstr = "R0 invalid mem access 'inv'",
10255 },
10256 {
10257 "calls: multiple ret types in subprog 2",
10258 .insns = {
10259 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10260 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10261 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10262 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10263 offsetof(struct __sk_buff, data_end)),
10264 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10265 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10266 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10267 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10268 BPF_MOV64_IMM(BPF_REG_0, 1),
10269 BPF_EXIT_INSN(),
10270 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10271 offsetof(struct __sk_buff, data)),
10272 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10273 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
10274 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10275 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10276 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10277 BPF_LD_MAP_FD(BPF_REG_1, 0),
10278 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10279 BPF_FUNC_map_lookup_elem),
10280 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10281 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
10282 offsetof(struct __sk_buff, data)),
10283 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
10284 BPF_EXIT_INSN(),
10285 },
10286 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10287 .fixup_map_hash_8b = { 16 },
10288 .result = REJECT,
10289 .errstr = "R0 min value is outside of the array range",
10290 },
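	/* The next group feeds structurally broken call layouts to the
	 * verifier (overlapping caller/callee, jumps across function
	 * boundaries, reserved fields, back-edges) and expects the specific
	 * error strings listed per test.
	 */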
10291 {
10292 "calls: overlapping caller/callee",
10293 .insns = {
10294 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
10295 BPF_MOV64_IMM(BPF_REG_0, 1),
10296 BPF_EXIT_INSN(),
10297 },
10298 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10299 .errstr = "last insn is not an exit or jmp",
10300 .result = REJECT,
10301 },
10302 {
10303 "calls: wrong recursive calls",
10304 .insns = {
10305 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10306 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10307 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10308 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10309 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10310 BPF_MOV64_IMM(BPF_REG_0, 1),
10311 BPF_EXIT_INSN(),
10312 },
10313 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10314 .errstr = "jump out of range",
10315 .result = REJECT,
10316 },
10317 {
10318 "calls: wrong src reg",
10319 .insns = {
10320 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
10321 BPF_MOV64_IMM(BPF_REG_0, 1),
10322 BPF_EXIT_INSN(),
10323 },
10324 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10325 .errstr = "BPF_CALL uses reserved fields",
10326 .result = REJECT,
10327 },
10328 {
10329 "calls: wrong off value",
10330 .insns = {
10331 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
10332 BPF_MOV64_IMM(BPF_REG_0, 1),
10333 BPF_EXIT_INSN(),
10334 BPF_MOV64_IMM(BPF_REG_0, 2),
10335 BPF_EXIT_INSN(),
10336 },
10337 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10338 .errstr = "BPF_CALL uses reserved fields",
10339 .result = REJECT,
10340 },
10341 {
10342 "calls: jump back loop",
10343 .insns = {
10344 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
10345 BPF_MOV64_IMM(BPF_REG_0, 1),
10346 BPF_EXIT_INSN(),
10347 },
10348 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10349 .errstr = "back-edge from insn 0 to 0",
10350 .result = REJECT,
10351 },
10352 {
10353 "calls: conditional call",
10354 .insns = {
10355 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10356 offsetof(struct __sk_buff, mark)),
10357 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10358 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10359 BPF_MOV64_IMM(BPF_REG_0, 1),
10360 BPF_EXIT_INSN(),
10361 BPF_MOV64_IMM(BPF_REG_0, 2),
10362 BPF_EXIT_INSN(),
10363 },
10364 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10365 .errstr = "jump out of range",
10366 .result = REJECT,
10367 },
10368 {
10369 "calls: conditional call 2",
10370 .insns = {
10371 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10372 offsetof(struct __sk_buff, mark)),
10373 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10374 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10375 BPF_MOV64_IMM(BPF_REG_0, 1),
10376 BPF_EXIT_INSN(),
10377 BPF_MOV64_IMM(BPF_REG_0, 2),
10378 BPF_EXIT_INSN(),
10379 BPF_MOV64_IMM(BPF_REG_0, 3),
10380 BPF_EXIT_INSN(),
10381 },
10382 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10383 .result = ACCEPT,
10384 },
10385 {
10386 "calls: conditional call 3",
10387 .insns = {
10388 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10389 offsetof(struct __sk_buff, mark)),
10390 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10391 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10392 BPF_MOV64_IMM(BPF_REG_0, 1),
10393 BPF_EXIT_INSN(),
10394 BPF_MOV64_IMM(BPF_REG_0, 1),
10395 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10396 BPF_MOV64_IMM(BPF_REG_0, 3),
10397 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10398 },
10399 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10400 .errstr = "back-edge from insn",
10401 .result = REJECT,
10402 },
10403 {
10404 "calls: conditional call 4",
10405 .insns = {
10406 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10407 offsetof(struct __sk_buff, mark)),
10408 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10409 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10410 BPF_MOV64_IMM(BPF_REG_0, 1),
10411 BPF_EXIT_INSN(),
10412 BPF_MOV64_IMM(BPF_REG_0, 1),
10413 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
10414 BPF_MOV64_IMM(BPF_REG_0, 3),
10415 BPF_EXIT_INSN(),
10416 },
10417 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10418 .result = ACCEPT,
10419 },
10420 {
10421 "calls: conditional call 5",
10422 .insns = {
10423 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10424 offsetof(struct __sk_buff, mark)),
10425 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10426 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10427 BPF_MOV64_IMM(BPF_REG_0, 1),
10428 BPF_EXIT_INSN(),
10429 BPF_MOV64_IMM(BPF_REG_0, 1),
10430 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10431 BPF_MOV64_IMM(BPF_REG_0, 3),
10432 BPF_EXIT_INSN(),
10433 },
10434 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10435 .errstr = "back-edge from insn",
10436 .result = REJECT,
10437 },
10438 {
10439 "calls: conditional call 6",
10440 .insns = {
10441 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10442 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
10443 BPF_EXIT_INSN(),
10444 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10445 offsetof(struct __sk_buff, mark)),
10446 BPF_EXIT_INSN(),
10447 },
10448 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10449 .errstr = "back-edge from insn",
10450 .result = REJECT,
10451 },
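	/* The following tests pin down register semantics around a call: R0
	 * in the caller comes from the callee and must have been written
	 * there, while the callee can only read argument registers R1-R5
	 * that were initialized in the caller (R1 still holds the ctx);
	 * reading anything else fails with "!read_ok".
	 */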
10452 {
10453 "calls: using r0 returned by callee",
10454 .insns = {
10455 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10456 BPF_EXIT_INSN(),
10457 BPF_MOV64_IMM(BPF_REG_0, 2),
10458 BPF_EXIT_INSN(),
10459 },
10460 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10461 .result = ACCEPT,
10462 },
10463 {
10464	"calls: using uninit r0 from callee",
10465 .insns = {
10466 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10467 BPF_EXIT_INSN(),
10468 BPF_EXIT_INSN(),
10469 },
10470 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10471 .errstr = "!read_ok",
10472 .result = REJECT,
10473 },
10474 {
10475	"calls: callee is using r1",
10476 .insns = {
10477 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10478 BPF_EXIT_INSN(),
10479 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10480 offsetof(struct __sk_buff, len)),
10481 BPF_EXIT_INSN(),
10482 },
10483 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
10484 .result = ACCEPT,
10485	.retval = TEST_DATA_LEN,
10486	},
10487 {
10488 "calls: callee using args1",
10489 .insns = {
10490 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10491 BPF_EXIT_INSN(),
10492 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10493 BPF_EXIT_INSN(),
10494 },
10495 .errstr_unpriv = "allowed for root only",
10496 .result_unpriv = REJECT,
10497 .result = ACCEPT,
10498	.retval = POINTER_VALUE,
10499	},
10500 {
10501 "calls: callee using wrong args2",
10502 .insns = {
10503 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10504 BPF_EXIT_INSN(),
10505 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10506 BPF_EXIT_INSN(),
10507 },
10508 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10509 .errstr = "R2 !read_ok",
10510 .result = REJECT,
10511 },
10512 {
10513 "calls: callee using two args",
10514 .insns = {
10515 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10516 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
10517 offsetof(struct __sk_buff, len)),
10518 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
10519 offsetof(struct __sk_buff, len)),
10520 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10521 BPF_EXIT_INSN(),
10522 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10523 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
10524 BPF_EXIT_INSN(),
10525 },
10526 .errstr_unpriv = "allowed for root only",
10527 .result_unpriv = REJECT,
10528 .result = ACCEPT,
10529	.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
10530	},
10531 {
10532 "calls: callee changing pkt pointers",
10533 .insns = {
10534 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
10535 offsetof(struct xdp_md, data)),
10536 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
10537 offsetof(struct xdp_md, data_end)),
10538 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
10539 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
10540 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
10541 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10542 /* clear_all_pkt_pointers() has to walk all frames
10543 * to make sure that pkt pointers in the caller
10544	 * are cleared when the callee calls a helper that
10545 * adjusts packet size
10546 */
10547 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10548 BPF_MOV32_IMM(BPF_REG_0, 0),
10549 BPF_EXIT_INSN(),
10550 BPF_MOV64_IMM(BPF_REG_2, 0),
10551 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10552 BPF_FUNC_xdp_adjust_head),
10553 BPF_EXIT_INSN(),
10554 },
10555 .result = REJECT,
10556 .errstr = "R6 invalid mem access 'inv'",
10557 .prog_type = BPF_PROG_TYPE_XDP,
10558 },
10559 {
10560 "calls: two calls with args",
10561 .insns = {
10562 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10563 BPF_EXIT_INSN(),
10564 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10565 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10566 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10567 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10568 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10569 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10570 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10571 BPF_EXIT_INSN(),
10572 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10573 offsetof(struct __sk_buff, len)),
10574 BPF_EXIT_INSN(),
10575 },
10576 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10577 .result = ACCEPT,
10578	.retval = TEST_DATA_LEN + TEST_DATA_LEN,
10579	},
10580 {
10581	"calls: calls with stack arith",
10582 .insns = {
10583 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10584 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10585 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10586 BPF_EXIT_INSN(),
10587 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10588 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10589 BPF_EXIT_INSN(),
10590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10591 BPF_MOV64_IMM(BPF_REG_0, 42),
10592 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
10593 BPF_EXIT_INSN(),
10594 },
10595 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10596 .result = ACCEPT,
10597	.retval = 42,
10598	},
10599 {
10600 "calls: calls with misaligned stack access",
10601 .insns = {
10602 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10603 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
10604 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10605 BPF_EXIT_INSN(),
10606 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
10607 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10608 BPF_EXIT_INSN(),
10609 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
10610 BPF_MOV64_IMM(BPF_REG_0, 42),
10611 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
10612 BPF_EXIT_INSN(),
10613 },
10614 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10615 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
10616 .errstr = "misaligned stack access",
10617 .result = REJECT,
10618 },
10619 {
10620 "calls: calls control flow, jump test",
10621 .insns = {
10622 BPF_MOV64_IMM(BPF_REG_0, 42),
10623 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10624 BPF_MOV64_IMM(BPF_REG_0, 43),
10625 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10626 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
10627 BPF_EXIT_INSN(),
10628 },
10629 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10630 .result = ACCEPT,
10631	.retval = 43,
10632	},
10633 {
10634 "calls: calls control flow, jump test 2",
10635 .insns = {
10636 BPF_MOV64_IMM(BPF_REG_0, 42),
10637 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10638 BPF_MOV64_IMM(BPF_REG_0, 43),
10639 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10640 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
10641 BPF_EXIT_INSN(),
10642 },
10643 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10644 .errstr = "jump out of range from insn 1 to 4",
10645 .result = REJECT,
10646 },
10647 {
10648	"calls: two calls with bad jump",
10649 .insns = {
10650 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10651 BPF_EXIT_INSN(),
10652 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10653 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10654 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10655 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10656 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10657 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10658 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10659 BPF_EXIT_INSN(),
10660 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10661 offsetof(struct __sk_buff, len)),
10662 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
10663 BPF_EXIT_INSN(),
10664 },
10665 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10666 .errstr = "jump out of range from insn 11 to 9",
10667 .result = REJECT,
10668 },
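	/* Direct recursion (a subprog calling itself) and recursion back into the
	 * caller both form a cycle in the call graph and are rejected as back-edges.
	 */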
10669 {
10670 "calls: recursive call. test1",
10671 .insns = {
10672 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10673 BPF_EXIT_INSN(),
10674 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
10675 BPF_EXIT_INSN(),
10676 },
10677 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10678 .errstr = "back-edge",
10679 .result = REJECT,
10680 },
10681 {
10682 "calls: recursive call. test2",
10683 .insns = {
10684 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10685 BPF_EXIT_INSN(),
10686 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
10687 BPF_EXIT_INSN(),
10688 },
10689 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10690 .errstr = "back-edge",
10691 .result = REJECT,
10692 },
10693 {
10694 "calls: unreachable code",
10695 .insns = {
10696 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10697 BPF_EXIT_INSN(),
10698 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10699 BPF_EXIT_INSN(),
10700 BPF_MOV64_IMM(BPF_REG_0, 0),
10701 BPF_EXIT_INSN(),
10702 BPF_MOV64_IMM(BPF_REG_0, 0),
10703 BPF_EXIT_INSN(),
10704 },
10705 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10706 .errstr = "unreachable insn 6",
10707 .result = REJECT,
10708 },
10709 {
10710 "calls: invalid call",
10711 .insns = {
10712 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10713 BPF_EXIT_INSN(),
10714 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
10715 BPF_EXIT_INSN(),
10716 },
10717 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10718 .errstr = "invalid destination",
10719 .result = REJECT,
10720 },
10721 {
10722	"calls: invalid call 2",
10723 .insns = {
10724 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10725 BPF_EXIT_INSN(),
10726 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
10727 BPF_EXIT_INSN(),
10728 },
10729 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10730 .errstr = "invalid destination",
10731 .result = REJECT,
10732 },
10733 {
10734	"calls: jumping across function bodies. test1",
10735 .insns = {
10736 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10737 BPF_MOV64_IMM(BPF_REG_0, 0),
10738 BPF_EXIT_INSN(),
10739 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
10740 BPF_EXIT_INSN(),
10741 },
10742 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10743 .errstr = "jump out of range",
10744 .result = REJECT,
10745 },
10746 {
10747 "calls: jumping across function bodies. test2",
10748 .insns = {
10749 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
10750 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10751 BPF_MOV64_IMM(BPF_REG_0, 0),
10752 BPF_EXIT_INSN(),
10753 BPF_EXIT_INSN(),
10754 },
10755 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10756 .errstr = "jump out of range",
10757 .result = REJECT,
10758 },
10759 {
10760 "calls: call without exit",
10761 .insns = {
10762 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10763 BPF_EXIT_INSN(),
10764 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10765 BPF_EXIT_INSN(),
10766 BPF_MOV64_IMM(BPF_REG_0, 0),
10767 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
10768 },
10769 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10770 .errstr = "not an exit",
10771 .result = REJECT,
10772 },
10773 {
10774 "calls: call into middle of ld_imm64",
10775 .insns = {
10776 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10777 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10778 BPF_MOV64_IMM(BPF_REG_0, 0),
10779 BPF_EXIT_INSN(),
10780 BPF_LD_IMM64(BPF_REG_0, 0),
10781 BPF_EXIT_INSN(),
10782 },
10783 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10784 .errstr = "last insn",
10785 .result = REJECT,
10786 },
10787 {
10788 "calls: call into middle of other call",
10789 .insns = {
10790 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10791 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10792 BPF_MOV64_IMM(BPF_REG_0, 0),
10793 BPF_EXIT_INSN(),
10794 BPF_MOV64_IMM(BPF_REG_0, 0),
10795 BPF_MOV64_IMM(BPF_REG_0, 0),
10796 BPF_EXIT_INSN(),
10797 },
10798 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10799 .errstr = "last insn",
10800 .result = REJECT,
10801 },
10802 {
10803	"calls: ld_abs with changing ctx data in callee",
10804 .insns = {
10805 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10806 BPF_LD_ABS(BPF_B, 0),
10807 BPF_LD_ABS(BPF_H, 0),
10808 BPF_LD_ABS(BPF_W, 0),
10809 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
10810 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
10811 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
10812 BPF_LD_ABS(BPF_B, 0),
10813 BPF_LD_ABS(BPF_H, 0),
10814 BPF_LD_ABS(BPF_W, 0),
10815 BPF_EXIT_INSN(),
10816 BPF_MOV64_IMM(BPF_REG_2, 1),
10817 BPF_MOV64_IMM(BPF_REG_3, 2),
10818 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10819 BPF_FUNC_skb_vlan_push),
10820 BPF_EXIT_INSN(),
10821 },
10822 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10823 .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
10824 .result = REJECT,
10825 },
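	/* The ld_abs test above is rejected: as the errstr says, BPF_LD_[ABS|IND]
	 * cannot be mixed with bpf-to-bpf calls, and here the callee additionally
	 * changes packet data via bpf_skb_vlan_push.
	 */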
10826 {
10827	"calls: two calls with bad fallthrough",
10828 .insns = {
10829 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10830 BPF_EXIT_INSN(),
10831 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10832 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10833 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10834 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10835 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10836 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10837 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10838 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
10839 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10840 offsetof(struct __sk_buff, len)),
10841 BPF_EXIT_INSN(),
10842 },
10843 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10844 .errstr = "not an exit",
10845 .result = REJECT,
10846 },
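	/* The next tests pass pointers into the caller's stack frame down to callees.
	 * The verifier tracks these PTR_TO_STACK arguments across frames so a callee
	 * can read and write its caller's stack slots.
	 */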
10847 {
10848 "calls: two calls with stack read",
10849 .insns = {
10850 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10851 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10852 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10853 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10854 BPF_EXIT_INSN(),
10855 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10856 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10857 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10858 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10859 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10860 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10861 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10862 BPF_EXIT_INSN(),
10863 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10864 BPF_EXIT_INSN(),
10865 },
10866 .prog_type = BPF_PROG_TYPE_XDP,
10867 .result = ACCEPT,
10868 },
10869 {
10870 "calls: two calls with stack write",
10871 .insns = {
10872 /* main prog */
10873 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10874 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10875 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10876 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10877 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10878 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10879 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10880 BPF_EXIT_INSN(),
10881
10882 /* subprog 1 */
10883 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10884 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10885 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
10886 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
10887 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10888 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10889 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
10890 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
10891 /* write into stack frame of main prog */
10892 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10893 BPF_EXIT_INSN(),
10894
10895 /* subprog 2 */
10896 /* read from stack frame of main prog */
10897 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10898 BPF_EXIT_INSN(),
10899 },
10900 .prog_type = BPF_PROG_TYPE_XDP,
10901 .result = ACCEPT,
10902 },
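	/* The stack overflow tests exercise the combined stack check: the verifier
	 * sums the worst-case stack usage of every frame in the call chain and
	 * rejects the program when the total exceeds MAX_BPF_STACK (512 bytes);
	 * two frames each touching fp-300 are over that limit.
	 */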
10903 {
10904	"calls: stack overflow using two frames (pre-call access)",
10905 .insns = {
10906 /* prog 1 */
10907 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10908 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
10909 BPF_EXIT_INSN(),
10910
10911 /* prog 2 */
10912 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10913 BPF_MOV64_IMM(BPF_REG_0, 0),
10914 BPF_EXIT_INSN(),
10915 },
10916 .prog_type = BPF_PROG_TYPE_XDP,
10917 .errstr = "combined stack size",
10918 .result = REJECT,
10919 },
10920 {
10921 "calls: stack overflow using two frames (post-call access)",
10922 .insns = {
10923 /* prog 1 */
10924 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
10925 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10926 BPF_EXIT_INSN(),
10927
10928 /* prog 2 */
10929 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10930 BPF_MOV64_IMM(BPF_REG_0, 0),
10931 BPF_EXIT_INSN(),
10932 },
10933 .prog_type = BPF_PROG_TYPE_XDP,
10934 .errstr = "combined stack size",
10935 .result = REJECT,
10936 },
10937 {
10938	"calls: stack depth check using three frames. test1",
10939 .insns = {
10940 /* main */
10941 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
10942 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
10943 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
10944 BPF_MOV64_IMM(BPF_REG_0, 0),
10945 BPF_EXIT_INSN(),
10946 /* A */
10947 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10948 BPF_EXIT_INSN(),
10949 /* B */
10950 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
10951 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10952 BPF_EXIT_INSN(),
10953 },
10954 .prog_type = BPF_PROG_TYPE_XDP,
10955 /* stack_main=32, stack_A=256, stack_B=64
10956 * and max(main+A, main+A+B) < 512
10957 */
10958 .result = ACCEPT,
10959 },
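	/* i.e. for test1 the deepest chains are main+A = 32 + 256 = 288 bytes and
	 * main+B+A = 32 + 64 + 256 = 352 bytes, both below the 512 byte limit.
	 */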
10960 {
10961 "calls: stack depth check using three frames. test2",
10962 .insns = {
10963 /* main */
10964 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
10965 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
10966 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
10967 BPF_MOV64_IMM(BPF_REG_0, 0),
10968 BPF_EXIT_INSN(),
10969 /* A */
10970 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10971 BPF_EXIT_INSN(),
10972 /* B */
10973 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
10974 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10975 BPF_EXIT_INSN(),
10976 },
10977 .prog_type = BPF_PROG_TYPE_XDP,
10978 /* stack_main=32, stack_A=64, stack_B=256
10979 * and max(main+A, main+A+B) < 512
10980 */
10981 .result = ACCEPT,
10982 },
10983 {
10984 "calls: stack depth check using three frames. test3",
10985 .insns = {
10986 /* main */
10987 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10988 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
10989 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10990 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
10991 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
10992 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10993 BPF_MOV64_IMM(BPF_REG_0, 0),
10994 BPF_EXIT_INSN(),
10995 /* A */
10996 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
10997 BPF_EXIT_INSN(),
10998 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
10999 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
11000 /* B */
11001 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
11002 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
11003 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11004 BPF_EXIT_INSN(),
11005 },
11006 .prog_type = BPF_PROG_TYPE_XDP,
11007 /* stack_main=64, stack_A=224, stack_B=256
11008 * and max(main+A, main+A+B) > 512
11009 */
11010 .errstr = "combined stack",
11011 .result = REJECT,
11012 },
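	/* i.e. for test3 main+B+A = 64 + 256 + 224 = 544 bytes, which is above the
	 * 512 byte limit, hence the reject.
	 */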
11013 {
11014 "calls: stack depth check using three frames. test4",
11015 /* void main(void) {
11016 * func1(0);
11017 * func1(1);
11018 * func2(1);
11019 * }
11020 * void func1(int alloc_or_recurse) {
11021 * if (alloc_or_recurse) {
11022 * frame_pointer[-300] = 1;
11023 * } else {
11024 * func2(alloc_or_recurse);
11025 * }
11026 * }
11027 * void func2(int alloc_or_recurse) {
11028 * if (alloc_or_recurse) {
11029 * frame_pointer[-300] = 1;
11030 * }
11031 * }
11032 */
11033 .insns = {
11034 /* main */
11035 BPF_MOV64_IMM(BPF_REG_1, 0),
11036 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11037 BPF_MOV64_IMM(BPF_REG_1, 1),
11038 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11039 BPF_MOV64_IMM(BPF_REG_1, 1),
11040 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
11041 BPF_MOV64_IMM(BPF_REG_0, 0),
11042 BPF_EXIT_INSN(),
11043 /* A */
11044 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
11045 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11046 BPF_EXIT_INSN(),
11047 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11048 BPF_EXIT_INSN(),
11049 /* B */
11050 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11051 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11052 BPF_EXIT_INSN(),
11053 },
11054 .prog_type = BPF_PROG_TYPE_XDP,
11055 .result = REJECT,
11056 .errstr = "combined stack",
11057 },
11058 {
11059	"calls: stack depth check using three frames. test5",
11060 .insns = {
11061 /* main */
11062 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
11063 BPF_EXIT_INSN(),
11064 /* A */
11065 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11066 BPF_EXIT_INSN(),
11067 /* B */
11068 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
11069 BPF_EXIT_INSN(),
11070 /* C */
11071 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
11072 BPF_EXIT_INSN(),
11073 /* D */
11074 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
11075 BPF_EXIT_INSN(),
11076 /* E */
11077 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
11078 BPF_EXIT_INSN(),
11079 /* F */
11080 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
11081 BPF_EXIT_INSN(),
11082 /* G */
11083 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
11084 BPF_EXIT_INSN(),
11085 /* H */
11086 BPF_MOV64_IMM(BPF_REG_0, 0),
11087 BPF_EXIT_INSN(),
11088 },
11089 .prog_type = BPF_PROG_TYPE_XDP,
11090 .errstr = "call stack",
11091 .result = REJECT,
11092 },
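	/* test5 exceeds the call depth rather than the stack size: main plus
	 * subprogs A..H is a chain of 9 frames, more than the verifier's limit of
	 * 8 frames (MAX_CALL_FRAMES), hence the "call stack" error.
	 */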
11093 {
11094	"calls: spill into caller stack frame",
11095 .insns = {
11096 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11097 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11098 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11099 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11100 BPF_EXIT_INSN(),
11101 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
11102 BPF_MOV64_IMM(BPF_REG_0, 0),
11103 BPF_EXIT_INSN(),
11104 },
11105 .prog_type = BPF_PROG_TYPE_XDP,
11106 .errstr = "cannot spill",
11107 .result = REJECT,
11108 },
11109 {
11110	"calls: write into caller stack frame",
11111 .insns = {
11112 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11113 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11114 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11115 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11116 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11117 BPF_EXIT_INSN(),
11118 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
11119 BPF_MOV64_IMM(BPF_REG_0, 0),
11120 BPF_EXIT_INSN(),
11121 },
11122 .prog_type = BPF_PROG_TYPE_XDP,
11123 .result = ACCEPT,
11124	.retval = 42,
11125	},
11126 {
11127 "calls: write into callee stack frame",
11128 .insns = {
11129 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11130 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
11131 BPF_EXIT_INSN(),
11132 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
11133 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
11134 BPF_EXIT_INSN(),
11135 },
11136 .prog_type = BPF_PROG_TYPE_XDP,
11137 .errstr = "cannot return stack pointer",
11138 .result = REJECT,
11139 },
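	/* The test above is rejected because the callee returns a pointer into its
	 * own stack frame, which no longer exists once the call returns.
	 */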
11140 {
11141	"calls: two calls with stack write and void return",
11142 .insns = {
11143 /* main prog */
11144 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11145 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11146 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11147 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11148 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11149 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11150 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11151 BPF_EXIT_INSN(),
11152
11153 /* subprog 1 */
11154 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11155 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11156 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11157 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11158 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11159 BPF_EXIT_INSN(),
11160
11161 /* subprog 2 */
11162 /* write into stack frame of main prog */
11163 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
11164 BPF_EXIT_INSN(), /* void return */
11165 },
11166 .prog_type = BPF_PROG_TYPE_XDP,
11167 .result = ACCEPT,
11168 },
11169 {
11170 "calls: ambiguous return value",
11171 .insns = {
11172 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11173 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11174 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11175 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11176 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11177 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11178 BPF_EXIT_INSN(),
11179 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11180 BPF_MOV64_IMM(BPF_REG_0, 0),
11181 BPF_EXIT_INSN(),
11182 },
11183 .errstr_unpriv = "allowed for root only",
11184 .result_unpriv = REJECT,
11185 .errstr = "R0 !read_ok",
11186 .result = REJECT,
11187 },
11188 {
11189 "calls: two calls that return map_value",
11190 .insns = {
11191 /* main prog */
11192 /* pass fp-16, fp-8 into a function */
11193 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11195 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11196 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11197 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
11198
11199 /* fetch map_value_ptr from the stack of this function */
11200 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11201 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11202 /* write into map value */
11203 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11204	/* fetch second map_value_ptr from the stack */
11205 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11206 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11207 /* write into map value */
11208 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11209 BPF_MOV64_IMM(BPF_REG_0, 0),
11210 BPF_EXIT_INSN(),
11211
11212 /* subprog 1 */
11213 /* call 3rd function twice */
11214 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11215 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11216 /* first time with fp-8 */
11217 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11218 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11219 /* second time with fp-16 */
11220 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11221 BPF_EXIT_INSN(),
11222
11223 /* subprog 2 */
11224 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11225 /* lookup from map */
11226 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11227 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11228 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11229 BPF_LD_MAP_FD(BPF_REG_1, 0),
11230 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11231 BPF_FUNC_map_lookup_elem),
11232 /* write map_value_ptr into stack frame of main prog */
11233 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11234 BPF_MOV64_IMM(BPF_REG_0, 0),
11235 BPF_EXIT_INSN(), /* return 0 */
11236 },
11237 .prog_type = BPF_PROG_TYPE_XDP,
11238	.fixup_map_hash_8b = { 23 },
11239	.result = ACCEPT,
11240 },
11241 {
11242 "calls: two calls that return map_value with bool condition",
11243 .insns = {
11244 /* main prog */
11245 /* pass fp-16, fp-8 into a function */
11246 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11247 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11248 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11249 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11250 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11251 BPF_MOV64_IMM(BPF_REG_0, 0),
11252 BPF_EXIT_INSN(),
11253
11254 /* subprog 1 */
11255 /* call 3rd function twice */
11256 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11257 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11258 /* first time with fp-8 */
11259 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11260 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11261 /* fetch map_value_ptr from the stack of this function */
11262 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11263 /* write into map value */
11264 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11265 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11266 /* second time with fp-16 */
11267 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11268 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11269	/* fetch second map_value_ptr from the stack */
11270 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11271 /* write into map value */
11272 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11273 BPF_EXIT_INSN(),
11274
11275 /* subprog 2 */
11276 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11277 /* lookup from map */
11278 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11279 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11280 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11281 BPF_LD_MAP_FD(BPF_REG_1, 0),
11282 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11283 BPF_FUNC_map_lookup_elem),
11284 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11285 BPF_MOV64_IMM(BPF_REG_0, 0),
11286 BPF_EXIT_INSN(), /* return 0 */
11287 /* write map_value_ptr into stack frame of main prog */
11288 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11289 BPF_MOV64_IMM(BPF_REG_0, 1),
11290 BPF_EXIT_INSN(), /* return 1 */
11291 },
11292 .prog_type = BPF_PROG_TYPE_XDP,
11293	.fixup_map_hash_8b = { 23 },
11294	.result = ACCEPT,
11295 },
11296 {
11297 "calls: two calls that return map_value with incorrect bool check",
11298 .insns = {
11299 /* main prog */
11300 /* pass fp-16, fp-8 into a function */
11301 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11302 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11303 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11304 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11305 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11306 BPF_MOV64_IMM(BPF_REG_0, 0),
11307 BPF_EXIT_INSN(),
11308
11309 /* subprog 1 */
11310 /* call 3rd function twice */
11311 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11312 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11313 /* first time with fp-8 */
11314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11315 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11316 /* fetch map_value_ptr from the stack of this function */
11317 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11318 /* write into map value */
11319 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11320 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11321 /* second time with fp-16 */
11322 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11323 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11324	/* fetch second map_value_ptr from the stack */
11325 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11326 /* write into map value */
11327 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11328 BPF_EXIT_INSN(),
11329
11330 /* subprog 2 */
11331 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11332 /* lookup from map */
11333 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11334 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11335 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11336 BPF_LD_MAP_FD(BPF_REG_1, 0),
11337 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11338 BPF_FUNC_map_lookup_elem),
11339 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11340 BPF_MOV64_IMM(BPF_REG_0, 0),
11341 BPF_EXIT_INSN(), /* return 0 */
11342 /* write map_value_ptr into stack frame of main prog */
11343 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11344 BPF_MOV64_IMM(BPF_REG_0, 1),
11345 BPF_EXIT_INSN(), /* return 1 */
11346 },
11347 .prog_type = BPF_PROG_TYPE_XDP,
11348	.fixup_map_hash_8b = { 23 },
11349	.result = REJECT,
11350 .errstr = "invalid read from stack off -16+0 size 8",
11351 },
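	/* In the ptr_stack_of_caller tests below, subprog 1 does map lookups and
	 * stores the resulting map_value (or NULL) pointers into the caller's stack
	 * slots, and subprog 2 later dereferences them. The verifier has to track
	 * the spilled pointer type and the 0/1 "lookup succeeded" flags across
	 * frames; an 8-byte write at offset 2 into an 8-byte value is still rejected.
	 */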
11352 {
11353 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
11354 .insns = {
11355 /* main prog */
11356 /* pass fp-16, fp-8 into a function */
11357 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11358 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11359 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11361 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11362 BPF_MOV64_IMM(BPF_REG_0, 0),
11363 BPF_EXIT_INSN(),
11364
11365 /* subprog 1 */
11366 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11367 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11368 /* 1st lookup from map */
11369 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11370 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11371 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11372 BPF_LD_MAP_FD(BPF_REG_1, 0),
11373 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11374 BPF_FUNC_map_lookup_elem),
11375 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11376 BPF_MOV64_IMM(BPF_REG_8, 0),
11377 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11378 /* write map_value_ptr into stack frame of main prog at fp-8 */
11379 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11380 BPF_MOV64_IMM(BPF_REG_8, 1),
11381
11382 /* 2nd lookup from map */
11383 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
11384 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11385 BPF_LD_MAP_FD(BPF_REG_1, 0),
11386 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
11387 BPF_FUNC_map_lookup_elem),
11388 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11389 BPF_MOV64_IMM(BPF_REG_9, 0),
11390 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11391 /* write map_value_ptr into stack frame of main prog at fp-16 */
11392 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11393 BPF_MOV64_IMM(BPF_REG_9, 1),
11394
11395 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11396 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
11397 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11398 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11399 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11400 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
11401 BPF_EXIT_INSN(),
11402
11403 /* subprog 2 */
11404 /* if arg2 == 1 do *arg1 = 0 */
11405 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11406 /* fetch map_value_ptr from the stack of this function */
11407 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11408 /* write into map value */
11409 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11410
11411 /* if arg4 == 1 do *arg3 = 0 */
11412 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11413 /* fetch map_value_ptr from the stack of this function */
11414 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11415 /* write into map value */
11416 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
11417 BPF_EXIT_INSN(),
11418 },
11419 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11420	.fixup_map_hash_8b = { 12, 22 },
11421	.result = REJECT,
11422 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
11423 },
11424 {
11425 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
11426 .insns = {
11427 /* main prog */
11428 /* pass fp-16, fp-8 into a function */
11429 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11430 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11431 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11432 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11433 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11434 BPF_MOV64_IMM(BPF_REG_0, 0),
11435 BPF_EXIT_INSN(),
11436
11437 /* subprog 1 */
11438 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11439 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11440 /* 1st lookup from map */
11441 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11442 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11443 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11444 BPF_LD_MAP_FD(BPF_REG_1, 0),
11445 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11446 BPF_FUNC_map_lookup_elem),
11447 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11448 BPF_MOV64_IMM(BPF_REG_8, 0),
11449 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11450 /* write map_value_ptr into stack frame of main prog at fp-8 */
11451 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11452 BPF_MOV64_IMM(BPF_REG_8, 1),
11453
11454 /* 2nd lookup from map */
11455 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
11456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11457 BPF_LD_MAP_FD(BPF_REG_1, 0),
11458 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
11459 BPF_FUNC_map_lookup_elem),
11460 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11461 BPF_MOV64_IMM(BPF_REG_9, 0),
11462 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11463 /* write map_value_ptr into stack frame of main prog at fp-16 */
11464 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11465 BPF_MOV64_IMM(BPF_REG_9, 1),
11466
11467 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11468 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
11469 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11470 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11471 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11472 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
11473 BPF_EXIT_INSN(),
11474
11475 /* subprog 2 */
11476 /* if arg2 == 1 do *arg1 = 0 */
11477 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11478 /* fetch map_value_ptr from the stack of this function */
11479 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11480 /* write into map value */
11481 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11482
11483 /* if arg4 == 1 do *arg3 = 0 */
11484 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11485 /* fetch map_value_ptr from the stack of this function */
11486 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11487 /* write into map value */
11488 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11489 BPF_EXIT_INSN(),
11490 },
11491 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11492	.fixup_map_hash_8b = { 12, 22 },
11493	.result = ACCEPT,
11494 },
11495 {
11496 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
11497 .insns = {
11498 /* main prog */
11499 /* pass fp-16, fp-8 into a function */
11500 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11501 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11502 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11503 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11504 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
11505 BPF_MOV64_IMM(BPF_REG_0, 0),
11506 BPF_EXIT_INSN(),
11507
11508 /* subprog 1 */
11509 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11510 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11511 /* 1st lookup from map */
11512 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
11513 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11514 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
11515 BPF_LD_MAP_FD(BPF_REG_1, 0),
11516 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11517 BPF_FUNC_map_lookup_elem),
11518 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11519 BPF_MOV64_IMM(BPF_REG_8, 0),
11520 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11521 /* write map_value_ptr into stack frame of main prog at fp-8 */
11522 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11523 BPF_MOV64_IMM(BPF_REG_8, 1),
11524
11525 /* 2nd lookup from map */
11526 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11527 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
11528 BPF_LD_MAP_FD(BPF_REG_1, 0),
11529 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11530 BPF_FUNC_map_lookup_elem),
11531 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11532 BPF_MOV64_IMM(BPF_REG_9, 0), // 26
11533 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11534 /* write map_value_ptr into stack frame of main prog at fp-16 */
11535 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11536 BPF_MOV64_IMM(BPF_REG_9, 1),
11537
11538 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11539 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
11540 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11541 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11542 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11543 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
11544 BPF_JMP_IMM(BPF_JA, 0, 0, -30),
11545
11546 /* subprog 2 */
11547 /* if arg2 == 1 do *arg1 = 0 */
11548 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11549 /* fetch map_value_ptr from the stack of this function */
11550 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11551 /* write into map value */
11552 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11553
11554 /* if arg4 == 1 do *arg3 = 0 */
11555 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11556 /* fetch map_value_ptr from the stack of this function */
11557 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11558 /* write into map value */
11559 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
11560 BPF_JMP_IMM(BPF_JA, 0, 0, -8),
11561 },
11562 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11563	.fixup_map_hash_8b = { 12, 22 },
11564	.result = REJECT,
11565 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
11566 },
11567 {
11568 "calls: two calls that receive map_value_ptr_or_null via arg. test1",
11569 .insns = {
11570 /* main prog */
11571 /* pass fp-16, fp-8 into a function */
11572 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11573 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11574 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11575 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11576 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11577 BPF_MOV64_IMM(BPF_REG_0, 0),
11578 BPF_EXIT_INSN(),
11579
11580 /* subprog 1 */
11581 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11582 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11583 /* 1st lookup from map */
11584 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11585 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11586 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11587 BPF_LD_MAP_FD(BPF_REG_1, 0),
11588 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11589 BPF_FUNC_map_lookup_elem),
11590 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11591 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11592 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11593 BPF_MOV64_IMM(BPF_REG_8, 0),
11594 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11595 BPF_MOV64_IMM(BPF_REG_8, 1),
11596
11597 /* 2nd lookup from map */
11598 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11599 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11600 BPF_LD_MAP_FD(BPF_REG_1, 0),
11601 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11602 BPF_FUNC_map_lookup_elem),
11603 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
11604 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11605 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11606 BPF_MOV64_IMM(BPF_REG_9, 0),
11607 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11608 BPF_MOV64_IMM(BPF_REG_9, 1),
11609
11610 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11611 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11612 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11613 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11614 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11615 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11616 BPF_EXIT_INSN(),
11617
11618 /* subprog 2 */
11619 /* if arg2 == 1 do *arg1 = 0 */
11620 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11621 /* fetch map_value_ptr from the stack of this function */
11622 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11623 /* write into map value */
11624 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11625
11626 /* if arg4 == 1 do *arg3 = 0 */
11627 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11628 /* fetch map_value_ptr from the stack of this function */
11629 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11630 /* write into map value */
11631 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11632 BPF_EXIT_INSN(),
11633 },
11634 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11635	.fixup_map_hash_8b = { 12, 22 },
11636	.result = ACCEPT,
11637 },
11638 {
11639 "calls: two calls that receive map_value_ptr_or_null via arg. test2",
11640 .insns = {
11641 /* main prog */
11642 /* pass fp-16, fp-8 into a function */
11643 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11644 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11645 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11646 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11647 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11648 BPF_MOV64_IMM(BPF_REG_0, 0),
11649 BPF_EXIT_INSN(),
11650
11651 /* subprog 1 */
11652 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11653 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11654 /* 1st lookup from map */
11655 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11656 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11657 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11658 BPF_LD_MAP_FD(BPF_REG_1, 0),
11659 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11660 BPF_FUNC_map_lookup_elem),
11661 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11662 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11663 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11664 BPF_MOV64_IMM(BPF_REG_8, 0),
11665 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11666 BPF_MOV64_IMM(BPF_REG_8, 1),
11667
11668 /* 2nd lookup from map */
11669 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11670 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11671 BPF_LD_MAP_FD(BPF_REG_1, 0),
11672 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11673 BPF_FUNC_map_lookup_elem),
11674 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
11675 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11676 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11677 BPF_MOV64_IMM(BPF_REG_9, 0),
11678 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11679 BPF_MOV64_IMM(BPF_REG_9, 1),
11680
11681 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11682 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11683 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11684 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11685 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11686 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11687 BPF_EXIT_INSN(),
11688
11689 /* subprog 2 */
11690 /* if arg2 == 1 do *arg1 = 0 */
11691 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11692 /* fetch map_value_ptr from the stack of this function */
11693 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11694 /* write into map value */
11695 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11696
11697 /* if arg4 == 0 do *arg3 = 0 */
11698 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
11699 /* fetch map_value_ptr from the stack of this function */
11700 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11701 /* write into map value */
11702 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11703 BPF_EXIT_INSN(),
11704 },
11705 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11706	.fixup_map_hash_8b = { 12, 22 },
11707	.result = REJECT,
11708 .errstr = "R0 invalid mem access 'inv'",
11709 },
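	/* The pkt_ptr spill tests store packet pointers into the caller's stack from
	 * inside a callee. A pointer spilled before the data_end check may only be
	 * dereferenced on paths where the check succeeded; the REJECT cases below
	 * cover spills whose range was never (or not safely) verified.
	 */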
11710 {
11711 "calls: pkt_ptr spill into caller stack",
11712 .insns = {
11713 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11714 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11715 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11716 BPF_EXIT_INSN(),
11717
11718 /* subprog 1 */
11719 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11720 offsetof(struct __sk_buff, data)),
11721 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11722 offsetof(struct __sk_buff, data_end)),
11723 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11724 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11725 /* spill unchecked pkt_ptr into stack of caller */
11726 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11727 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11728 /* now the pkt range is verified, read pkt_ptr from stack */
11729 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11730 /* write 4 bytes into packet */
11731 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11732 BPF_EXIT_INSN(),
11733 },
11734 .result = ACCEPT,
11735 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11736	.retval = POINTER_VALUE,
11737	},
11738	{
11739	"calls: pkt_ptr spill into caller stack 2",
11740 .insns = {
11741 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11742 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11743 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11744 /* Marking is still kept, but not in all cases safe. */
11745 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11746 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
11747 BPF_EXIT_INSN(),
11748
11749 /* subprog 1 */
11750 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11751 offsetof(struct __sk_buff, data)),
11752 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11753 offsetof(struct __sk_buff, data_end)),
11754 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11755 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11756 /* spill unchecked pkt_ptr into stack of caller */
11757 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11758 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11759 /* now the pkt range is verified, read pkt_ptr from stack */
11760 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11761 /* write 4 bytes into packet */
11762 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11763 BPF_EXIT_INSN(),
11764 },
11765 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11766 .errstr = "invalid access to packet",
11767 .result = REJECT,
11768 },
11769 {
11770 "calls: pkt_ptr spill into caller stack 3",
11771 .insns = {
11772 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11773 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11774 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11775 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
11776 /* Marking is still kept and safe here. */
11777 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11778 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
11779 BPF_EXIT_INSN(),
11780
11781 /* subprog 1 */
11782 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11783 offsetof(struct __sk_buff, data)),
11784 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11785 offsetof(struct __sk_buff, data_end)),
11786 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11787 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11788 /* spill unchecked pkt_ptr into stack of caller */
11789 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11790 BPF_MOV64_IMM(BPF_REG_5, 0),
11791 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11792 BPF_MOV64_IMM(BPF_REG_5, 1),
11793 /* now the pkt range is verified, read pkt_ptr from stack */
11794 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11795 /* write 4 bytes into packet */
11796 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11797 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11798 BPF_EXIT_INSN(),
11799 },
11800 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11801 .result = ACCEPT,
11802	.retval = 1,
11803	},
11804 {
11805 "calls: pkt_ptr spill into caller stack 4",
11806 .insns = {
11807 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11808 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11809 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11810 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
11811 /* Check marking propagated. */
11812 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11813 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
11814 BPF_EXIT_INSN(),
11815
11816 /* subprog 1 */
11817 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11818 offsetof(struct __sk_buff, data)),
11819 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11820 offsetof(struct __sk_buff, data_end)),
11821 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11822 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11823 /* spill unchecked pkt_ptr into stack of caller */
11824 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11825 BPF_MOV64_IMM(BPF_REG_5, 0),
11826 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11827 BPF_MOV64_IMM(BPF_REG_5, 1),
11828 /* don't read back pkt_ptr from stack here */
11829 /* write 4 bytes into packet */
11830 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11831 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11832 BPF_EXIT_INSN(),
11833 },
11834 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11835 .result = ACCEPT,
11836	.retval = 1,
11837	},
11838 {
11839 "calls: pkt_ptr spill into caller stack 5",
11840 .insns = {
11841 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11842 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11843 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
11844 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11845 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11846 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11847 BPF_EXIT_INSN(),
11848
11849 /* subprog 1 */
11850 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11851 offsetof(struct __sk_buff, data)),
11852 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11853 offsetof(struct __sk_buff, data_end)),
11854 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11855 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11856 BPF_MOV64_IMM(BPF_REG_5, 0),
11857 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11858 /* spill checked pkt_ptr into stack of caller */
11859 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11860 BPF_MOV64_IMM(BPF_REG_5, 1),
11861 /* don't read back pkt_ptr from stack here */
11862 /* write 4 bytes into packet */
11863 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11864 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11865 BPF_EXIT_INSN(),
11866 },
11867 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11868 .errstr = "same insn cannot be used with different",
11869 .result = REJECT,
11870 },
11871 {
11872 "calls: pkt_ptr spill into caller stack 6",
11873 .insns = {
11874 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11875 offsetof(struct __sk_buff, data_end)),
11876 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11877 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11878 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11879 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11880 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11881 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11882 BPF_EXIT_INSN(),
11883
11884 /* subprog 1 */
11885 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11886 offsetof(struct __sk_buff, data)),
11887 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11888 offsetof(struct __sk_buff, data_end)),
11889 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11890 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11891 BPF_MOV64_IMM(BPF_REG_5, 0),
11892 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11893 /* spill checked pkt_ptr into stack of caller */
11894 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11895 BPF_MOV64_IMM(BPF_REG_5, 1),
11896 /* don't read back pkt_ptr from stack here */
11897 /* write 4 bytes into packet */
11898 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11899 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11900 BPF_EXIT_INSN(),
11901 },
11902 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11903 .errstr = "R4 invalid mem access",
11904 .result = REJECT,
11905 },
11906 {
11907 "calls: pkt_ptr spill into caller stack 7",
11908 .insns = {
11909 BPF_MOV64_IMM(BPF_REG_2, 0),
11910 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11911 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11912 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11913 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11914 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11915 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11916 BPF_EXIT_INSN(),
11917
11918 /* subprog 1 */
11919 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11920 offsetof(struct __sk_buff, data)),
11921 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11922 offsetof(struct __sk_buff, data_end)),
11923 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11924 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11925 BPF_MOV64_IMM(BPF_REG_5, 0),
11926 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11927 /* spill checked pkt_ptr into stack of caller */
11928 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11929 BPF_MOV64_IMM(BPF_REG_5, 1),
11930 /* don't read back pkt_ptr from stack here */
11931 /* write 4 bytes into packet */
11932 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11933 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11934 BPF_EXIT_INSN(),
11935 },
11936 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11937 .errstr = "R4 invalid mem access",
11938 .result = REJECT,
11939 },
11940 {
11941 "calls: pkt_ptr spill into caller stack 8",
11942 .insns = {
11943 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11944 offsetof(struct __sk_buff, data)),
11945 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11946 offsetof(struct __sk_buff, data_end)),
11947 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11948 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11949 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
11950 BPF_EXIT_INSN(),
11951 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11952 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11953 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11954 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11955 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11956 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11957 BPF_EXIT_INSN(),
11958
11959 /* subprog 1 */
11960 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11961 offsetof(struct __sk_buff, data)),
11962 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11963 offsetof(struct __sk_buff, data_end)),
11964 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11965 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11966 BPF_MOV64_IMM(BPF_REG_5, 0),
11967 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11968 /* spill checked pkt_ptr into stack of caller */
11969 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11970 BPF_MOV64_IMM(BPF_REG_5, 1),
11971 /* don't read back pkt_ptr from stack here */
11972 /* write 4 bytes into packet */
11973 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11974 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11975 BPF_EXIT_INSN(),
11976 },
11977 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11978 .result = ACCEPT,
11979 },
11980 {
11981 "calls: pkt_ptr spill into caller stack 9",
11982 .insns = {
11983 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11984 offsetof(struct __sk_buff, data)),
11985 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11986 offsetof(struct __sk_buff, data_end)),
11987 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11988 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11989 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
11990 BPF_EXIT_INSN(),
11991 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11992 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11993 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11994 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11995 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11996 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11997 BPF_EXIT_INSN(),
11998
11999 /* subprog 1 */
12000 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12001 offsetof(struct __sk_buff, data)),
12002 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12003 offsetof(struct __sk_buff, data_end)),
12004 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12005 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12006 BPF_MOV64_IMM(BPF_REG_5, 0),
12007 /* spill unchecked pkt_ptr into stack of caller */
12008 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12009 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12010 BPF_MOV64_IMM(BPF_REG_5, 1),
12011 /* don't read back pkt_ptr from stack here */
12012 /* write 4 bytes into packet */
12013 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12014 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12015 BPF_EXIT_INSN(),
12016 },
12017 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12018 .errstr = "invalid access to packet",
12019 .result = REJECT,
12020 },
12021 {
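	/* The test below checks that a callee may leave the caller's fp-8
	 * slot either zero-initialized or overwritten with the
	 * map_value_or_null result of a map lookup; the caller's later NULL
	 * check has to be honoured for both possible contents of that slot.
	 */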
	{
Alexei Starovoitovd98588c2017-12-14 17:55:09 -080012022		"calls: caller stack init to zero or map_value_or_null",
12023 .insns = {
12024 BPF_MOV64_IMM(BPF_REG_0, 0),
12025 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12026 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12027 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12028 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12029 /* fetch map_value_or_null or const_zero from stack */
12030 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12031 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12032 /* store into map_value */
12033 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
12034 BPF_EXIT_INSN(),
12035
12036 /* subprog 1 */
12037 /* if (ctx == 0) return; */
12038 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
12039 /* else bpf_map_lookup() and *(fp - 8) = r0 */
12040 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
12041 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12042 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12043 BPF_LD_MAP_FD(BPF_REG_1, 0),
12044 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12045 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12046 BPF_FUNC_map_lookup_elem),
12047 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12048 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12049 BPF_EXIT_INSN(),
12050 },
Prashant Bhole908142e2018-10-09 10:04:53 +090012051 .fixup_map_hash_8b = { 13 },
Alexei Starovoitovd98588c2017-12-14 17:55:09 -080012052 .result = ACCEPT,
12053 .prog_type = BPF_PROG_TYPE_XDP,
12054 },
12055 {
12056 "calls: stack init to zero and pruning",
12057 .insns = {
12058 /* first make allocated_stack 16 byte */
12059		/* first make allocated_stack 16 bytes */
12060 /* now fork the execution such that the false branch
12061		 * of JGT insn will be verified second and it skips zero
12062 * init of fp-8 stack slot. If stack liveness marking
12063 * is missing live_read marks from call map_lookup
12064 * processing then pruning will incorrectly assume
12065 * that fp-8 stack slot was unused in the fall-through
12066 * branch and will accept the program incorrectly
12067 */
12068 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
12069 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12070 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
12071 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12072 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12073 BPF_LD_MAP_FD(BPF_REG_1, 0),
12074 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12075 BPF_FUNC_map_lookup_elem),
12076 BPF_EXIT_INSN(),
12077 },
Prashant Bhole908142e2018-10-09 10:04:53 +090012078 .fixup_map_hash_48b = { 6 },
Alexei Starovoitovd98588c2017-12-14 17:55:09 -080012079 .errstr = "invalid indirect read from stack off -8+0 size 8",
12080 .result = REJECT,
12081 .prog_type = BPF_PROG_TYPE_XDP,
12082 },
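	/* In the next two tests each subprog returns a different map fd in
	 * R0 via BPF_LD_MAP_FD and the caller performs one lookup through
	 * whichever pointer it received. The verifier has to consider both
	 * candidate maps: two ordinary maps with the same value layout are
	 * fine, but mixing in a map-in-map makes the looked-up value a map
	 * pointer that cannot be dereferenced, so that variant is rejected.
	 */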
Gianluca Borellofd05e572017-12-23 10:09:55 +000012083 {
Daniel Borkmann06be0862018-06-02 23:06:31 +020012084 "calls: two calls returning different map pointers for lookup (hash, array)",
12085 .insns = {
12086 /* main prog */
12087 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12088 BPF_CALL_REL(11),
12089 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12090 BPF_CALL_REL(12),
12091 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12092 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12093 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12094 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12095 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12096 BPF_FUNC_map_lookup_elem),
12097 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12098 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12099 offsetof(struct test_val, foo)),
12100 BPF_MOV64_IMM(BPF_REG_0, 1),
12101 BPF_EXIT_INSN(),
12102 /* subprog 1 */
12103 BPF_LD_MAP_FD(BPF_REG_0, 0),
12104 BPF_EXIT_INSN(),
12105 /* subprog 2 */
12106 BPF_LD_MAP_FD(BPF_REG_0, 0),
12107 BPF_EXIT_INSN(),
12108 },
12109 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Prashant Bhole908142e2018-10-09 10:04:53 +090012110 .fixup_map_hash_48b = { 13 },
12111 .fixup_map_array_48b = { 16 },
Daniel Borkmann06be0862018-06-02 23:06:31 +020012112 .result = ACCEPT,
12113 .retval = 1,
12114 },
12115 {
12116 "calls: two calls returning different map pointers for lookup (hash, map in map)",
12117 .insns = {
12118 /* main prog */
12119 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12120 BPF_CALL_REL(11),
12121 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12122 BPF_CALL_REL(12),
12123 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12124 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12125 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12126 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12127 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12128 BPF_FUNC_map_lookup_elem),
12129 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12130 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12131 offsetof(struct test_val, foo)),
12132 BPF_MOV64_IMM(BPF_REG_0, 1),
12133 BPF_EXIT_INSN(),
12134 /* subprog 1 */
12135 BPF_LD_MAP_FD(BPF_REG_0, 0),
12136 BPF_EXIT_INSN(),
12137 /* subprog 2 */
12138 BPF_LD_MAP_FD(BPF_REG_0, 0),
12139 BPF_EXIT_INSN(),
12140 },
12141 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12142 .fixup_map_in_map = { 16 },
Prashant Bhole908142e2018-10-09 10:04:53 +090012143 .fixup_map_array_48b = { 13 },
Daniel Borkmann06be0862018-06-02 23:06:31 +020012144 .result = REJECT,
12145 .errstr = "R0 invalid mem access 'map_ptr'",
12146 },
12147 {
12148 "cond: two branches returning different map pointers for lookup (tail, tail)",
12149 .insns = {
12150 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12151 offsetof(struct __sk_buff, mark)),
12152 BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
12153 BPF_LD_MAP_FD(BPF_REG_2, 0),
12154 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12155 BPF_LD_MAP_FD(BPF_REG_2, 0),
12156 BPF_MOV64_IMM(BPF_REG_3, 7),
12157 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12158 BPF_FUNC_tail_call),
12159 BPF_MOV64_IMM(BPF_REG_0, 1),
12160 BPF_EXIT_INSN(),
12161 },
12162 .fixup_prog1 = { 5 },
12163 .fixup_prog2 = { 2 },
12164 .result_unpriv = REJECT,
12165 .errstr_unpriv = "tail_call abusing map_ptr",
12166 .result = ACCEPT,
12167 .retval = 42,
12168 },
12169 {
12170 "cond: two branches returning same map pointers for lookup (tail, tail)",
12171 .insns = {
12172 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12173 offsetof(struct __sk_buff, mark)),
12174 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
12175 BPF_LD_MAP_FD(BPF_REG_2, 0),
12176 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12177 BPF_LD_MAP_FD(BPF_REG_2, 0),
12178 BPF_MOV64_IMM(BPF_REG_3, 7),
12179 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12180 BPF_FUNC_tail_call),
12181 BPF_MOV64_IMM(BPF_REG_0, 1),
12182 BPF_EXIT_INSN(),
12183 },
12184 .fixup_prog2 = { 2, 5 },
12185 .result_unpriv = ACCEPT,
12186 .result = ACCEPT,
12187 .retval = 42,
12188 },
12189 {
Gianluca Borellofd05e572017-12-23 10:09:55 +000012190 "search pruning: all branches should be verified (nop operation)",
12191 .insns = {
12192 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12193 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12194 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12195 BPF_LD_MAP_FD(BPF_REG_1, 0),
12196 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12197 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
12198 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12199 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12200 BPF_MOV64_IMM(BPF_REG_4, 0),
12201 BPF_JMP_A(1),
12202 BPF_MOV64_IMM(BPF_REG_4, 1),
12203 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12204 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12205 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12206 BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
12207 BPF_MOV64_IMM(BPF_REG_6, 0),
12208 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
12209 BPF_EXIT_INSN(),
12210 },
Prashant Bhole908142e2018-10-09 10:04:53 +090012211 .fixup_map_hash_8b = { 3 },
Gianluca Borellofd05e572017-12-23 10:09:55 +000012212 .errstr = "R6 invalid mem access 'inv'",
12213 .result = REJECT,
12214 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12215 },
12216 {
12217 "search pruning: all branches should be verified (invalid stack access)",
12218 .insns = {
12219 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12221 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12222 BPF_LD_MAP_FD(BPF_REG_1, 0),
12223 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12224 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
12225 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12226 BPF_MOV64_IMM(BPF_REG_4, 0),
12227 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12228 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12229 BPF_JMP_A(1),
12230 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
12231 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12232 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12233 BPF_EXIT_INSN(),
12234 },
Prashant Bhole908142e2018-10-09 10:04:53 +090012235 .fixup_map_hash_8b = { 3 },
Gianluca Borellofd05e572017-12-23 10:09:55 +000012236 .errstr = "invalid read from stack off -16+0 size 8",
12237 .result = REJECT,
12238 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12239 },
Daniel Borkmann23d191a2018-02-24 01:08:03 +010012240 {
12241 "jit: lsh, rsh, arsh by 1",
12242 .insns = {
12243 BPF_MOV64_IMM(BPF_REG_0, 1),
12244 BPF_MOV64_IMM(BPF_REG_1, 0xff),
12245 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
12246 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
12247 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
12248 BPF_EXIT_INSN(),
12249 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
12250 BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
12251 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
12252 BPF_EXIT_INSN(),
12253 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
12254 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
12255 BPF_EXIT_INSN(),
12256 BPF_MOV64_IMM(BPF_REG_0, 2),
12257 BPF_EXIT_INSN(),
12258 },
12259 .result = ACCEPT,
12260 .retval = 2,
12261 },
12262 {
12263 "jit: mov32 for ldimm64, 1",
12264 .insns = {
12265 BPF_MOV64_IMM(BPF_REG_0, 2),
12266 BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
12267 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
12268 BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
12269 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12270 BPF_MOV64_IMM(BPF_REG_0, 1),
12271 BPF_EXIT_INSN(),
12272 },
12273 .result = ACCEPT,
12274 .retval = 2,
12275 },
12276 {
12277 "jit: mov32 for ldimm64, 2",
12278 .insns = {
12279 BPF_MOV64_IMM(BPF_REG_0, 1),
12280 BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
12281 BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
12282 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12283 BPF_MOV64_IMM(BPF_REG_0, 2),
12284 BPF_EXIT_INSN(),
12285 },
12286 .result = ACCEPT,
12287 .retval = 2,
12288 },
12289 {
12290 "jit: various mul tests",
12291 .insns = {
12292 BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12293 BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12294 BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
12295 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12296 BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12297 BPF_MOV64_IMM(BPF_REG_0, 1),
12298 BPF_EXIT_INSN(),
12299 BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12300 BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12301 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12302 BPF_MOV64_IMM(BPF_REG_0, 1),
12303 BPF_EXIT_INSN(),
12304 BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
12305 BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12306 BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12307 BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12308 BPF_MOV64_IMM(BPF_REG_0, 1),
12309 BPF_EXIT_INSN(),
12310 BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12311 BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12312 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12313 BPF_MOV64_IMM(BPF_REG_0, 1),
12314 BPF_EXIT_INSN(),
12315 BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
12316 BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
12317 BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12318 BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
12319 BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
12320 BPF_MOV64_IMM(BPF_REG_0, 1),
12321 BPF_EXIT_INSN(),
12322 BPF_MOV64_IMM(BPF_REG_0, 2),
12323 BPF_EXIT_INSN(),
12324 },
12325 .result = ACCEPT,
12326 .retval = 2,
12327 },
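	/* BPF_STX_XADD is an atomic add to memory, so the verifier insists
	 * on a properly aligned destination: the first two tests below use
	 * misaligned stack and map value offsets, the third checks that
	 * XADD into packet memory is refused altogether.
	 */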
David S. Miller0f3e9c92018-03-06 00:53:44 -050012328 {
Daniel Borkmannca369602018-02-23 22:29:05 +010012329 "xadd/w check unaligned stack",
12330 .insns = {
12331 BPF_MOV64_IMM(BPF_REG_0, 1),
12332 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12333 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
12334 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12335 BPF_EXIT_INSN(),
12336 },
12337 .result = REJECT,
12338 .errstr = "misaligned stack access off",
12339 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12340 },
12341 {
12342 "xadd/w check unaligned map",
12343 .insns = {
12344 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12345 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12346 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12347 BPF_LD_MAP_FD(BPF_REG_1, 0),
12348 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12349 BPF_FUNC_map_lookup_elem),
12350 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
12351 BPF_EXIT_INSN(),
12352 BPF_MOV64_IMM(BPF_REG_1, 1),
12353 BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
12354 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
12355 BPF_EXIT_INSN(),
12356 },
Prashant Bhole908142e2018-10-09 10:04:53 +090012357 .fixup_map_hash_8b = { 3 },
Daniel Borkmannca369602018-02-23 22:29:05 +010012358 .result = REJECT,
12359 .errstr = "misaligned value access off",
12360 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12361 },
12362 {
12363 "xadd/w check unaligned pkt",
12364 .insns = {
12365 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12366 offsetof(struct xdp_md, data)),
12367 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12368 offsetof(struct xdp_md, data_end)),
12369 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
12370 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
12371 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
12372 BPF_MOV64_IMM(BPF_REG_0, 99),
12373 BPF_JMP_IMM(BPF_JA, 0, 0, 6),
12374 BPF_MOV64_IMM(BPF_REG_0, 1),
12375 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12376 BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
12377 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
12378 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
12379 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
12380 BPF_EXIT_INSN(),
12381 },
12382 .result = REJECT,
Joe Stringer9d2be442018-10-02 13:35:31 -070012383 .errstr = "BPF_XADD stores into R2 ctx",
Daniel Borkmannca369602018-02-23 22:29:05 +010012384 .prog_type = BPF_PROG_TYPE_XDP,
12385 },
Yonghong Song2abe611c2018-04-28 22:28:14 -070012386 {
Daniel Borkmannfa47a162018-07-19 18:18:36 +020012387 "xadd/w check whether src/dst got mangled, 1",
12388 .insns = {
12389 BPF_MOV64_IMM(BPF_REG_0, 1),
12390 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12391 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
12392 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12393 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12394 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12395 BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
12396 BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
12397 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12398 BPF_EXIT_INSN(),
12399 BPF_MOV64_IMM(BPF_REG_0, 42),
12400 BPF_EXIT_INSN(),
12401 },
12402 .result = ACCEPT,
12403 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12404 .retval = 3,
12405 },
12406 {
12407 "xadd/w check whether src/dst got mangled, 2",
12408 .insns = {
12409 BPF_MOV64_IMM(BPF_REG_0, 1),
12410 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12411 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
12412 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12413 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12414 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12415 BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
12416 BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
12417 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
12418 BPF_EXIT_INSN(),
12419 BPF_MOV64_IMM(BPF_REG_0, 42),
12420 BPF_EXIT_INSN(),
12421 },
12422 .result = ACCEPT,
12423 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12424 .retval = 3,
12425 },
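	/* bpf_get_stack() reports the number of bytes written (or a negative
	 * error) in the lower 32 bits of R0. The test below sign-extends
	 * that value with LSH/ARSH by 32 and compares it against the buffer
	 * size so the verifier can bound the second bpf_get_stack() call.
	 */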
12426 {
Yonghong Song2abe611c2018-04-28 22:28:14 -070012427 "bpf_get_stack return R0 within range",
12428 .insns = {
12429 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12430 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12431 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12432 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12433 BPF_LD_MAP_FD(BPF_REG_1, 0),
12434 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12435 BPF_FUNC_map_lookup_elem),
12436 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
12437 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
12438 BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
12439 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12440 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
12441 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
12442 BPF_MOV64_IMM(BPF_REG_4, 256),
12443 BPF_EMIT_CALL(BPF_FUNC_get_stack),
12444 BPF_MOV64_IMM(BPF_REG_1, 0),
12445 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
12446 BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
12447 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
12448 BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
12449 BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
12450 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
12451 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
12452 BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
12453 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
12454 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
12455 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
12456 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
12457 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12458 BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
12459 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
12460 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
12461 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12462 BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
12463 BPF_MOV64_IMM(BPF_REG_4, 0),
12464 BPF_EMIT_CALL(BPF_FUNC_get_stack),
12465 BPF_EXIT_INSN(),
12466 },
Prashant Bhole908142e2018-10-09 10:04:53 +090012467 .fixup_map_hash_48b = { 4 },
Yonghong Song2abe611c2018-04-28 22:28:14 -070012468 .result = ACCEPT,
12469 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12470 },
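	/* Legacy BPF_LD_ABS/BPF_LD_IND packet loads implicitly use R6 as the
	 * skb pointer and deliver the result in R0, terminating the program
	 * on an out-of-range offset. Only byte, half-word and word sizes
	 * exist, so the BPF_DW forms below must be rejected as unknown
	 * opcodes.
	 */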
Daniel Borkmann93731ef2018-05-04 01:08:13 +020012471 {
12472 "ld_abs: invalid op 1",
12473 .insns = {
12474 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12475 BPF_LD_ABS(BPF_DW, 0),
12476 BPF_EXIT_INSN(),
12477 },
12478 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12479 .result = REJECT,
12480 .errstr = "unknown opcode",
12481 },
12482 {
12483 "ld_abs: invalid op 2",
12484 .insns = {
12485 BPF_MOV32_IMM(BPF_REG_0, 256),
12486 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12487 BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
12488 BPF_EXIT_INSN(),
12489 },
12490 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12491 .result = REJECT,
12492 .errstr = "unknown opcode",
12493 },
12494 {
12495 "ld_abs: nmap reduced",
12496 .insns = {
12497 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12498 BPF_LD_ABS(BPF_H, 12),
12499 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
12500 BPF_LD_ABS(BPF_H, 12),
12501 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
12502 BPF_MOV32_IMM(BPF_REG_0, 18),
12503 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
12504 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
12505 BPF_LD_IND(BPF_W, BPF_REG_7, 14),
12506 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
12507 BPF_MOV32_IMM(BPF_REG_0, 280971478),
12508 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
12509 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
12510 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
12511 BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
12512 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
12513 BPF_LD_ABS(BPF_H, 12),
12514 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
12515 BPF_MOV32_IMM(BPF_REG_0, 22),
12516 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
12517 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
12518 BPF_LD_IND(BPF_H, BPF_REG_7, 14),
12519 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
12520 BPF_MOV32_IMM(BPF_REG_0, 17366),
12521 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
12522 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
12523 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
12524 BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
12525 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12526 BPF_MOV32_IMM(BPF_REG_0, 256),
12527 BPF_EXIT_INSN(),
12528 BPF_MOV32_IMM(BPF_REG_0, 0),
12529 BPF_EXIT_INSN(),
12530 },
12531 .data = {
12532 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
12533 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
12534 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
12535 },
12536 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12537 .result = ACCEPT,
12538 .retval = 256,
12539 },
12540 {
12541 "ld_abs: div + abs, test 1",
12542 .insns = {
12543 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12544 BPF_LD_ABS(BPF_B, 3),
12545 BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
12546 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
12547 BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
12548 BPF_LD_ABS(BPF_B, 4),
12549 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
12550 BPF_LD_IND(BPF_B, BPF_REG_8, -70),
12551 BPF_EXIT_INSN(),
12552 },
12553 .data = {
12554 10, 20, 30, 40, 50,
12555 },
12556 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12557 .result = ACCEPT,
12558 .retval = 10,
12559 },
12560 {
12561 "ld_abs: div + abs, test 2",
12562 .insns = {
12563 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12564 BPF_LD_ABS(BPF_B, 3),
12565 BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
12566 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
12567 BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
12568 BPF_LD_ABS(BPF_B, 128),
12569 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
12570 BPF_LD_IND(BPF_B, BPF_REG_8, -70),
12571 BPF_EXIT_INSN(),
12572 },
12573 .data = {
12574 10, 20, 30, 40, 50,
12575 },
12576 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12577 .result = ACCEPT,
12578 .retval = 0,
12579 },
12580 {
12581 "ld_abs: div + abs, test 3",
12582 .insns = {
12583 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12584 BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
12585 BPF_LD_ABS(BPF_B, 3),
12586 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
12587 BPF_EXIT_INSN(),
12588 },
12589 .data = {
12590 10, 20, 30, 40, 50,
12591 },
12592 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12593 .result = ACCEPT,
12594 .retval = 0,
12595 },
12596 {
12597 "ld_abs: div + abs, test 4",
12598 .insns = {
12599 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12600 BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
12601 BPF_LD_ABS(BPF_B, 256),
12602 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
12603 BPF_EXIT_INSN(),
12604 },
12605 .data = {
12606 10, 20, 30, 40, 50,
12607 },
12608 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12609 .result = ACCEPT,
12610 .retval = 0,
12611 },
12612 {
12613 "ld_abs: vlan + abs, test 1",
12614 .insns = { },
12615 .data = {
12616 0x34,
12617 },
12618 .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
12619 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12620 .result = ACCEPT,
12621 .retval = 0xbef,
12622 },
12623 {
12624 "ld_abs: vlan + abs, test 2",
12625 .insns = {
12626 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12627 BPF_LD_ABS(BPF_B, 0),
12628 BPF_LD_ABS(BPF_H, 0),
12629 BPF_LD_ABS(BPF_W, 0),
12630 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
12631 BPF_MOV64_IMM(BPF_REG_6, 0),
12632 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12633 BPF_MOV64_IMM(BPF_REG_2, 1),
12634 BPF_MOV64_IMM(BPF_REG_3, 2),
12635 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12636 BPF_FUNC_skb_vlan_push),
12637 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
12638 BPF_LD_ABS(BPF_B, 0),
12639 BPF_LD_ABS(BPF_H, 0),
12640 BPF_LD_ABS(BPF_W, 0),
12641 BPF_MOV64_IMM(BPF_REG_0, 42),
12642 BPF_EXIT_INSN(),
12643 },
12644 .data = {
12645 0x34,
12646 },
12647 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12648 .result = ACCEPT,
12649 .retval = 42,
12650 },
12651 {
12652 "ld_abs: jump around ld_abs",
12653 .insns = { },
12654 .data = {
12655 10, 11,
12656 },
12657 .fill_helper = bpf_fill_jump_around_ld_abs,
12658 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12659 .result = ACCEPT,
12660 .retval = 10,
12661 },
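	/* bpf_fill_rand_ld_dw() is a fill_helper defined earlier in this
	 * file; it is assumed to synthesize a long run of BPF_LD_IMM64
	 * instructions with semi-random immediates (seeded via bpf_rand.h)
	 * folded into the return value, so these entries mainly stress JIT
	 * handling of many ldimm64 insns rather than the empty .insns array.
	 */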
Daniel Borkmanna82d8cd2018-05-14 23:22:34 +020012662 {
12663 "ld_dw: xor semi-random 64 bit imms, test 1",
12664 .insns = { },
12665 .data = { },
12666 .fill_helper = bpf_fill_rand_ld_dw,
12667 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12668 .result = ACCEPT,
12669 .retval = 4090,
12670 },
12671 {
12672 "ld_dw: xor semi-random 64 bit imms, test 2",
12673 .insns = { },
12674 .data = { },
12675 .fill_helper = bpf_fill_rand_ld_dw,
12676 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12677 .result = ACCEPT,
12678 .retval = 2047,
12679 },
12680 {
12681 "ld_dw: xor semi-random 64 bit imms, test 3",
12682 .insns = { },
12683 .data = { },
12684 .fill_helper = bpf_fill_rand_ld_dw,
12685 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12686 .result = ACCEPT,
12687 .retval = 511,
12688 },
12689 {
12690 "ld_dw: xor semi-random 64 bit imms, test 4",
12691 .insns = { },
12692 .data = { },
12693 .fill_helper = bpf_fill_rand_ld_dw,
12694 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12695 .result = ACCEPT,
12696 .retval = 5,
12697 },
Daniel Borkmann58990d12018-06-07 17:40:03 +020012698 {
12699 "pass unmodified ctx pointer to helper",
12700 .insns = {
12701 BPF_MOV64_IMM(BPF_REG_2, 0),
12702 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12703 BPF_FUNC_csum_update),
12704 BPF_MOV64_IMM(BPF_REG_0, 0),
12705 BPF_EXIT_INSN(),
12706 },
12707 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12708 .result = ACCEPT,
12709 },
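	/* The "reference tracking" tests rely on the BPF_SK_LOOKUP macro
	 * defined earlier in this file. Roughly, it is assumed to expand to
	 * an instruction sequence that zeroes a struct bpf_sock_tuple on the
	 * stack and calls BPF_FUNC_sk_lookup_tcp, leaving a possibly-NULL
	 * socket reference in R0. The verifier must prove that every
	 * acquired reference is released with bpf_sk_release() on all paths
	 * before the program exits.
	 */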
12710 {
Joe Stringerb584ab82018-10-02 13:35:38 -070012711 "reference tracking: leak potential reference",
12712 .insns = {
12713 BPF_SK_LOOKUP,
12714 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
12715 BPF_EXIT_INSN(),
12716 },
12717 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12718 .errstr = "Unreleased reference",
12719 .result = REJECT,
12720 },
12721 {
12722 "reference tracking: leak potential reference on stack",
12723 .insns = {
12724 BPF_SK_LOOKUP,
12725 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12726 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12727 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
12728 BPF_MOV64_IMM(BPF_REG_0, 0),
12729 BPF_EXIT_INSN(),
12730 },
12731 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12732 .errstr = "Unreleased reference",
12733 .result = REJECT,
12734 },
12735 {
12736 "reference tracking: leak potential reference on stack 2",
12737 .insns = {
12738 BPF_SK_LOOKUP,
12739 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12740 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12741 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
12742 BPF_MOV64_IMM(BPF_REG_0, 0),
12743 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
12744 BPF_EXIT_INSN(),
12745 },
12746 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12747 .errstr = "Unreleased reference",
12748 .result = REJECT,
12749 },
12750 {
12751 "reference tracking: zero potential reference",
12752 .insns = {
12753 BPF_SK_LOOKUP,
12754 BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
12755 BPF_EXIT_INSN(),
12756 },
12757 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12758 .errstr = "Unreleased reference",
12759 .result = REJECT,
12760 },
12761 {
12762 "reference tracking: copy and zero potential references",
12763 .insns = {
12764 BPF_SK_LOOKUP,
12765 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
12766 BPF_MOV64_IMM(BPF_REG_0, 0),
12767 BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
12768 BPF_EXIT_INSN(),
12769 },
12770 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12771 .errstr = "Unreleased reference",
12772 .result = REJECT,
12773 },
12774 {
12775 "reference tracking: release reference without check",
12776 .insns = {
12777 BPF_SK_LOOKUP,
12778 /* reference in r0 may be NULL */
12779 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12780 BPF_MOV64_IMM(BPF_REG_2, 0),
12781 BPF_EMIT_CALL(BPF_FUNC_sk_release),
12782 BPF_EXIT_INSN(),
12783 },
12784 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12785 .errstr = "type=sock_or_null expected=sock",
12786 .result = REJECT,
12787 },
12788 {
12789 "reference tracking: release reference",
12790 .insns = {
12791 BPF_SK_LOOKUP,
12792 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12793 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12794 BPF_EMIT_CALL(BPF_FUNC_sk_release),
12795 BPF_EXIT_INSN(),
12796 },
12797 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12798 .result = ACCEPT,
12799 },
12800 {
12801 "reference tracking: release reference 2",
12802 .insns = {
12803 BPF_SK_LOOKUP,
12804 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12805 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
12806 BPF_EXIT_INSN(),
12807 BPF_EMIT_CALL(BPF_FUNC_sk_release),
12808 BPF_EXIT_INSN(),
12809 },
12810 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12811 .result = ACCEPT,
12812 },
12813 {
12814 "reference tracking: release reference twice",
12815 .insns = {
12816 BPF_SK_LOOKUP,
12817 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12818 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12819 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12820 BPF_EMIT_CALL(BPF_FUNC_sk_release),
12821 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12822 BPF_EMIT_CALL(BPF_FUNC_sk_release),
12823 BPF_EXIT_INSN(),
12824 },
12825 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12826 .errstr = "type=inv expected=sock",
12827 .result = REJECT,
12828 },
12829 {
12830 "reference tracking: release reference twice inside branch",
12831 .insns = {
12832 BPF_SK_LOOKUP,
12833 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12834 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12835 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
12836 BPF_EMIT_CALL(BPF_FUNC_sk_release),
12837 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12838 BPF_EMIT_CALL(BPF_FUNC_sk_release),
12839 BPF_EXIT_INSN(),
12840 },
12841 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12842 .errstr = "type=inv expected=sock",
12843 .result = REJECT,
12844 },
12845 {
12846 "reference tracking: alloc, check, free in one subbranch",
12847 .insns = {
12848 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12849 offsetof(struct __sk_buff, data)),
12850 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12851 offsetof(struct __sk_buff, data_end)),
12852 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12853 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
12854 /* if (offsetof(skb, mark) > data_len) exit; */
12855 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12856 BPF_EXIT_INSN(),
12857 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
12858 offsetof(struct __sk_buff, mark)),
12859 BPF_SK_LOOKUP,
12860 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
12861 /* Leak reference in R0 */
12862 BPF_EXIT_INSN(),
12863 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
12864 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12865 BPF_EMIT_CALL(BPF_FUNC_sk_release),
12866 BPF_EXIT_INSN(),
12867 },
12868 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12869 .errstr = "Unreleased reference",
12870 .result = REJECT,
12871 },
12872 {
12873 "reference tracking: alloc, check, free in both subbranches",
12874 .insns = {
12875 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12876 offsetof(struct __sk_buff, data)),
12877 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12878 offsetof(struct __sk_buff, data_end)),
12879 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12880 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
12881 /* if (offsetof(skb, mark) > data_len) exit; */
12882 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12883 BPF_EXIT_INSN(),
12884 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
12885 offsetof(struct __sk_buff, mark)),
12886 BPF_SK_LOOKUP,
12887 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
12888 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
12889 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12890 BPF_EMIT_CALL(BPF_FUNC_sk_release),
12891 BPF_EXIT_INSN(),
12892 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
12893 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12894 BPF_EMIT_CALL(BPF_FUNC_sk_release),
12895 BPF_EXIT_INSN(),
12896 },
12897 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12898 .result = ACCEPT,
12899 },
12900 {
12901 "reference tracking in call: free reference in subprog",
12902 .insns = {
12903 BPF_SK_LOOKUP,
12904 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
12905 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12906 BPF_MOV64_IMM(BPF_REG_0, 0),
12907 BPF_EXIT_INSN(),
12908
12909 /* subprog 1 */
12910 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
12911 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
12912 BPF_EMIT_CALL(BPF_FUNC_sk_release),
12913 BPF_EXIT_INSN(),
12914 },
12915 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12916 .result = ACCEPT,
12917 },
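	/* Helpers that declare an ARG_PTR_TO_CTX argument only accept the
	 * context register exactly as handed to the program; adding any
	 * offset, even a constant one, makes it a "modified ctx pointer"
	 * and the load must be rejected.
	 */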
12918 {
Daniel Borkmann58990d12018-06-07 17:40:03 +020012919 "pass modified ctx pointer to helper, 1",
12920 .insns = {
12921 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
12922 BPF_MOV64_IMM(BPF_REG_2, 0),
12923 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12924 BPF_FUNC_csum_update),
12925 BPF_MOV64_IMM(BPF_REG_0, 0),
12926 BPF_EXIT_INSN(),
12927 },
12928 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12929 .result = REJECT,
12930 .errstr = "dereference of modified ctx ptr",
12931 },
12932 {
12933 "pass modified ctx pointer to helper, 2",
12934 .insns = {
12935 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
12936 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12937 BPF_FUNC_get_socket_cookie),
12938 BPF_MOV64_IMM(BPF_REG_0, 0),
12939 BPF_EXIT_INSN(),
12940 },
12941 .result_unpriv = REJECT,
12942 .result = REJECT,
12943 .errstr_unpriv = "dereference of modified ctx ptr",
12944 .errstr = "dereference of modified ctx ptr",
12945 },
12946 {
12947 "pass modified ctx pointer to helper, 3",
12948 .insns = {
12949 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
12950 BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
12951 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
12952 BPF_MOV64_IMM(BPF_REG_2, 0),
12953 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12954 BPF_FUNC_csum_update),
12955 BPF_MOV64_IMM(BPF_REG_0, 0),
12956 BPF_EXIT_INSN(),
12957 },
12958 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12959 .result = REJECT,
12960 .errstr = "variable ctx access var_off=(0x0; 0x4)",
12961 },
Arthur Fabrefbeb1602018-07-31 18:17:22 +010012962 {
12963 "mov64 src == dst",
12964 .insns = {
12965 BPF_MOV64_IMM(BPF_REG_2, 0),
12966 BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
12967 // Check bounds are OK
12968 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
12969 BPF_MOV64_IMM(BPF_REG_0, 0),
12970 BPF_EXIT_INSN(),
12971 },
12972 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12973 .result = ACCEPT,
12974 },
12975 {
12976 "mov64 src != dst",
12977 .insns = {
12978 BPF_MOV64_IMM(BPF_REG_3, 0),
12979 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
12980 // Check bounds are OK
12981 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
12982 BPF_MOV64_IMM(BPF_REG_0, 0),
12983 BPF_EXIT_INSN(),
12984 },
12985 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12986 .result = ACCEPT,
12987 },
Joe Stringerb584ab82018-10-02 13:35:38 -070012988 {
12989 "reference tracking in call: free reference in subprog and outside",
12990 .insns = {
12991 BPF_SK_LOOKUP,
12992 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
12993 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12994 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12995 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12996 BPF_EMIT_CALL(BPF_FUNC_sk_release),
12997 BPF_EXIT_INSN(),
12998
12999 /* subprog 1 */
13000 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
13001 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
13002 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13003 BPF_EXIT_INSN(),
13004 },
13005 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13006 .errstr = "type=inv expected=sock",
13007 .result = REJECT,
13008 },
13009 {
13010 "reference tracking in call: alloc & leak reference in subprog",
13011 .insns = {
13012 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13013 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13014 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13015 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13016 BPF_MOV64_IMM(BPF_REG_0, 0),
13017 BPF_EXIT_INSN(),
13018
13019 /* subprog 1 */
13020 BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
13021 BPF_SK_LOOKUP,
13022 /* spill unchecked sk_ptr into stack of caller */
13023 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13024 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13025 BPF_EXIT_INSN(),
13026 },
13027 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13028 .errstr = "Unreleased reference",
13029 .result = REJECT,
13030 },
13031 {
13032 "reference tracking in call: alloc in subprog, release outside",
13033 .insns = {
13034 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13035 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
13036 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13037 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13038 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13039 BPF_EXIT_INSN(),
13040
13041 /* subprog 1 */
13042 BPF_SK_LOOKUP,
13043 BPF_EXIT_INSN(), /* return sk */
13044 },
13045 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13046 .retval = POINTER_VALUE,
13047 .result = ACCEPT,
13048 },
13049 {
13050 "reference tracking in call: sk_ptr leak into caller stack",
13051 .insns = {
13052 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13053 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13054 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13055 BPF_MOV64_IMM(BPF_REG_0, 0),
13056 BPF_EXIT_INSN(),
13057
13058 /* subprog 1 */
13059 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13060 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13061 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13062 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
13063 /* spill unchecked sk_ptr into stack of caller */
13064 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13065 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13066 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13067 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13068 BPF_EXIT_INSN(),
13069
13070 /* subprog 2 */
13071 BPF_SK_LOOKUP,
13072 BPF_EXIT_INSN(),
13073 },
13074 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13075 .errstr = "Unreleased reference",
13076 .result = REJECT,
13077 },
13078 {
13079 "reference tracking in call: sk_ptr spill into caller stack",
13080 .insns = {
13081 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13082 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13083 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13084 BPF_MOV64_IMM(BPF_REG_0, 0),
13085 BPF_EXIT_INSN(),
13086
13087 /* subprog 1 */
13088 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13089 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13090 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13091 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
13092 /* spill unchecked sk_ptr into stack of caller */
13093 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13094 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13095 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13096 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13097 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13098 /* now the sk_ptr is verified, free the reference */
13099 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
13100 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13101 BPF_EXIT_INSN(),
13102
13103 /* subprog 2 */
13104 BPF_SK_LOOKUP,
13105 BPF_EXIT_INSN(),
13106 },
13107 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13108 .result = ACCEPT,
13109 },
13110 {
13111 "reference tracking: allow LD_ABS",
13112 .insns = {
13113 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13114 BPF_SK_LOOKUP,
13115 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13116 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13117 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13118 BPF_LD_ABS(BPF_B, 0),
13119 BPF_LD_ABS(BPF_H, 0),
13120 BPF_LD_ABS(BPF_W, 0),
13121 BPF_EXIT_INSN(),
13122 },
13123 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13124 .result = ACCEPT,
13125 },
13126 {
13127 "reference tracking: forbid LD_ABS while holding reference",
13128 .insns = {
13129 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13130 BPF_SK_LOOKUP,
13131 BPF_LD_ABS(BPF_B, 0),
13132 BPF_LD_ABS(BPF_H, 0),
13133 BPF_LD_ABS(BPF_W, 0),
13134 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13135 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13136 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13137 BPF_EXIT_INSN(),
13138 },
13139 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13140 .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13141 .result = REJECT,
13142 },
13143 {
13144 "reference tracking: allow LD_IND",
13145 .insns = {
13146 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13147 BPF_SK_LOOKUP,
13148 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13149 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13150 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13151 BPF_MOV64_IMM(BPF_REG_7, 1),
13152 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13153 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13154 BPF_EXIT_INSN(),
13155 },
13156 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13157 .result = ACCEPT,
13158 .retval = 1,
13159 },
13160 {
13161 "reference tracking: forbid LD_IND while holding reference",
13162 .insns = {
13163 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13164 BPF_SK_LOOKUP,
13165 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
13166 BPF_MOV64_IMM(BPF_REG_7, 1),
13167 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13168 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13169 BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
13170 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13171 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13172 BPF_EXIT_INSN(),
13173 },
13174 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13175 .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13176 .result = REJECT,
13177 },
13178 {
13179 "reference tracking: check reference or tail call",
13180 .insns = {
13181 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13182 BPF_SK_LOOKUP,
13183 /* if (sk) bpf_sk_release() */
13184 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13185 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
13186 /* bpf_tail_call() */
13187 BPF_MOV64_IMM(BPF_REG_3, 2),
13188 BPF_LD_MAP_FD(BPF_REG_2, 0),
13189 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13190 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13191 BPF_FUNC_tail_call),
13192 BPF_MOV64_IMM(BPF_REG_0, 0),
13193 BPF_EXIT_INSN(),
13194 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13195 BPF_EXIT_INSN(),
13196 },
13197 .fixup_prog1 = { 17 },
13198 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13199 .result = ACCEPT,
13200 },
13201 {
13202 "reference tracking: release reference then tail call",
13203 .insns = {
13204 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13205 BPF_SK_LOOKUP,
13206 /* if (sk) bpf_sk_release() */
13207 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13208 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13209 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13210 /* bpf_tail_call() */
13211 BPF_MOV64_IMM(BPF_REG_3, 2),
13212 BPF_LD_MAP_FD(BPF_REG_2, 0),
13213 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13214 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13215 BPF_FUNC_tail_call),
13216 BPF_MOV64_IMM(BPF_REG_0, 0),
13217 BPF_EXIT_INSN(),
13218 },
13219 .fixup_prog1 = { 18 },
13220 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13221 .result = ACCEPT,
13222 },
13223 {
13224 "reference tracking: leak possible reference over tail call",
13225 .insns = {
13226 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13227 /* Look up socket and store in REG_6 */
13228 BPF_SK_LOOKUP,
13229 /* bpf_tail_call() */
13230 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13231 BPF_MOV64_IMM(BPF_REG_3, 2),
13232 BPF_LD_MAP_FD(BPF_REG_2, 0),
13233 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13234 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13235 BPF_FUNC_tail_call),
13236 BPF_MOV64_IMM(BPF_REG_0, 0),
13237 /* if (sk) bpf_sk_release() */
13238 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13239 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13240 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13241 BPF_EXIT_INSN(),
13242 },
13243 .fixup_prog1 = { 16 },
13244 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13245 .errstr = "tail_call would lead to reference leak",
13246 .result = REJECT,
13247 },
13248 {
13249 "reference tracking: leak checked reference over tail call",
13250 .insns = {
13251 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13252 /* Look up socket and store in REG_6 */
13253 BPF_SK_LOOKUP,
13254 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13255 /* if (!sk) goto end */
13256 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
13257 /* bpf_tail_call() */
13258 BPF_MOV64_IMM(BPF_REG_3, 0),
13259 BPF_LD_MAP_FD(BPF_REG_2, 0),
13260 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13261 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13262 BPF_FUNC_tail_call),
13263 BPF_MOV64_IMM(BPF_REG_0, 0),
13264 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13265 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13266 BPF_EXIT_INSN(),
13267 },
13268 .fixup_prog1 = { 17 },
13269 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13270 .errstr = "tail_call would lead to reference leak",
13271 .result = REJECT,
13272 },
13273 {
13274 "reference tracking: mangle and release sock_or_null",
13275 .insns = {
13276 BPF_SK_LOOKUP,
13277 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13278 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
13279 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13280 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13281 BPF_EXIT_INSN(),
13282 },
13283 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13284 .errstr = "R1 pointer arithmetic on sock_or_null prohibited",
13285 .result = REJECT,
13286 },
13287 {
13288 "reference tracking: mangle and release sock",
13289 .insns = {
13290 BPF_SK_LOOKUP,
13291 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13292 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
13294 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13295 BPF_EXIT_INSN(),
13296 },
13297 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13298 .errstr = "R1 pointer arithmetic on sock prohibited",
13299 .result = REJECT,
13300 },
13301 {
13302 "reference tracking: access member",
13303 .insns = {
13304 BPF_SK_LOOKUP,
13305 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13306 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13307 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
13308 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13309 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13310 BPF_EXIT_INSN(),
13311 },
13312 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13313 .result = ACCEPT,
13314 },
13315 {
13316 "reference tracking: write to member",
13317 .insns = {
13318 BPF_SK_LOOKUP,
13319 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13320 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
13321 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13322 BPF_LD_IMM64(BPF_REG_2, 42),
13323 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
13324 offsetof(struct bpf_sock, mark)),
13325 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13326 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13327 BPF_LD_IMM64(BPF_REG_0, 0),
13328 BPF_EXIT_INSN(),
13329 },
13330 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13331 .errstr = "cannot write into socket",
13332 .result = REJECT,
13333 },
13334 {
13335 "reference tracking: invalid 64-bit access of member",
13336 .insns = {
13337 BPF_SK_LOOKUP,
13338 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13339 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13340 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
13341 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13342 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13343 BPF_EXIT_INSN(),
13344 },
13345 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13346 .errstr = "invalid bpf_sock access off=0 size=8",
13347 .result = REJECT,
13348 },
13349 {
13350 "reference tracking: access after release",
13351 .insns = {
13352 BPF_SK_LOOKUP,
13353 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13354 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13355 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13356 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
13357 BPF_EXIT_INSN(),
13358 },
13359 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13360 .errstr = "!read_ok",
13361 .result = REJECT,
13362 },
13363 {
13364 "reference tracking: direct access for lookup",
13365 .insns = {
13366 /* Check that the packet is at least 64B long */
13367 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13368 offsetof(struct __sk_buff, data)),
13369 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13370 offsetof(struct __sk_buff, data_end)),
13371 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13372 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
13373 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
13374 /* sk = sk_lookup_tcp(ctx, skb->data, ...) */
13375 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
13376 BPF_MOV64_IMM(BPF_REG_4, 0),
13377 BPF_MOV64_IMM(BPF_REG_5, 0),
13378 BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
13379 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13380 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13381 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
13382 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13383 BPF_EMIT_CALL(BPF_FUNC_sk_release),
13384 BPF_EXIT_INSN(),
13385 },
13386 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13387 .result = ACCEPT,
13388 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013389};
13390
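/* A test's .insns array is MAX_INSNS long and zero-padded; walk backwards
 * to the last non-zero instruction to recover the real program length that
 * is handed to the kernel.
 */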
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013391static int probe_filter_length(const struct bpf_insn *fp)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013392{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013393 int len;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013394
13395 for (len = MAX_INSNS - 1; len > 0; --len)
13396 if (fp[len].code != 0 || fp[len].imm != 0)
13397 break;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013398 return len + 1;
13399}
13400
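/* Generic map fixture used by the fixups below. Hash maps get
 * BPF_F_NO_PREALLOC so elements are allocated lazily; array maps are
 * preallocated by the kernel and take no extra flag.
 */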
Daniel Borkmann06be0862018-06-02 23:06:31 +020013401static int create_map(uint32_t type, uint32_t size_key,
13402 uint32_t size_value, uint32_t max_elem)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013403{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013404 int fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013405
Daniel Borkmann06be0862018-06-02 23:06:31 +020013406 fd = bpf_create_map(type, size_key, size_value, max_elem,
13407 type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013408 if (fd < 0)
13409		printf("Failed to create map '%s'!\n", strerror(errno));
Alexei Starovoitovbf508872015-10-07 22:23:23 -070013410
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013411 return fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070013412}
13413
Joe Stringer0c586072018-10-02 13:35:37 -070013414static int create_prog_dummy1(enum bpf_prog_type prog_type)
Daniel Borkmannb33eb732018-02-26 22:34:33 +010013415{
13416 struct bpf_insn prog[] = {
13417 BPF_MOV64_IMM(BPF_REG_0, 42),
13418 BPF_EXIT_INSN(),
13419 };
13420
Joe Stringer0c586072018-10-02 13:35:37 -070013421 return bpf_load_program(prog_type, prog,
Daniel Borkmannb33eb732018-02-26 22:34:33 +010013422 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
13423}
13424
Joe Stringer0c586072018-10-02 13:35:37 -070013425static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
Daniel Borkmannb33eb732018-02-26 22:34:33 +010013426{
13427 struct bpf_insn prog[] = {
13428 BPF_MOV64_IMM(BPF_REG_3, idx),
13429 BPF_LD_MAP_FD(BPF_REG_2, mfd),
13430 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13431 BPF_FUNC_tail_call),
13432 BPF_MOV64_IMM(BPF_REG_0, 41),
13433 BPF_EXIT_INSN(),
13434 };
13435
Joe Stringer0c586072018-10-02 13:35:37 -070013436 return bpf_load_program(prog_type, prog,
Daniel Borkmannb33eb732018-02-26 22:34:33 +010013437 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
13438}
13439
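/* Build the BPF_MAP_TYPE_PROG_ARRAY used by the tail_call tests: slot
 * p1key holds a dummy program that simply returns 42, while slot p2key
 * (index 1) holds one that tail-calls back into the same array before
 * falling through to return 41.
 */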
Joe Stringer0c586072018-10-02 13:35:37 -070013440static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
13441 int p1key)
Alexei Starovoitovbf508872015-10-07 22:23:23 -070013442{
Daniel Borkmann06be0862018-06-02 23:06:31 +020013443 int p2key = 1;
Daniel Borkmannb33eb732018-02-26 22:34:33 +010013444 int mfd, p1fd, p2fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070013445
Daniel Borkmannb33eb732018-02-26 22:34:33 +010013446 mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
Daniel Borkmann06be0862018-06-02 23:06:31 +020013447 sizeof(int), max_elem, 0);
Daniel Borkmannb33eb732018-02-26 22:34:33 +010013448 if (mfd < 0) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013449 printf("Failed to create prog array '%s'!\n", strerror(errno));
Daniel Borkmannb33eb732018-02-26 22:34:33 +010013450 return -1;
13451 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013452
Joe Stringer0c586072018-10-02 13:35:37 -070013453 p1fd = create_prog_dummy1(prog_type);
13454 p2fd = create_prog_dummy2(prog_type, mfd, p2key);
Daniel Borkmannb33eb732018-02-26 22:34:33 +010013455 if (p1fd < 0 || p2fd < 0)
13456 goto out;
13457 if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
13458 goto out;
13459 if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
13460 goto out;
13461 close(p2fd);
13462 close(p1fd);
13463
13464 return mfd;
13465out:
13466 close(p2fd);
13467 close(p1fd);
13468 close(mfd);
13469 return -1;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013470}
13471
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070013472static int create_map_in_map(void)
13473{
13474 int inner_map_fd, outer_map_fd;
13475
13476 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
13477 sizeof(int), 1, 0);
13478 if (inner_map_fd < 0) {
13479 printf("Failed to create array '%s'!\n", strerror(errno));
13480 return inner_map_fd;
13481 }
13482
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -070013483 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070013484 sizeof(int), inner_map_fd, 1, 0);
13485 if (outer_map_fd < 0)
13486 printf("Failed to create array of maps '%s'!\n",
13487 strerror(errno));
13488
13489 close(inner_map_fd);
13490
13491 return outer_map_fd;
13492}
13493
Roman Gushchina3c60542018-09-28 14:45:53 +000013494static int create_cgroup_storage(bool percpu)
Roman Gushchind4c9f572018-08-02 14:27:28 -070013495{
Roman Gushchina3c60542018-09-28 14:45:53 +000013496 enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
13497 BPF_MAP_TYPE_CGROUP_STORAGE;
Roman Gushchind4c9f572018-08-02 14:27:28 -070013498 int fd;
13499
Roman Gushchina3c60542018-09-28 14:45:53 +000013500 fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
Roman Gushchind4c9f572018-08-02 14:27:28 -070013501 TEST_DATA_LEN, 0, 0);
13502 if (fd < 0)
Roman Gushchina3c60542018-09-28 14:45:53 +000013503 printf("Failed to create cgroup storage '%s'!\n",
13504 strerror(errno));
Roman Gushchind4c9f572018-08-02 14:27:28 -070013505
13506 return fd;
13507}
13508
Daniel Borkmann93731ef2018-05-04 01:08:13 +020013509static char bpf_vlog[UINT_MAX >> 8];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013510
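/* Patch a test's instructions before loading: each fixup_* array lists the
 * indices of BPF_LD_MAP_FD (ldimm64) instructions whose imm field must be
 * rewritten with the fd of a freshly created map of the matching kind,
 * for example:
 *
 *	prog[*fixup_map_hash_8b].imm = map_fds[0];
 *
 * Tests with a .fill_helper callback generate their whole insn array here
 * instead.
 */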
Joe Stringer0c586072018-10-02 13:35:37 -070013511static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
13512 struct bpf_insn *prog, int *map_fds)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013513{
Prashant Bhole908142e2018-10-09 10:04:53 +090013514 int *fixup_map_hash_8b = test->fixup_map_hash_8b;
13515 int *fixup_map_hash_48b = test->fixup_map_hash_48b;
13516 int *fixup_map_hash_16b = test->fixup_map_hash_16b;
13517 int *fixup_map_array_48b = test->fixup_map_array_48b;
Daniel Borkmann06be0862018-06-02 23:06:31 +020013518 int *fixup_prog1 = test->fixup_prog1;
13519 int *fixup_prog2 = test->fixup_prog2;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070013520 int *fixup_map_in_map = test->fixup_map_in_map;
Roman Gushchind4c9f572018-08-02 14:27:28 -070013521 int *fixup_cgroup_storage = test->fixup_cgroup_storage;
Roman Gushchina3c60542018-09-28 14:45:53 +000013522 int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013523
Daniel Borkmann93731ef2018-05-04 01:08:13 +020013524 if (test->fill_helper)
13525 test->fill_helper(test);
13526
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013527	/* Allocating HTs with 1 elem is fine here, since we only test
13528	 * the verifier and do not do a runtime lookup, so the only thing
13529 * that really matters is value size in this case.
13530 */
Prashant Bhole908142e2018-10-09 10:04:53 +090013531 if (*fixup_map_hash_8b) {
Daniel Borkmann06be0862018-06-02 23:06:31 +020013532 map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
13533 sizeof(long long), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013534 do {
Prashant Bhole908142e2018-10-09 10:04:53 +090013535 prog[*fixup_map_hash_8b].imm = map_fds[0];
13536 fixup_map_hash_8b++;
13537 } while (*fixup_map_hash_8b);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013538 }
13539
Prashant Bhole908142e2018-10-09 10:04:53 +090013540 if (*fixup_map_hash_48b) {
Daniel Borkmann06be0862018-06-02 23:06:31 +020013541 map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
13542 sizeof(struct test_val), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013543 do {
Prashant Bhole908142e2018-10-09 10:04:53 +090013544 prog[*fixup_map_hash_48b].imm = map_fds[1];
13545 fixup_map_hash_48b++;
13546 } while (*fixup_map_hash_48b);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013547 }
13548
Prashant Bhole908142e2018-10-09 10:04:53 +090013549 if (*fixup_map_hash_16b) {
Daniel Borkmann06be0862018-06-02 23:06:31 +020013550 map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
13551 sizeof(struct other_val), 1);
Paul Chaignon5f90dd62018-04-24 15:08:19 +020013552 do {
Prashant Bhole908142e2018-10-09 10:04:53 +090013553 prog[*fixup_map_hash_16b].imm = map_fds[2];
13554 fixup_map_hash_16b++;
13555 } while (*fixup_map_hash_16b);
Paul Chaignon5f90dd62018-04-24 15:08:19 +020013556 }
13557
Prashant Bhole908142e2018-10-09 10:04:53 +090013558 if (*fixup_map_array_48b) {
Daniel Borkmann06be0862018-06-02 23:06:31 +020013559 map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
13560 sizeof(struct test_val), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013561 do {
Prashant Bhole908142e2018-10-09 10:04:53 +090013562 prog[*fixup_map_array_48b].imm = map_fds[3];
13563 fixup_map_array_48b++;
13564 } while (*fixup_map_array_48b);
Daniel Borkmann06be0862018-06-02 23:06:31 +020013565 }
13566
13567 if (*fixup_prog1) {
Joe Stringer0c586072018-10-02 13:35:37 -070013568 map_fds[4] = create_prog_array(prog_type, 4, 0);
Daniel Borkmann06be0862018-06-02 23:06:31 +020013569 do {
13570 prog[*fixup_prog1].imm = map_fds[4];
13571 fixup_prog1++;
13572 } while (*fixup_prog1);
13573 }
13574
13575 if (*fixup_prog2) {
Joe Stringer0c586072018-10-02 13:35:37 -070013576 map_fds[5] = create_prog_array(prog_type, 8, 7);
Daniel Borkmann06be0862018-06-02 23:06:31 +020013577 do {
13578 prog[*fixup_prog2].imm = map_fds[5];
13579 fixup_prog2++;
13580 } while (*fixup_prog2);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013581 }
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070013582
13583 if (*fixup_map_in_map) {
Daniel Borkmann06be0862018-06-02 23:06:31 +020013584 map_fds[6] = create_map_in_map();
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070013585 do {
Daniel Borkmann06be0862018-06-02 23:06:31 +020013586 prog[*fixup_map_in_map].imm = map_fds[6];
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070013587 fixup_map_in_map++;
13588 } while (*fixup_map_in_map);
13589 }
Roman Gushchind4c9f572018-08-02 14:27:28 -070013590
13591 if (*fixup_cgroup_storage) {
Roman Gushchina3c60542018-09-28 14:45:53 +000013592 map_fds[7] = create_cgroup_storage(false);
Roman Gushchind4c9f572018-08-02 14:27:28 -070013593 do {
13594 prog[*fixup_cgroup_storage].imm = map_fds[7];
13595 fixup_cgroup_storage++;
13596 } while (*fixup_cgroup_storage);
13597 }
Roman Gushchina3c60542018-09-28 14:45:53 +000013598
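	/* Note: map_fds[8] is used below, so MAX_NR_MAPS must be at least 9. */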
13599 if (*fixup_percpu_cgroup_storage) {
13600 map_fds[8] = create_cgroup_storage(true);
13601 do {
13602 prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
13603 fixup_percpu_cgroup_storage++;
13604 } while (*fixup_percpu_cgroup_storage);
13605 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013606}
13607
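/* Load a single test program and check the outcome: the verifier verdict
 * (and error string) must match the expected result for the current
 * privilege level, and programs that load successfully are executed once
 * so that their return value can be compared against test->retval.
 */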
13608static void do_test_single(struct bpf_test *test, bool unpriv,
13609 int *passes, int *errors)
13610{
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020013611 int fd_prog, expected_ret, reject_from_alignment;
Daniel Borkmann93731ef2018-05-04 01:08:13 +020013612 int prog_len, prog_type = test->prog_type;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013613 struct bpf_insn *prog = test->insns;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070013614 int map_fds[MAX_NR_MAPS];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013615 const char *expected_err;
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080013616 uint32_t retval;
13617 int i, err;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013618
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070013619 for (i = 0; i < MAX_NR_MAPS; i++)
13620 map_fds[i] = -1;
13621
Joe Stringer0c586072018-10-02 13:35:37 -070013622 if (!prog_type)
13623 prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
13624 do_test_fixup(test, prog_type, prog, map_fds);
Daniel Borkmann93731ef2018-05-04 01:08:13 +020013625 prog_len = probe_filter_length(prog);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013626
Joe Stringer0c586072018-10-02 13:35:37 -070013627 fd_prog = bpf_verify_program(prog_type, prog, prog_len,
13628 test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmannd6554902017-07-21 00:00:22 +020013629 "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013630
13631 expected_ret = unpriv && test->result_unpriv != UNDEF ?
13632 test->result_unpriv : test->result;
13633 expected_err = unpriv && test->errstr_unpriv ?
13634 test->errstr_unpriv : test->errstr;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020013635
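	/* Tests flagged F_NEEDS_EFFICIENT_UNALIGNED_ACCESS may legitimately be
	 * rejected on architectures without efficient unaligned access; such
	 * rejections are reported but not counted as failures.
	 */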
13636 reject_from_alignment = fd_prog < 0 &&
13637 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
13638 strstr(bpf_vlog, "Unknown alignment.");
13639#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13640 if (reject_from_alignment) {
13641 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
13642 strerror(errno));
13643 goto fail_log;
13644 }
13645#endif
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013646 if (expected_ret == ACCEPT) {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020013647 if (fd_prog < 0 && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013648 printf("FAIL\nFailed to load prog '%s'!\n",
13649 strerror(errno));
13650 goto fail_log;
13651 }
13652 } else {
13653 if (fd_prog >= 0) {
13654 printf("FAIL\nUnexpected success to load!\n");
13655 goto fail_log;
13656 }
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020013657 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
Joe Stringer95f87a92018-02-14 13:50:34 -080013658 printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
13659 expected_err, bpf_vlog);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013660 goto fail_log;
13661 }
13662 }
13663
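	/* Programs that loaded successfully are run once via
	 * bpf_prog_test_run(); ENOTSUPP/EPERM are tolerated, and a
	 * test->retval of POINTER_VALUE disables the return value check.
	 */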
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080013664 if (fd_prog >= 0) {
Daniel Borkmann6e6fddc2018-07-11 15:30:14 +020013665 __u8 tmp[TEST_DATA_LEN << 2];
13666 __u32 size_tmp = sizeof(tmp);
13667
Daniel Borkmann93731ef2018-05-04 01:08:13 +020013668 err = bpf_prog_test_run(fd_prog, 1, test->data,
Daniel Borkmann6e6fddc2018-07-11 15:30:14 +020013669 sizeof(test->data), tmp, &size_tmp,
Daniel Borkmann93731ef2018-05-04 01:08:13 +020013670 &retval, NULL);
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080013671 if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
13672 printf("Unexpected bpf_prog_test_run error\n");
13673 goto fail_log;
13674 }
13675 if (!err && retval != test->retval &&
13676 test->retval != POINTER_VALUE) {
13677 printf("FAIL retval %d != %d\n", retval, test->retval);
13678 goto fail_log;
13679 }
13680 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013681 (*passes)++;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020013682 printf("OK%s\n", reject_from_alignment ?
13683 " (NOTE: reject due to unknown alignment)" : "");
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013684close_fds:
13685 close(fd_prog);
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070013686 for (i = 0; i < MAX_NR_MAPS; i++)
13687 close(map_fds[i]);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013688 sched_yield();
13689 return;
13690fail_log:
13691 (*errors)++;
13692 printf("%s", bpf_vlog);
13693 goto close_fds;
13694}
13695
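/* Return true when CAP_SYS_ADMIN is in the effective capability set. */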
Mickaël Salaünd02d8982017-02-10 00:21:37 +010013696static bool is_admin(void)
13697{
13698 cap_t caps;
13699 cap_flag_value_t sysadmin = CAP_CLEAR;
13700 const cap_value_t cap_val = CAP_SYS_ADMIN;
13701
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080013702#ifdef CAP_IS_SUPPORTED
Mickaël Salaünd02d8982017-02-10 00:21:37 +010013703 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
13704 perror("CAP_IS_SUPPORTED");
13705 return false;
13706 }
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080013707#endif
Mickaël Salaünd02d8982017-02-10 00:21:37 +010013708 caps = cap_get_proc();
13709 if (!caps) {
13710 perror("cap_get_proc");
13711 return false;
13712 }
13713 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
13714 perror("cap_get_flag");
13715 if (cap_free(caps))
13716 perror("cap_free");
13717 return (sysadmin == CAP_SET);
13718}
13719
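/* Raise or drop CAP_SYS_ADMIN in the effective set, so that a privileged
 * run can temporarily become unprivileged for the /u pass of each test.
 */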
13720static int set_admin(bool admin)
13721{
13722 cap_t caps;
13723 const cap_value_t cap_val = CAP_SYS_ADMIN;
13724 int ret = -1;
13725
13726 caps = cap_get_proc();
13727 if (!caps) {
13728 perror("cap_get_proc");
13729 return -1;
13730 }
13731 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
13732 admin ? CAP_SET : CAP_CLEAR)) {
13733 perror("cap_set_flag");
13734 goto out;
13735 }
13736 if (cap_set_proc(caps)) {
13737 perror("cap_set_proc");
13738 goto out;
13739 }
13740 ret = 0;
13741out:
13742 if (cap_free(caps))
13743 perror("cap_free");
13744 return ret;
13745}
13746
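/* Read the kernel.unprivileged_bpf_disabled sysctl; unprivileged test runs
 * are skipped when it is set (or when the sysctl cannot be read).
 */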
Joe Stringer0a6748742018-02-14 13:50:36 -080013747static void get_unpriv_disabled(void)
13748{
13749 char buf[2];
13750 FILE *fd;
13751
13752 fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
Jesper Dangaard Brouerdeea8122018-05-17 19:39:31 +020013753 if (!fd) {
13754 perror("fopen /proc/sys/"UNPRIV_SYSCTL);
13755 unpriv_disabled = true;
13756 return;
13757 }
Joe Stringer0a6748742018-02-14 13:50:36 -080013758 if (fgets(buf, 2, fd) == buf && atoi(buf))
13759 unpriv_disabled = true;
13760 fclose(fd);
13761}
13762
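/* Run tests [from, to): each test is executed as unprivileged ("/u",
 * dropping CAP_SYS_ADMIN first when running as root) unless it requires a
 * specific program type, and as privileged ("/p") unless the whole binary
 * runs unprivileged.
 */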
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013763static int do_test(bool unpriv, unsigned int from, unsigned int to)
13764{
Joe Stringerd0a0e492018-02-14 13:50:35 -080013765 int i, passes = 0, errors = 0, skips = 0;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013766
13767 for (i = from; i < to; i++) {
13768 struct bpf_test *test = &tests[i];
13769
13770 /* Program types that are not supported by non-root are
13771 * skipped right away.
13772 */
Joe Stringer0a6748742018-02-14 13:50:36 -080013773 if (!test->prog_type && unpriv_disabled) {
13774 printf("#%d/u %s SKIP\n", i, test->descr);
13775 skips++;
13776 } else if (!test->prog_type) {
Mickaël Salaünd02d8982017-02-10 00:21:37 +010013777 if (!unpriv)
13778 set_admin(false);
13779 printf("#%d/u %s ", i, test->descr);
13780 do_test_single(test, true, &passes, &errors);
13781 if (!unpriv)
13782 set_admin(true);
13783 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013784
Joe Stringerd0a0e492018-02-14 13:50:35 -080013785 if (unpriv) {
13786 printf("#%d/p %s SKIP\n", i, test->descr);
13787 skips++;
13788 } else {
Mickaël Salaünd02d8982017-02-10 00:21:37 +010013789 printf("#%d/p %s ", i, test->descr);
13790 do_test_single(test, false, &passes, &errors);
13791 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013792 }
13793
Joe Stringerd0a0e492018-02-14 13:50:35 -080013794 printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
13795 skips, errors);
Jesper Dangaard Brouerefe5f9c2017-06-13 15:17:19 +020013796 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013797}
13798
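/* Usage (assuming the usual selftest binary name, e.g. ./test_verifier):
 *   ./test_verifier          run all tests
 *   ./test_verifier N        run only test #N
 *   ./test_verifier N M      run tests #N through #M inclusive
 */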
13799int main(int argc, char **argv)
13800{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013801 unsigned int from = 0, to = ARRAY_SIZE(tests);
Mickaël Salaünd02d8982017-02-10 00:21:37 +010013802 bool unpriv = !is_admin();
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013803
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013804 if (argc == 3) {
13805 unsigned int l = atoi(argv[argc - 2]);
13806 unsigned int u = atoi(argv[argc - 1]);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013807
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013808 if (l < to && u < to) {
13809 from = l;
13810 to = u + 1;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013811 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013812 } else if (argc == 2) {
13813 unsigned int t = atoi(argv[argc - 1]);
Alexei Starovoitovbf508872015-10-07 22:23:23 -070013814
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013815 if (t < to) {
13816 from = t;
13817 to = t + 1;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070013818 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013819 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013820
Joe Stringer0a6748742018-02-14 13:50:36 -080013821 get_unpriv_disabled();
13822 if (unpriv && unpriv_disabled) {
13823 printf("Cannot run as unprivileged user with sysctl %s.\n",
13824 UNPRIV_SYSCTL);
13825 return EXIT_FAILURE;
13826 }
13827
Daniel Borkmanna82d8cd2018-05-14 23:22:34 +020013828 bpf_semi_rand_init();
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020013829 return do_test(unpriv, from, to);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070013830}